[FFmpeg-devel] [PATCH V2 2/7] libavfilter/vf_sr: refine code to remove keyword 'else'
Pedro Arthur
bygrandao at gmail.com
Mon Apr 29 20:35:56 EEST 2019
On Wed, Apr 24, 2019 at 11:14 PM, Guo, Yejun <yejun.guo at intel.com> wrote:
>
> remove 'else' since there is always a 'return' in the 'if' scope,
> so the code will be cleaner for later maintenance
>
> Signed-off-by: Guo, Yejun <yejun.guo at intel.com>
> ---
> libavfilter/vf_sr.c | 143 ++++++++++++++++++++++++++--------------------------
> 1 file changed, 71 insertions(+), 72 deletions(-)
>
> diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
> index 6423d2e..9bb0fc5 100644
> --- a/libavfilter/vf_sr.c
> +++ b/libavfilter/vf_sr.c
> @@ -127,88 +127,87 @@ static int config_props(AVFilterLink *inlink)
> av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
> return AVERROR(EIO);
> }
> - else{
> - if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
> - sr_context->input.width = inlink->w;
> - sr_context->input.height = inlink->h;
> - result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
> - if (result != DNN_SUCCESS){
> - av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
> - return AVERROR(EIO);
> - }
> - sr_context->scale_factor = 0;
> +
> + if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
> + sr_context->input.width = inlink->w;
> + sr_context->input.height = inlink->h;
> + result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
> + if (result != DNN_SUCCESS){
> + av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
> + return AVERROR(EIO);
> }
> - outlink->h = sr_context->output.height;
> - outlink->w = sr_context->output.width;
> - sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
> - sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
> - 0, NULL, NULL, NULL);
> - sr_context->sws_input_linesize = sr_context->input.width << 2;
> - sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
> - sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
> - 0, NULL, NULL, NULL);
> - sr_context->sws_output_linesize = sr_context->output.width << 2;
> - if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
> - av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
> + sr_context->scale_factor = 0;
> + }
> + outlink->h = sr_context->output.height;
> + outlink->w = sr_context->output.width;
> + sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
> + sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
> + 0, NULL, NULL, NULL);
> + sr_context->sws_input_linesize = sr_context->input.width << 2;
> + sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
> + sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
> + 0, NULL, NULL, NULL);
> + sr_context->sws_output_linesize = sr_context->output.width << 2;
> + if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
> + av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
> + return AVERROR(ENOMEM);
> + }
> + if (sr_context->scale_factor){
> + sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
> + outlink->w, outlink->h, outlink->format,
> + SWS_BICUBIC, NULL, NULL, NULL);
> + if (!sr_context->sws_contexts[0]){
> + av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
> return AVERROR(ENOMEM);
> }
> - if (sr_context->scale_factor){
> - sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
> - outlink->w, outlink->h, outlink->format,
> + sr_context->sws_slice_h = inlink->h;
> + }
> + else{
> + if (inlink->format != AV_PIX_FMT_GRAY8){
> + sws_src_h = sr_context->input.height;
> + sws_src_w = sr_context->input.width;
> + sws_dst_h = sr_context->output.height;
> + sws_dst_w = sr_context->output.width;
> +
> + switch (inlink->format){
> + case AV_PIX_FMT_YUV420P:
> + sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 1);
> + sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
> + sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 1);
> + sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
> + break;
> + case AV_PIX_FMT_YUV422P:
> + sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
> + sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
> + break;
> + case AV_PIX_FMT_YUV444P:
> + break;
> + case AV_PIX_FMT_YUV410P:
> + sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 2);
> + sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
> + sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 2);
> + sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
> + break;
> + case AV_PIX_FMT_YUV411P:
> + sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
> + sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
> + break;
> + default:
> + av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format");
> + return AVERROR(EIO);
> + }
> + sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
> + sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
> SWS_BICUBIC, NULL, NULL, NULL);
> if (!sr_context->sws_contexts[0]){
> av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
> return AVERROR(ENOMEM);
> }
> - sr_context->sws_slice_h = inlink->h;
> + sr_context->sws_slice_h = sws_src_h;
> }
> - else{
> - if (inlink->format != AV_PIX_FMT_GRAY8){
> - sws_src_h = sr_context->input.height;
> - sws_src_w = sr_context->input.width;
> - sws_dst_h = sr_context->output.height;
> - sws_dst_w = sr_context->output.width;
> -
> - switch (inlink->format){
> - case AV_PIX_FMT_YUV420P:
> - sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 1);
> - sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
> - sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 1);
> - sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
> - break;
> - case AV_PIX_FMT_YUV422P:
> - sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
> - sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
> - break;
> - case AV_PIX_FMT_YUV444P:
> - break;
> - case AV_PIX_FMT_YUV410P:
> - sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 2);
> - sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
> - sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 2);
> - sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
> - break;
> - case AV_PIX_FMT_YUV411P:
> - sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
> - sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
> - break;
> - default:
> - av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format");
> - return AVERROR(EIO);
> - }
> - sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
> - sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
> - SWS_BICUBIC, NULL, NULL, NULL);
> - if (!sr_context->sws_contexts[0]){
> - av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
> - return AVERROR(ENOMEM);
> - }
> - sr_context->sws_slice_h = sws_src_h;
> - }
> - }
> -
> - return 0;
> }
> +
> + return 0;
> }
>
> static int filter_frame(AVFilterLink *inlink, AVFrame *in)
> --
> 2.7.4
>
LGTM.
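
For anyone skimming the archive later, the change is the standard early-return flattening: once the error branch ends in an unconditional return, the success path no longer needs an 'else' and loses one level of nesting. Below is a minimal, self-contained sketch of the pattern (hypothetical set_io() helper and status codes, not the actual vf_sr.c code):

#include <stdio.h>

/* Hypothetical status values standing in for DNN_SUCCESS / AVERROR(EIO). */
enum { OK = 0, FAIL = -1 };

/* Hypothetical stand-in for model->set_input_output(). */
static int set_io(int w, int h)
{
    return (w > 0 && h > 0) ? OK : FAIL;
}

/* Before: the success path is nested one level deeper under 'else'. */
static int config_before(int w, int h)
{
    if (set_io(w, h) != OK) {
        fprintf(stderr, "could not set input and output\n");
        return FAIL;
    } else {
        /* ... rest of the configuration, indented an extra level ... */
        return OK;
    }
}

/* After: the error branch returns unconditionally, so the 'else' is
 * dropped and the success path continues at the outer level. */
static int config_after(int w, int h)
{
    if (set_io(w, h) != OK) {
        fprintf(stderr, "could not set input and output\n");
        return FAIL;
    }

    /* ... rest of the configuration, no extra nesting ... */
    return OK;
}

int main(void)
{
    return (config_before(16, 16) == config_after(16, 16)) ? 0 : 1;
}

Same behaviour either way; the gain is purely the reduced indentation in the long configuration body.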