
Merge commit 'be209bdabb11c59de17220bdbf0bf9c9f7cc16f5' into release/0.10

* commit 'be209bdabb11c59de17220bdbf0bf9c9f7cc16f5':
vf_pad: don't give up its own reference to the output buffer.
libvorbis: use VBR by default, with default quality of 3
libvorbis: fix use of minrate/maxrate AVOptions
h264: fix deadlocks on incomplete reference frame decoding.
cmdutils: avoid setting data pointers to invalid values in alloc_buffer()
avidec: return 0, not packet size from read_packet().
wmapro: prevent division by zero when sample rate is unspecified
vc1dec: check that coded slice positions and interlacing match.
alsdec: fix number of decoded samples in first sub-block in BGMC mode.
alsdec: remove dead assignments
alsdec: Fix out of ltp_gain_values read.
alsdec: Check that quantized parcor coeffs are within range.
alsdec: Check k used for rice decoder.

Conflicts:
avconv.c
libavcodec/h264.c
libavcodec/libvorbis.c
libavformat/avidec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>

Michael Niedermayer authored on 2012/10/25 23:51:26
Showing 9 changed files
... ...
@@ -505,7 +505,7 @@ static int alloc_buffer(AVCodecContext *s, InputStream *ist, FrameBuffer **pbuf)
         const int v_shift = i==0 ? 0 : v_chroma_shift;
         if (s->flags & CODEC_FLAG_EMU_EDGE)
             buf->data[i] = buf->base[i];
-        else
+        else if (buf->base[i])
             buf->data[i] = buf->base[i] +
                            FFALIGN((buf->linesize[i]*edge >> v_shift) +
                                    (pixel_size*edge >> h_shift), 32);
... ...
@@ -651,6 +651,11 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
         for (k = 1; k < sub_blocks; k++)
             s[k] = s[k - 1] + decode_rice(gb, 0);
     }
+    for (k = 1; k < sub_blocks; k++)
+        if (s[k] > 32) {
+            av_log(avctx, AV_LOG_ERROR, "k invalid for rice code.\n");
+            return AVERROR_INVALIDDATA;
+        }
 
     if (get_bits1(gb))
         *bd->shift_lsbs = get_bits(gb, 4) + 1;
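
For context, decode_rice() reads a Golomb-Rice code: a unary-coded quotient followed by k remainder bits, so a parameter above 32 cannot describe a 32-bit residual and only occurs in corrupt input. A minimal standalone sketch of that idea (plain unsigned Rice, not alsdec's signed variant; the helper name is illustrative):

#include "libavcodec/get_bits.h"
#include "libavutil/error.h"

/* Plain Rice read with the same bound the hunk above enforces on s[k]. */
static int rice_decode_checked(GetBitContext *gb, unsigned k, uint64_t *out)
{
    uint64_t q = 0;

    if (k > 32)
        return AVERROR_INVALIDDATA;   /* cannot describe a 32-bit residual */

    while (get_bits1(gb))             /* unary-coded quotient */
        q++;

    *out = q << k;
    if (k)
        *out |= get_bits_long(gb, k); /* k remainder bits (1..32) */
    return 0;
}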
... ...
@@ -700,6 +705,10 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
                     int rice_param = parcor_rice_table[sconf->coef_table][k][1];
                     int offset     = parcor_rice_table[sconf->coef_table][k][0];
                     quant_cof[k] = decode_rice(gb, rice_param) + offset;
+                    if (quant_cof[k] < -64 || quant_cof[k] > 63) {
+                        av_log(avctx, AV_LOG_ERROR, "quant_cof %d is out of range\n", quant_cof[k]);
+                        return AVERROR_INVALIDDATA;
+                    }
                 }
 
                 // read coefficients 20 to 126
... ...
@@ -732,7 +741,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
             bd->ltp_gain[0]   = decode_rice(gb, 1) << 3;
             bd->ltp_gain[1]   = decode_rice(gb, 2) << 3;
 
-            r                 = get_unary(gb, 0, 4);
+            r                 = get_unary(gb, 0, 3);
             c                 = get_bits(gb, 2);
             bd->ltp_gain[2]   = ltp_gain_values[r][c];
 
... ...
@@ -761,7 +770,6 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
         int          delta[8];
         unsigned int k    [8];
         unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
-        unsigned int i = start;
 
         // read most significant bits
         unsigned int high;
... ...
@@ -772,29 +780,30 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
 
         current_res = bd->raw_samples + start;
 
-        for (sb = 0; sb < sub_blocks; sb++, i = 0) {
+        for (sb = 0; sb < sub_blocks; sb++) {
+            unsigned int sb_len  = sb_length - (sb ? 0 : start);
+
             k    [sb] = s[sb] > b ? s[sb] - b : 0;
             delta[sb] = 5 - s[sb] + k[sb];
 
-            ff_bgmc_decode(gb, sb_length, current_res,
+            ff_bgmc_decode(gb, sb_len, current_res,
                         delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
 
-            current_res += sb_length;
+            current_res += sb_len;
         }
 
         ff_bgmc_decode_end(gb);
 
 
         // read least significant bits and tails
-        i = start;
         current_res = bd->raw_samples + start;
 
-        for (sb = 0; sb < sub_blocks; sb++, i = 0) {
+        for (sb = 0; sb < sub_blocks; sb++, start = 0) {
             unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
             unsigned int cur_k         = k[sb];
             unsigned int cur_s         = s[sb];
 
-            for (; i < sb_length; i++) {
+            for (; start < sb_length; start++) {
                 int32_t res = *current_res;
 
                 if (res == cur_tail_code) {
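
The sb_len change above is what the commit subject "fix number of decoded samples in first sub-block in BGMC mode" refers to: only the first sub-block is shortened by start, the samples at the head of the block that are not read in the BGMC pass. A tiny standalone illustration with made-up sizes:

#include <stdio.h>

int main(void)
{
    /* Hypothetical numbers: 4 sub-blocks of 1024 residuals each, with 3
     * leading samples already handled before the BGMC pass. */
    unsigned int sub_blocks = 4, sb_length = 1024, start = 3, sb;

    for (sb = 0; sb < sub_blocks; sb++) {
        unsigned int sb_len = sb_length - (sb ? 0 : start);
        printf("sub-block %u: %u residuals\n", sb, sb_len);
    }
    return 0;   /* prints 1021, then 1024 three times */
}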
... ...
@@ -2506,8 +2506,8 @@ static int field_end(H264Context *h, int in_setup){
     s->mb_y= 0;
 
     if (!in_setup && !s->dropable)
-        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, (16*s->mb_height >> FIELD_PICTURE) - 1,
-                                 s->picture_structure==PICT_BOTTOM_FIELD);
+        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
+                                  s->picture_structure == PICT_BOTTOM_FIELD);
 
     if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
         ff_vdpau_h264_set_reference_frames(s);
... ...
@@ -2624,9 +2624,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
     int num_ref_idx_active_override_flag;
     unsigned int slice_type, tmp, i, j;
     int default_ref_list_done = 0;
-    int last_pic_structure;
-
-    s->dropable= h->nal_ref_idc == 0;
+    int last_pic_structure, last_pic_dropable;
 
     /* FIXME: 2tap qpel isn't implemented for high bit depth. */
     if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc && !h->pixel_shift){
... ...
@@ -2645,8 +2643,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
         }
 
         h0->current_slice = 0;
-        if (!s0->first_field)
-            s->current_picture_ptr= NULL;
+        if (!s0->first_field) {
+            if (s->current_picture_ptr && !s->dropable &&
+                s->current_picture_ptr->owner2 == s) {
+                ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
+                                          s->picture_structure == PICT_BOTTOM_FIELD);
+            }
+            s->current_picture_ptr = NULL;
+        }
     }
 
     slice_type= get_ue_golomb_31(&s->gb);
... ...
@@ -2864,6 +2868,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
     h->mb_mbaff = 0;
     h->mb_aff_frame = 0;
     last_pic_structure = s0->picture_structure;
+    last_pic_dropable  = s->dropable;
+    s->dropable        = h->nal_ref_idc == 0;
     if(h->sps.frame_mbs_only_flag){
         s->picture_structure= PICT_FRAME;
     }else{
... ...
@@ -2880,10 +2886,22 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
     }
     h->mb_field_decoding_flag= s->picture_structure != PICT_FRAME;
 
-    if(h0->current_slice == 0){
-        // Shorten frame num gaps so we don't have to allocate reference frames just to throw them away
-        if(h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
-            int unwrap_prev_frame_num = h->prev_frame_num, max_frame_num = 1<<h->sps.log2_max_frame_num;
+    if (h0->current_slice != 0) {
+        if (last_pic_structure != s->picture_structure ||
+            last_pic_dropable  != s->dropable) {
+            av_log(h->s.avctx, AV_LOG_ERROR,
+                   "Changing field mode (%d -> %d) between slices is not allowed\n",
+                   last_pic_structure, s->picture_structure);
+            s->picture_structure = last_pic_structure;
+            s->dropable          = last_pic_dropable;
+            return AVERROR_INVALIDDATA;
+        }
+    } else {
+        /* Shorten frame num gaps so we don't have to allocate reference
+         * frames just to throw them away */
+        if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
+            int unwrap_prev_frame_num = h->prev_frame_num;
+            int max_frame_num         = 1 << h->sps.log2_max_frame_num;
 
             if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num;
 
... ...
@@ -2896,8 +2914,74 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
             }
         }
 
-        while(h->frame_num !=  h->prev_frame_num && h->prev_frame_num >= 0 &&
-              h->frame_num != (h->prev_frame_num+1)%(1<<h->sps.log2_max_frame_num)){
+        /* See if we have a decoded first field looking for a pair...
+         * Here, we're using that to see if we should mark previously
+         * decode frames as "finished".
+         * We have to do that before the "dummy" in-between frame allocation,
+         * since that can modify s->current_picture_ptr. */
+        if (s0->first_field) {
+            assert(s0->current_picture_ptr);
+            assert(s0->current_picture_ptr->f.data[0]);
+            assert(s0->current_picture_ptr->f.reference != DELAYED_PIC_REF);
+
+            /* Mark old field/frame as completed */
+            if (!last_pic_dropable && s0->current_picture_ptr->owner2 == s0) {
+                ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+                                          last_pic_structure == PICT_BOTTOM_FIELD);
+            }
+
+            /* figure out if we have a complementary field pair */
+            if (!FIELD_PICTURE || s->picture_structure == last_pic_structure) {
+                /* Previous field is unmatched. Don't display it, but let it
+                 * remain for reference if marked as such. */
+                if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
+                    ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+                                              last_pic_structure == PICT_TOP_FIELD);
+                }
+            } else {
+                if (s0->current_picture_ptr->frame_num != h->frame_num) {
+                    /* This and previous field were reference, but had
+                     * different frame_nums. Consider this field first in
+                     * pair. Throw away previous field except for reference
+                     * purposes. */
+                    if (!last_pic_dropable && last_pic_structure != PICT_FRAME) {
+                        ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+                                                  last_pic_structure == PICT_TOP_FIELD);
+                    }
+                } else {
+                    /* Second field in complementary pair */
+                    if (!((last_pic_structure   == PICT_TOP_FIELD &&
+                           s->picture_structure == PICT_BOTTOM_FIELD) ||
+                          (last_pic_structure   == PICT_BOTTOM_FIELD &&
+                           s->picture_structure == PICT_TOP_FIELD))) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Invalid field mode combination %d/%d\n",
+                               last_pic_structure, s->picture_structure);
+                        s->picture_structure = last_pic_structure;
+                        s->dropable          = last_pic_dropable;
+                        return AVERROR_INVALIDDATA;
+                    } else if (last_pic_dropable != s->dropable) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Cannot combine reference and non-reference fields in the same frame\n");
+                        av_log_ask_for_sample(s->avctx, NULL);
+                        s->picture_structure = last_pic_structure;
+                        s->dropable          = last_pic_dropable;
+                        return AVERROR_INVALIDDATA;
+                    }
+
+                    /* Take ownership of this buffer. Note that if another thread owned
+                     * the first field of this buffer, we're not operating on that pointer,
+                     * so the original thread is still responsible for reporting progress
+                     * on that first field (or if that was us, we just did that above).
+                     * By taking ownership, we assign responsibility to ourselves to
+                     * report progress on the second field. */
+                    s0->current_picture_ptr->owner2 = s0;
+                }
+            }
+        }
+
+        while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 &&
+               h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
             Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
             av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
             if (ff_h264_frame_start(h) < 0)
... ...
@@ -2928,7 +3012,9 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
             }
         }
 
-        /* See if we have a decoded first field looking for a pair... */
+        /* See if we have a decoded first field looking for a pair...
+         * We're using that to see whether to continue decoding in that
+         * frame, or to allocate a new one. */
         if (s0->first_field) {
             assert(s0->current_picture_ptr);
             assert(s0->current_picture_ptr->f.data[0]);
... ...
@@ -2945,13 +3031,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
 
             } else {
                 if (s0->current_picture_ptr->frame_num != h->frame_num) {
-                    /*
-                     * This and previous field had
-                     * different frame_nums. Consider this field first in
-                     * pair. Throw away previous field except for reference
-                     * purposes.
-                     */
-                    s0->first_field = 1;
+                    /* This and the previous field had different frame_nums.
+                     * Consider this field first in pair. Throw away previous
+                     * one except for reference purposes. */
+                    s0->first_field         = 1;
                     s0->current_picture_ptr = NULL;
 
                 } else {
... ...
@@ -3820,8 +3903,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
         hx = h->thread_context[context_count];
 
         ptr= ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index);
-        if (ptr==NULL || dst_length < 0){
-            return -1;
+        if (ptr == NULL || dst_length < 0) {
+            buf_index = -1;
+            goto end;
         }
         i= buf_index + consumed;
         if((s->workaround_bugs & FF_BUG_AUTODETECT) && i+3<next_avc &&
... ...
@@ -3873,7 +3957,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
         case NAL_IDR_SLICE:
             if (h->nal_unit_type != NAL_IDR_SLICE) {
                 av_log(h->s.avctx, AV_LOG_ERROR, "Invalid mix of idr and non-idr slices\n");
-                return -1;
+                buf_index = -1;
+                goto end;
             }
             idr(h); // FIXME ensure we don't lose some frames if there is reordering
         case NAL_SLICE:
... ...
@@ -4017,6 +4102,15 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
     }
     if(context_count)
         execute_decode_slices(h, context_count);
+
+end:
+    /* clean up */
+    if (s->current_picture_ptr && s->current_picture_ptr->owner2 == s &&
+        !s->dropable) {
+        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX,
+                                  s->picture_structure == PICT_BOTTOM_FIELD);
+    }
+
     return buf_index;
 }
 
... ...
@@ -29,6 +29,7 @@
 #include "libavutil/opt.h"
 #include "avcodec.h"
 #include "bytestream.h"
+#include "internal.h"
 #include "vorbis.h"
 #include "libavutil/mathematics.h"
 
... ...
@@ -59,6 +60,12 @@ static const AVOption options[] = {
     { "iblock", "Sets the impulse block bias", offsetof(OggVorbisContext, iblock), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -15, 0, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
     { NULL }
 };
+
+static const AVCodecDefault defaults[] = {
+    { "b",  "0" },
+    { NULL },
+};
+
 static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
 
 static const char * error(int oggerr, int *averr)
... ...
@@ -75,33 +82,29 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco
 {
     OggVorbisContext *context = avccontext->priv_data;
     double cfreq;
-    int r;
 
-    if (avccontext->flags & CODEC_FLAG_QSCALE) {
-        /* variable bitrate */
-        float quality = avccontext->global_quality / (float)FF_QP2LAMBDA;
-        r = vorbis_encode_setup_vbr(vi, avccontext->channels,
+    if (avccontext->flags & CODEC_FLAG_QSCALE || !avccontext->bit_rate) {
+        /* variable bitrate
+         * NOTE: we use the oggenc range of -1 to 10 for global_quality for
+         *       user convenience, but libvorbis uses -0.1 to 1.0.
+         */
+        float q = avccontext->global_quality / (float)FF_QP2LAMBDA;
+        /* default to 3 if the user did not set quality or bitrate */
+        if (!(avccontext->flags & CODEC_FLAG_QSCALE))
+            q = 3.0;
+        if (vorbis_encode_setup_vbr(vi, avccontext->channels,
                                     avccontext->sample_rate,
-                                    quality / 10.0);
-        if (r) {
-            av_log(avccontext, AV_LOG_ERROR,
-                   "Unable to set quality to %g: %s\n", quality, error(r, &r));
-            return r;
-        }
+                                    q / 10.0))
+            return -1;
     } else {
         int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1;
-        int maxrate = avccontext->rc_min_rate > 0 ? avccontext->rc_max_rate : -1;
+        int maxrate = avccontext->rc_max_rate > 0 ? avccontext->rc_max_rate : -1;
 
         /* constant bitrate */
-        r = vorbis_encode_setup_managed(vi, avccontext->channels,
-                                        avccontext->sample_rate, minrate,
-                                        avccontext->bit_rate, maxrate);
-        if (r) {
-            av_log(avccontext, AV_LOG_ERROR,
-                   "Unable to set CBR to %d: %s\n", avccontext->bit_rate,
-                   error(r, &r));
-            return r;
-        }
+        if (vorbis_encode_setup_managed(vi, avccontext->channels,
+                                        avccontext->sample_rate, maxrate,
+                                        avccontext->bit_rate, minrate))
+            return -1;
 
         /* variable bitrate by estimate, disable slow rate management */
         if (minrate == -1 && maxrate == -1)
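
The VBR path now does roughly what a standalone libvorbis caller would: the user-facing quality follows the oggenc scale of -1..10 (default 3) and is divided by 10 before being handed to vorbis_encode_setup_vbr(), which expects -0.1..1.0. A sketch against the public libvorbisenc API (the helper name and error handling are illustrative, not the encoder's actual init code):

#include <vorbis/vorbisenc.h>

/* Map an oggenc-style quality (-1 .. 10, default 3) to libvorbis VBR setup. */
static int setup_vbr_sketch(vorbis_info *vi, long channels, long rate,
                            float user_quality)
{
    vorbis_info_init(vi);
    if (vorbis_encode_setup_vbr(vi, channels, rate, user_quality / 10.0f))
        return -1;                       /* same "return -1" style as above */
    return vorbis_encode_setup_init(vi); /* finalize the VBR setup          */
}

Together with the new defaults table that forces "b" to 0, this VBR branch is taken unless the user explicitly requests a bitrate.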
... ...
@@ -314,4 +317,5 @@ AVCodec ff_libvorbis_encoder = {
     .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
     .long_name      = NULL_IF_CONFIG_SMALL("libvorbis Vorbis"),
     .priv_class     = &class,
+    .defaults       = defaults,
 };
... ...
@@ -230,8 +230,8 @@ static const AVOption options[]={
 {"rc_qmod_freq", "experimental quantizer modulation", OFFSET(rc_qmod_freq), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
 {"rc_override_count", NULL, OFFSET(rc_override_count), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX},
 {"rc_eq", "set rate control equation", OFFSET(rc_eq), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, V|E},
-{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
-{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|E},
+{"maxrate", "set max bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
+{"minrate", "set min bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
 {"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), AV_OPT_TYPE_INT, {.dbl = DEFAULT }, INT_MIN, INT_MAX, A|V|E},
 {"rc_buf_aggressivity", "currently useless", OFFSET(rc_buffer_aggressivity), AV_OPT_TYPE_FLOAT, {.dbl = 1.0 }, -FLT_MAX, FLT_MAX, V|E},
 {"i_qfactor", "qp factor between P and I frames", OFFSET(i_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = -0.8 }, -FLT_MAX, FLT_MAX, V|E},
... ...
@@ -5713,6 +5713,12 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         mb_height = s->mb_height >> v->field_mode;
         for (i = 0; i <= n_slices; i++) {
             if (i > 0 &&  slices[i - 1].mby_start >= mb_height) {
+                if (v->field_mode <= 0) {
+                    av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
+                           "picture boundary (%d >= %d)\n", i,
+                           slices[i - 1].mby_start, mb_height);
+                    continue;
+                }
                 v->second_field = 1;
                 v->blocks_off   = s->mb_width  * s->mb_height << 1;
                 v->mb_off       = s->mb_stride * s->mb_height >> 1;
... ...
@@ -330,6 +330,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
         return AVERROR_INVALIDDATA;
     }
 
+    if (s->avctx->sample_rate <= 0) {
+        av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n");
+        return AVERROR_INVALIDDATA;
+    }
+
    s->num_channels = avctx->channels;
 
     if (s->num_channels < 0) {
... ...
@@ -299,6 +299,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
 {
     PadContext *pad = inlink->dst->priv;
     AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
+    AVFilterBufferRef *for_next_filter;
     int plane;
 
     for (plane = 0; plane < 4 && outpicref->data[plane]; plane++) {
... ...
@@ -335,12 +336,14 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
     outpicref->video->w = pad->w;
     outpicref->video->h = pad->h;
 
-    avfilter_start_frame(inlink->dst->outputs[0], outpicref);
+    for_next_filter = avfilter_ref_buffer(outpicref, ~0);
+    avfilter_start_frame(inlink->dst->outputs[0], for_next_filter);
 }
 
 static void end_frame(AVFilterLink *link)
 {
     avfilter_end_frame(link->dst->outputs[0]);
+    avfilter_unref_buffer(link->dst->outputs[0]->out_buf);
     avfilter_unref_buffer(link->cur_buf);
 }
 
... ...
@@ -1221,7 +1221,7 @@ resync:
                 avi->dts_max = dts;
         }
 
-        return size;
+        return 0;
     }
 
     if ((err = avi_sync(s, 0)) < 0)
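
The change matches the demuxer read_packet() convention: return 0 on success or a negative AVERROR, with the payload carried in the AVPacket rather than in the return value. A minimal sketch of that contract (hypothetical demuxer callback, not the avidec code; the size is made up):

#include "libavformat/avformat.h"

/* Hypothetical read_packet(): return 0 on success, an AVERROR on failure. */
static int example_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int size = 1024;                        /* made-up payload size        */
    int ret  = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;                         /* propagate the error code    */
    return 0;                               /* success: not the byte count */
}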