Merge remote-tracking branch 'qatar/master'

* qatar/master:
rv34: add NEON rv34_idct_add
rv34: 1-pass inter MB reconstruction
add SMJPEG muxer
avformat: split out common SMJPEG code
pictordec: Use bytestream2 functions
avconv: use avcodec_encode_audio2()
pcmenc: use AVCodec.encode2()
avcodec: bump minor version and add APIChanges for the new audio encoding API
avcodec: Add avcodec_encode_audio2() as replacement for avcodec_encode_audio()
avcodec: add a public function, avcodec_fill_audio_frame().
rv34: Intra 16x16 handling
rv34: Inter/intra MB code split

Conflicts:
Changelog
libavcodec/avcodec.h
libavcodec/pictordec.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/x86/rv34dsp.asm
libavformat/version.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>

Michael Niedermayer authored on 2012/01/17 09:40:45
Showing 25 changed files
... ...
@@ -14,6 +14,7 @@ version next:
 - astreamsync audio filter
 - amerge audio filter
 - GSM audio parser
+- SMJPEG muxer
 - Automatic thread count based on detection number of (available) CPU cores
 - y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
 - ffprobe -show_error option
... ...
@@ -159,8 +159,7 @@ static float dts_delta_threshold = 10;
 static int print_stats = 1;
 
 static uint8_t *audio_buf;
-static uint8_t *audio_out;
-static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
+static unsigned int allocated_audio_buf_size;
 
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
 
... ...
@@ -225,6 +224,7 @@ typedef struct OutputStream {
     AVBitStreamFilterContext *bitstream_filters;
     AVCodec *enc;
     int64_t max_frames;
+    AVFrame *output_frame;
 
     /* video only */
     int video_resample;
... ...
@@ -767,6 +767,13 @@ void exit_program(int ret)
         }
         output_streams[i].bitstream_filters = NULL;
 
+        if (output_streams[i].output_frame) {
+            AVFrame *frame = output_streams[i].output_frame;
+            if (frame->extended_data != frame->data)
+                av_freep(&frame->extended_data);
+            av_freep(&frame);
+        }
+
 #if CONFIG_AVFILTER
         av_freep(&output_streams[i].avfilter);
 #endif
... ...
@@ -792,8 +799,7 @@ void exit_program(int ret)
 
     uninit_opts();
     av_free(audio_buf);
-    av_free(audio_out);
-    allocated_audio_buf_size = allocated_audio_out_size = 0;
+    allocated_audio_buf_size = 0;
 
 #if CONFIG_AVFILTER
     avfilter_uninit();
... ...
@@ -957,18 +963,75 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_
     memset(buf, fill_char, size);
 }
 
+static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
+                              const uint8_t *buf, int buf_size)
+{
+    AVCodecContext *enc = ost->st->codec;
+    AVFrame *frame = NULL;
+    AVPacket pkt;
+    int ret, got_packet;
+
+    av_init_packet(&pkt);
+    pkt.data = NULL;
+    pkt.size = 0;
+
+    if (buf) {
+        if (!ost->output_frame) {
+            ost->output_frame = avcodec_alloc_frame();
+            if (!ost->output_frame) {
+                av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
+                exit_program(1);
+            }
+        }
+        frame = ost->output_frame;
+        if (frame->extended_data != frame->data)
+            av_freep(&frame->extended_data);
+        avcodec_get_frame_defaults(frame);
+
+        frame->nb_samples  = buf_size /
+                             (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
+        if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
+                                            buf, buf_size, 1)) < 0) {
+            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
+            exit_program(1);
+        }
+    }
+
+    got_packet = 0;
+    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
+        exit_program(1);
+    }
+
+    if (got_packet) {
+        pkt.stream_index = ost->index;
+        if (pkt.pts != AV_NOPTS_VALUE)
+            pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
+        if (pkt.duration > 0)
+            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
+
+        write_frame(s, &pkt, ost);
+
+        audio_size += pkt.size;
+    }
+
+    if (frame)
+        ost->sync_opts += frame->nb_samples;
+
+    return pkt.size;
+}
+
 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                          InputStream *ist, AVFrame *decoded_frame)
 {
     uint8_t *buftmp;
-    int64_t audio_out_size, audio_buf_size;
+    int64_t audio_buf_size;
 
-    int size_out, frame_bytes, ret, resample_changed;
+    int size_out, frame_bytes, resample_changed;
     AVCodecContext *enc = ost->st->codec;
     AVCodecContext *dec = ist->st->codec;
     int osize = av_get_bytes_per_sample(enc->sample_fmt);
     int isize = av_get_bytes_per_sample(dec->sample_fmt);
-    const int coded_bps = av_get_bits_per_sample(enc->codec->id);
     uint8_t *buf = decoded_frame->data[0];
     int size     = decoded_frame->nb_samples * dec->channels * isize;
     int64_t allocated_for_size = size;
... ...
@@ -980,19 +1043,13 @@ need_realloc:
     audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
     audio_buf_size *= osize * enc->channels;
 
-    audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
-    if (coded_bps > 8 * osize)
-        audio_out_size = audio_out_size * coded_bps / (8*osize);
-    audio_out_size += FF_MIN_BUFFER_SIZE;
-
-    if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) {
+    if (audio_buf_size > INT_MAX) {
         av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
         exit_program(1);
     }
 
     av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
-    av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
-    if (!audio_buf || !audio_out) {
+    if (!audio_buf) {
         av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
         exit_program(1);
     }
... ...
@@ -1130,7 +1187,7 @@ need_realloc:
     }
 
     /* now encode as many frames as possible */
-    if (enc->frame_size > 1) {
+    if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
        /* output resampled raw samples */
        if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
... ...
@@ -1141,62 +1198,11 @@ need_realloc:
1141 1141
         frame_bytes = enc->frame_size * osize * enc->channels;
1142 1142
 
1143 1143
         while (av_fifo_size(ost->fifo) >= frame_bytes) {
1144
-            AVPacket pkt;
1145
-            av_init_packet(&pkt);
1146
-
1147 1144
             av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1148
-
1149
-            // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
1150
-
1151
-            ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
1152
-                                       (short *)audio_buf);
1153
-            if (ret < 0) {
1154
-                av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1155
-                exit_program(1);
1156
-            }
1157
-            audio_size += ret;
1158
-            pkt.stream_index = ost->index;
1159
-            pkt.data = audio_out;
1160
-            pkt.size = ret;
1161
-            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1162
-                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1163
-            pkt.flags |= AV_PKT_FLAG_KEY;
1164
-            write_frame(s, &pkt, ost);
1165
-
1166
-            ost->sync_opts += enc->frame_size;
1145
+            encode_audio_frame(s, ost, audio_buf, frame_bytes);
1167 1146
         }
1168 1147
     } else {
1169
-        AVPacket pkt;
1170
-        av_init_packet(&pkt);
1171
-
1172
-        ost->sync_opts += size_out / (osize * enc->channels);
1173
-
1174
-        /* output a pcm frame */
1175
-        /* determine the size of the coded buffer */
1176
-        size_out /= osize;
1177
-        if (coded_bps)
1178
-            size_out = size_out * coded_bps / 8;
1179
-
1180
-        if (size_out > audio_out_size) {
1181
-            av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n");
1182
-            exit_program(1);
1183
-        }
1184
-
1185
-        // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
1186
-        ret = avcodec_encode_audio(enc, audio_out, size_out,
1187
-                                   (short *)buftmp);
1188
-        if (ret < 0) {
1189
-            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1190
-            exit_program(1);
1191
-        }
1192
-        audio_size += ret;
1193
-        pkt.stream_index = ost->index;
1194
-        pkt.data = audio_out;
1195
-        pkt.size = ret;
1196
-        if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1197
-            pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1198
-        pkt.flags |= AV_PKT_FLAG_KEY;
1199
-        write_frame(s, &pkt, ost);
1148
+        encode_audio_frame(s, ost, buftmp, size_out);
1200 1149
     }
1201 1150
 }
1202 1151
 
... ...
@@ -1692,6 +1698,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1692 1692
         OutputStream   *ost = &ost_table[i];
1693 1693
         AVCodecContext *enc = ost->st->codec;
1694 1694
         AVFormatContext *os = output_files[ost->file_index].ctx;
1695
+        int stop_encoding = 0;
1695 1696
 
1696 1697
         if (!ost->encoding_needed)
1697 1698
             continue;
... ...
@@ -1705,41 +1712,35 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1705 1705
             AVPacket pkt;
1706 1706
             int fifo_bytes;
1707 1707
             av_init_packet(&pkt);
1708
-            pkt.stream_index = ost->index;
1708
+            pkt.data = NULL;
1709
+            pkt.size = 0;
1709 1710
 
1710 1711
             switch (ost->st->codec->codec_type) {
1711 1712
             case AVMEDIA_TYPE_AUDIO:
1712 1713
                 fifo_bytes = av_fifo_size(ost->fifo);
1713
-                ret = 0;
1714
-                /* encode any samples remaining in fifo */
1715 1714
                 if (fifo_bytes > 0) {
1716
-                    int osize = av_get_bytes_per_sample(enc->sample_fmt);
1717
-                    int fs_tmp = enc->frame_size;
1715
+                    /* encode any samples remaining in fifo */
1716
+                    int frame_bytes = fifo_bytes;
1718 1717
 
1719 1718
                     av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
1720
-                    if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
1721
-                        enc->frame_size = fifo_bytes / (osize * enc->channels);
1722
-                    } else { /* pad */
1723
-                        int frame_bytes = enc->frame_size*osize*enc->channels;
1719
+
1720
+                    /* pad last frame with silence if needed */
1721
+                    if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
1722
+                        frame_bytes = enc->frame_size * enc->channels *
1723
+                                      av_get_bytes_per_sample(enc->sample_fmt);
1724 1724
                         if (allocated_audio_buf_size < frame_bytes)
1725 1725
                             exit_program(1);
1726 1726
                         generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
1727 1727
                     }
1728
-
1729
-                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
1730
-                    pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
1731
-                                              ost->st->time_base.num, enc->sample_rate);
1732
-                    enc->frame_size = fs_tmp;
1733
-                }
1734
-                if (ret <= 0) {
1735
-                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1736
-                }
1737
-                if (ret < 0) {
1738
-                    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1739
-                    exit_program(1);
1728
+                    encode_audio_frame(os, ost, audio_buf, frame_bytes);
1729
+                } else {
1730
+                    /* flush encoder with NULL frames until it is done
1731
+                       returning packets */
1732
+                    if (encode_audio_frame(os, ost, NULL, 0) == 0) {
1733
+                        stop_encoding = 1;
1734
+                        break;
1735
+                    }
1740 1736
                 }
1741
-                audio_size += ret;
1742
-                pkt.flags  |= AV_PKT_FLAG_KEY;
1743 1737
                 break;
1744 1738
             case AVMEDIA_TYPE_VIDEO:
1745 1739
                 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
... ...
@@ -1753,18 +1754,22 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1753 1753
                 if (ost->logfile && enc->stats_out) {
1754 1754
                     fprintf(ost->logfile, "%s", enc->stats_out);
1755 1755
                 }
1756
+                if (ret <= 0) {
1757
+                    stop_encoding = 1;
1758
+                    break;
1759
+                }
1760
+                pkt.stream_index = ost->index;
1761
+                pkt.data = bit_buffer;
1762
+                pkt.size = ret;
1763
+                if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1764
+                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1765
+                write_frame(os, &pkt, ost);
1756 1766
                 break;
1757 1767
             default:
1758
-                ret = -1;
1768
+                stop_encoding = 1;
1759 1769
             }
1760
-
1761
-            if (ret <= 0)
1770
+            if (stop_encoding)
1762 1771
                 break;
1763
-            pkt.data = bit_buffer;
1764
-            pkt.size = ret;
1765
-            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1766
-                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1767
-            write_frame(os, &pkt, ost);
1768 1772
         }
1769 1773
     }
1770 1774
 }
... ...
@@ -13,6 +13,14 @@ libavutil:   2011-04-18
 
 API changes, most recent first:
 
+2012-xx-xx - lavc 53.34.0
+  New audio encoding API:
+  xxxxxxx Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
+          encoders.
+  xxxxxxx Add avcodec_fill_audio_frame() as a convenience function.
+  xxxxxxx Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
+          Add AVCodec.encode2().
+
 2012-01-xx - xxxxxxx - lavfi 2.15.0
   Add a new installed header -- libavfilter/version.h -- with version macros.
 
... ...
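For readers skimming the APIChanges entry above, the declarations it summarizes amount to the following (taken in spirit from the avcodec.h hunk further down, where the exact documentation appears):

/* old API, now deprecated */
int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                         const short *samples);

/* new API introduced by this merge */
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
                          const AVFrame *frame, int *got_packet_ptr);
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
                             enum AVSampleFormat sample_fmt, const uint8_t *buf,
                             int buf_size, int align);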
@@ -317,7 +317,7 @@ library:
     @tab Used in Sierra CD-ROM games.
 @item Smacker                   @tab   @tab X
     @tab Multimedia format used by many games.
-@item SMJPEG                    @tab   @tab X
+@item SMJPEG                    @tab X @tab X
     @tab Used in certain Loki game ports.
 @item Sony OpenMG (OMA)         @tab X @tab X
     @tab Audio format used in Sony Sonic Stage and Sony Vegas.
... ...
@@ -176,8 +176,7 @@ static float dts_delta_threshold = 10;
176 176
 static int print_stats = 1;
177 177
 
178 178
 static uint8_t *audio_buf;
179
-static uint8_t *audio_out;
180
-static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
179
+static unsigned int allocated_audio_buf_size;
181 180
 
182 181
 static uint8_t *input_tmp= NULL;
183 182
 
... ...
@@ -245,6 +244,7 @@ typedef struct OutputStream {
245 245
     AVBitStreamFilterContext *bitstream_filters;
246 246
     AVCodec *enc;
247 247
     int64_t max_frames;
248
+    AVFrame *output_frame;
248 249
 
249 250
     /* video only */
250 251
     int video_resample;
... ...
@@ -825,6 +825,13 @@ void av_noreturn exit_program(int ret)
825 825
             bsfc = next;
826 826
         }
827 827
         output_streams[i].bitstream_filters = NULL;
828
+
829
+        if (output_streams[i].output_frame) {
830
+            AVFrame *frame = output_streams[i].output_frame;
831
+            if (frame->extended_data != frame->data)
832
+                av_freep(&frame->extended_data);
833
+            av_freep(&frame);
834
+        }
828 835
     }
829 836
     for (i = 0; i < nb_input_files; i++) {
830 837
         avformat_close_input(&input_files[i].ctx);
... ...
@@ -847,8 +854,7 @@ void av_noreturn exit_program(int ret)
847 847
 
848 848
     uninit_opts();
849 849
     av_free(audio_buf);
850
-    av_free(audio_out);
851
-    allocated_audio_buf_size = allocated_audio_out_size = 0;
850
+    allocated_audio_buf_size = 0;
852 851
 
853 852
 #if CONFIG_AVFILTER
854 853
     avfilter_uninit();
... ...
@@ -1016,18 +1022,75 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_
1016 1016
     memset(buf, fill_char, size);
1017 1017
 }
1018 1018
 
1019
+static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
1020
+                              const uint8_t *buf, int buf_size)
1021
+{
1022
+    AVCodecContext *enc = ost->st->codec;
1023
+    AVFrame *frame = NULL;
1024
+    AVPacket pkt;
1025
+    int ret, got_packet;
1026
+
1027
+    av_init_packet(&pkt);
1028
+    pkt.data = NULL;
1029
+    pkt.size = 0;
1030
+
1031
+    if (buf) {
1032
+        if (!ost->output_frame) {
1033
+            ost->output_frame = avcodec_alloc_frame();
1034
+            if (!ost->output_frame) {
1035
+                av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
1036
+                exit_program(1);
1037
+            }
1038
+        }
1039
+        frame = ost->output_frame;
1040
+        if (frame->extended_data != frame->data)
1041
+            av_freep(&frame->extended_data);
1042
+        avcodec_get_frame_defaults(frame);
1043
+
1044
+        frame->nb_samples  = buf_size /
1045
+                             (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
1046
+        if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
1047
+                                            buf, buf_size, 1)) < 0) {
1048
+            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1049
+            exit_program(1);
1050
+        }
1051
+    }
1052
+
1053
+    got_packet = 0;
1054
+    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1055
+        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1056
+        exit_program(1);
1057
+    }
1058
+
1059
+    if (got_packet) {
1060
+        pkt.stream_index = ost->index;
1061
+        if (pkt.pts != AV_NOPTS_VALUE)
1062
+            pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
1063
+        if (pkt.duration > 0)
1064
+            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1065
+
1066
+        write_frame(s, &pkt, ost);
1067
+
1068
+        audio_size += pkt.size;
1069
+    }
1070
+
1071
+    if (frame)
1072
+        ost->sync_opts += frame->nb_samples;
1073
+
1074
+    return pkt.size;
1075
+}
1076
+
1019 1077
 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1020 1078
                          InputStream *ist, AVFrame *decoded_frame)
1021 1079
 {
1022 1080
     uint8_t *buftmp;
1023
-    int64_t audio_out_size, audio_buf_size, size_out;
1081
+    int64_t audio_buf_size, size_out;
1024 1082
 
1025
-    int frame_bytes, ret, resample_changed;
1083
+    int frame_bytes, resample_changed;
1026 1084
     AVCodecContext *enc = ost->st->codec;
1027 1085
     AVCodecContext *dec = ist->st->codec;
1028 1086
     int osize = av_get_bytes_per_sample(enc->sample_fmt);
1029 1087
     int isize = av_get_bytes_per_sample(dec->sample_fmt);
1030
-    const int coded_bps = av_get_bits_per_sample(enc->codec->id);
1031 1088
     uint8_t *buf = decoded_frame->data[0];
1032 1089
     int size     = decoded_frame->nb_samples * dec->channels * isize;
1033 1090
     int64_t allocated_for_size = size;
... ...
@@ -1039,19 +1102,13 @@ need_realloc:
1039 1039
     audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
1040 1040
     audio_buf_size *= osize * enc->channels;
1041 1041
 
1042
-    audio_out_size = FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
1043
-    if (coded_bps > 8 * osize)
1044
-        audio_out_size = audio_out_size * coded_bps / (8*osize);
1045
-    audio_out_size += FF_MIN_BUFFER_SIZE;
1046
-
1047
-    if (audio_out_size > INT_MAX || audio_buf_size > INT_MAX) {
1042
+    if (audio_buf_size > INT_MAX) {
1048 1043
         av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
1049 1044
         exit_program(1);
1050 1045
     }
1051 1046
 
1052 1047
     av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
1053
-    av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
1054
-    if (!audio_buf || !audio_out) {
1048
+    if (!audio_buf) {
1055 1049
         av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
1056 1050
         exit_program(1);
1057 1051
     }
... ...
@@ -1180,7 +1237,7 @@ need_realloc:
1180 1180
     av_assert0(ost->audio_resample || dec->sample_fmt==enc->sample_fmt);
1181 1181
 
1182 1182
     /* now encode as many frames as possible */
1183
-    if (enc->frame_size > 1) {
1183
+    if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
1184 1184
         /* output resampled raw samples */
1185 1185
         if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
1186 1186
             av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
... ...
@@ -1191,62 +1248,11 @@ need_realloc:
1191 1191
         frame_bytes = enc->frame_size * osize * enc->channels;
1192 1192
 
1193 1193
         while (av_fifo_size(ost->fifo) >= frame_bytes) {
1194
-            AVPacket pkt;
1195
-            av_init_packet(&pkt);
1196
-
1197 1194
             av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1198
-
1199
-            // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
1200
-
1201
-            ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
1202
-                                       (short *)audio_buf);
1203
-            if (ret < 0) {
1204
-                av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1205
-                exit_program(1);
1206
-            }
1207
-            audio_size += ret;
1208
-            pkt.stream_index = ost->index;
1209
-            pkt.data = audio_out;
1210
-            pkt.size = ret;
1211
-            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1212
-                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1213
-            pkt.flags |= AV_PKT_FLAG_KEY;
1214
-            write_frame(s, &pkt, ost);
1215
-
1216
-            ost->sync_opts += enc->frame_size;
1195
+            encode_audio_frame(s, ost, audio_buf, frame_bytes);
1217 1196
         }
1218 1197
     } else {
1219
-        AVPacket pkt;
1220
-        av_init_packet(&pkt);
1221
-
1222
-        ost->sync_opts += size_out / (osize * enc->channels);
1223
-
1224
-        /* output a pcm frame */
1225
-        /* determine the size of the coded buffer */
1226
-        size_out /= osize;
1227
-        if (coded_bps)
1228
-            size_out = size_out * coded_bps / 8;
1229
-
1230
-        if (size_out > audio_out_size) {
1231
-            av_log(NULL, AV_LOG_FATAL, "Internal error, buffer size too small\n");
1232
-            exit_program(1);
1233
-        }
1234
-
1235
-        // FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
1236
-        ret = avcodec_encode_audio(enc, audio_out, size_out,
1237
-                                   (short *)buftmp);
1238
-        if (ret < 0) {
1239
-            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1240
-            exit_program(1);
1241
-        }
1242
-        audio_size += ret;
1243
-        pkt.stream_index = ost->index;
1244
-        pkt.data = audio_out;
1245
-        pkt.size = ret;
1246
-        if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1247
-            pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1248
-        pkt.flags |= AV_PKT_FLAG_KEY;
1249
-        write_frame(s, &pkt, ost);
1198
+        encode_audio_frame(s, ost, buftmp, size_out);
1250 1199
     }
1251 1200
 }
1252 1201
 
... ...
@@ -1743,6 +1749,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1743 1743
         OutputStream   *ost = &ost_table[i];
1744 1744
         AVCodecContext *enc = ost->st->codec;
1745 1745
         AVFormatContext *os = output_files[ost->file_index].ctx;
1746
+        int stop_encoding = 0;
1746 1747
 
1747 1748
         if (!ost->encoding_needed)
1748 1749
             continue;
... ...
@@ -1756,41 +1763,35 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1756 1756
             AVPacket pkt;
1757 1757
             int fifo_bytes;
1758 1758
             av_init_packet(&pkt);
1759
-            pkt.stream_index = ost->index;
1759
+            pkt.data = NULL;
1760
+            pkt.size = 0;
1760 1761
 
1761 1762
             switch (ost->st->codec->codec_type) {
1762 1763
             case AVMEDIA_TYPE_AUDIO:
1763 1764
                 fifo_bytes = av_fifo_size(ost->fifo);
1764
-                ret = 0;
1765
-                /* encode any samples remaining in fifo */
1766 1765
                 if (fifo_bytes > 0) {
1767
-                    int osize = av_get_bytes_per_sample(enc->sample_fmt);
1768
-                    int fs_tmp = enc->frame_size;
1766
+                    /* encode any samples remaining in fifo */
1767
+                    int frame_bytes = fifo_bytes;
1769 1768
 
1770 1769
                     av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
1771
-                    if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
1772
-                        enc->frame_size = fifo_bytes / (osize * enc->channels);
1773
-                    } else { /* pad */
1774
-                        int frame_bytes = enc->frame_size*osize*enc->channels;
1770
+
1771
+                    /* pad last frame with silence if needed */
1772
+                    if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
1773
+                        frame_bytes = enc->frame_size * enc->channels *
1774
+                                      av_get_bytes_per_sample(enc->sample_fmt);
1775 1775
                         if (allocated_audio_buf_size < frame_bytes)
1776 1776
                             exit_program(1);
1777 1777
                         generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
1778 1778
                     }
1779
-
1780
-                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
1781
-                    pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
1782
-                                              ost->st->time_base.num, enc->sample_rate);
1783
-                    enc->frame_size = fs_tmp;
1784
-                }
1785
-                if (ret <= 0) {
1786
-                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1787
-                }
1788
-                if (ret < 0) {
1789
-                    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1790
-                    exit_program(1);
1779
+                    encode_audio_frame(os, ost, audio_buf, frame_bytes);
1780
+                } else {
1781
+                    /* flush encoder with NULL frames until it is done
1782
+                       returning packets */
1783
+                    if (encode_audio_frame(os, ost, NULL, 0) == 0) {
1784
+                        stop_encoding = 1;
1785
+                        break;
1786
+                    }
1791 1787
                 }
1792
-                audio_size += ret;
1793
-                pkt.flags  |= AV_PKT_FLAG_KEY;
1794 1788
                 break;
1795 1789
             case AVMEDIA_TYPE_VIDEO:
1796 1790
                 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
... ...
@@ -1804,18 +1805,22 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1804 1804
                 if (ost->logfile && enc->stats_out) {
1805 1805
                     fprintf(ost->logfile, "%s", enc->stats_out);
1806 1806
                 }
1807
+                if (ret <= 0) {
1808
+                    stop_encoding = 1;
1809
+                    break;
1810
+                }
1811
+                pkt.stream_index = ost->index;
1812
+                pkt.data = bit_buffer;
1813
+                pkt.size = ret;
1814
+                if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1815
+                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1816
+                write_frame(os, &pkt, ost);
1807 1817
                 break;
1808 1818
             default:
1809
-                ret = -1;
1819
+                stop_encoding = 1;
1810 1820
             }
1811
-
1812
-            if (ret <= 0)
1821
+            if (stop_encoding)
1813 1822
                 break;
1814
-            pkt.data = bit_buffer;
1815
-            pkt.size = ret;
1816
-            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1817
-                pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1818
-            write_frame(os, &pkt, ost);
1819 1823
         }
1820 1824
     }
1821 1825
 }
... ...
@@ -23,16 +23,18 @@
23 23
 #include "libavcodec/avcodec.h"
24 24
 #include "libavcodec/rv34dsp.h"
25 25
 
26
-void ff_rv34_inv_transform_neon(DCTELEM *block);
27 26
 void ff_rv34_inv_transform_noround_neon(DCTELEM *block);
28 27
 
29
-void ff_rv34_inv_transform_dc_neon(DCTELEM *block);
30 28
 void ff_rv34_inv_transform_noround_dc_neon(DCTELEM *block);
31 29
 
30
+void ff_rv34_idct_add_neon(uint8_t *dst, int stride, DCTELEM *block);
31
+void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc);
32
+
32 33
 void ff_rv34dsp_init_neon(RV34DSPContext *c, DSPContext* dsp)
33 34
 {
34
-    c->rv34_inv_transform_tab[0]    = ff_rv34_inv_transform_neon;
35
-    c->rv34_inv_transform_tab[1]    = ff_rv34_inv_transform_noround_neon;
36
-    c->rv34_inv_transform_dc_tab[0] = ff_rv34_inv_transform_dc_neon;
37
-    c->rv34_inv_transform_dc_tab[1] = ff_rv34_inv_transform_noround_dc_neon;
35
+    c->rv34_inv_transform    = ff_rv34_inv_transform_noround_neon;
36
+    c->rv34_inv_transform_dc = ff_rv34_inv_transform_noround_dc_neon;
37
+
38
+    c->rv34_idct_add    = ff_rv34_idct_add_neon;
39
+    c->rv34_idct_dc_add = ff_rv34_idct_dc_add_neon;
38 40
 }
... ...
@@ -19,13 +19,10 @@
19 19
  */
20 20
 
21 21
 #include "asm.S"
22
+#include "neon.S"
22 23
 
23
-.macro rv34_inv_transform
24
-        mov             r1,  #16
25
-        vld1.16         {d28}, [r0,:64], r1     @ block[i+8*0]
26
-        vld1.16         {d29}, [r0,:64], r1     @ block[i+8*1]
27
-        vld1.16         {d30}, [r0,:64], r1     @ block[i+8*2]
28
-        vld1.16         {d31}, [r0,:64], r1     @ block[i+8*3]
24
+.macro rv34_inv_transform    r0
25
+        vld1.16         {q14-q15}, [\r0,:128]
29 26
         vmov.s16        d0,  #13
30 27
         vshll.s16       q12, d29, #3
31 28
         vshll.s16       q13, d29, #4
... ...
@@ -35,12 +32,12 @@
35 35
         vmlal.s16       q10, d30, d0
36 36
         vmull.s16       q11, d28, d0
37 37
         vmlsl.s16       q11, d30, d0
38
-        vsubw.s16       q12, q12, d29   @ z2 = block[i+8*1]*7
39
-        vaddw.s16       q13, q13, d29   @ z3 = block[i+8*1]*17
38
+        vsubw.s16       q12, q12, d29   @ z2 = block[i+4*1]*7
39
+        vaddw.s16       q13, q13, d29   @ z3 = block[i+4*1]*17
40 40
         vsubw.s16       q9,  q9,  d31
41 41
         vaddw.s16       q1,  q1,  d31
42
-        vadd.s32        q13, q13, q9    @ z3 = 17*block[i+8*1] +  7*block[i+8*3]
43
-        vsub.s32        q12, q12, q1    @ z2 = 7*block[i+8*1]  - 17*block[i+8*3]
42
+        vadd.s32        q13, q13, q9    @ z3 = 17*block[i+4*1] +  7*block[i+4*3]
43
+        vsub.s32        q12, q12, q1    @ z2 = 7*block[i+4*1]  - 17*block[i+4*3]
44 44
         vadd.s32        q1,  q10, q13   @ z0 + z3
45 45
         vadd.s32        q2,  q11, q12   @ z1 + z2
46 46
         vsub.s32        q8,  q10, q13   @ z0 - z3
... ...
@@ -70,25 +67,39 @@
70 70
         vsub.s32        q15, q14, q9    @ z0 - z3
71 71
 .endm
72 72
 
73
-/* void ff_rv34_inv_transform_neon(DCTELEM *block); */
74
-function ff_rv34_inv_transform_neon, export=1
75
-        mov             r2,  r0
76
-        rv34_inv_transform
77
-        vrshrn.s32      d1,  q2,  #10   @ (z1 + z2) >> 10
78
-        vrshrn.s32      d0,  q1,  #10   @ (z0 + z3) >> 10
79
-        vrshrn.s32      d2,  q3,  #10   @ (z1 - z2) >> 10
80
-        vrshrn.s32      d3,  q15, #10   @ (z0 - z3) >> 10
81
-        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
82
-        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
83
-        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
84
-        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
73
+/* void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block) */
74
+function ff_rv34_idct_add_neon, export=1
75
+        mov             r3,  r0
76
+        rv34_inv_transform   r2
77
+        vmov.i16        q12, #0
78
+        vrshrn.s32      d16, q1,  #10   @ (z0 + z3) >> 10
79
+        vrshrn.s32      d17, q2,  #10   @ (z1 + z2) >> 10
80
+        vrshrn.s32      d18, q3,  #10   @ (z1 - z2) >> 10
81
+        vrshrn.s32      d19, q15, #10   @ (z0 - z3) >> 10
82
+        vld1.32         {d28[]},  [r0,:32], r1
83
+        vld1.32         {d29[]},  [r0,:32], r1
84
+        vtrn.32         q8,  q9
85
+        vld1.32         {d28[1]}, [r0,:32], r1
86
+        vld1.32         {d29[1]}, [r0,:32], r1
87
+        vst1.16         {q12}, [r2,:128]!       @ memset(block,    0, 16)
88
+        vst1.16         {q12}, [r2,:128]        @ memset(block+16, 0, 16)
89
+        vtrn.16         d16, d17
90
+        vtrn.32         d28, d29
91
+        vtrn.16         d18, d19
92
+        vaddw.u8        q0,   q8,  d28
93
+        vaddw.u8        q1,   q9,  d29
94
+        vqmovun.s16     d28,  q0
95
+        vqmovun.s16     d29,  q1
96
+        vst1.32         {d28[0]}, [r3,:32], r1
97
+        vst1.32         {d28[1]}, [r3,:32], r1
98
+        vst1.32         {d29[0]}, [r3,:32], r1
99
+        vst1.32         {d29[1]}, [r3,:32], r1
85 100
         bx              lr
86 101
 endfunc
87 102
 
88 103
 /* void rv34_inv_transform_noround_neon(DCTELEM *block); */
89 104
 function ff_rv34_inv_transform_noround_neon, export=1
90
-        mov             r2,  r0
91
-        rv34_inv_transform
105
+        rv34_inv_transform   r0
92 106
         vshl.s32        q11, q2,  #1
93 107
         vshl.s32        q10, q1,  #1
94 108
         vshl.s32        q12, q3,  #1
... ...
@@ -101,24 +112,33 @@ function ff_rv34_inv_transform_noround_neon, export=1
101 101
         vshrn.s32       d1,  q11, #11   @ (z1 + z2)*3 >> 11
102 102
         vshrn.s32       d2,  q12, #11   @ (z1 - z2)*3 >> 11
103 103
         vshrn.s32       d3,  q13, #11   @ (z0 - z3)*3 >> 11
104
-        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r2,:64], r1
105
-        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r2,:64], r1
106
-        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r2,:64], r1
107
-        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r2,:64], r1
104
+        vst4.16         {d0[0], d1[0], d2[0], d3[0]}, [r0,:64]!
105
+        vst4.16         {d0[1], d1[1], d2[1], d3[1]}, [r0,:64]!
106
+        vst4.16         {d0[2], d1[2], d2[2], d3[2]}, [r0,:64]!
107
+        vst4.16         {d0[3], d1[3], d2[3], d3[3]}, [r0,:64]!
108 108
         bx              lr
109 109
 endfunc
110 110
 
111
-/* void rv34_inv_transform_dc_c(DCTELEM *block) */
112
-function ff_rv34_inv_transform_dc_neon, export=1
113
-        vld1.16         {d28[]}, [r0,:16]       @ block[0]
114
-        vmov.i16        d4,  #169
115
-        mov             r1,  #16
116
-        vmull.s16       q3,  d28, d4
117
-        vrshrn.s32      d0,  q3,  #10
118
-        vst1.16         {d0}, [r0,:64], r1
119
-        vst1.16         {d0}, [r0,:64], r1
120
-        vst1.16         {d0}, [r0,:64], r1
121
-        vst1.16         {d0}, [r0,:64], r1
111
+/* void ff_rv34_idct_dc_add_neon(uint8_t *dst, int stride, int dc) */
112
+function ff_rv34_idct_dc_add_neon, export=1
113
+        mov             r3,  r0
114
+        vld1.32         {d28[]},  [r0,:32], r1
115
+        vld1.32         {d29[]},  [r0,:32], r1
116
+        vdup.16         d0,  r2
117
+        vmov.s16        d1,  #169
118
+        vld1.32         {d28[1]}, [r0,:32], r1
119
+        vmull.s16       q1,  d0,  d1    @ dc * 13 * 13
120
+        vld1.32         {d29[1]}, [r0,:32], r1
121
+        vrshrn.s32      d0,  q1,  #10   @ (dc * 13 * 13 + 0x200) >> 10
122
+        vmov            d1,  d0
123
+        vaddw.u8        q2,  q0,  d28
124
+        vaddw.u8        q3,  q0,  d29
125
+        vqmovun.s16     d28, q2
126
+        vqmovun.s16     d29, q3
127
+        vst1.32         {d28[0]}, [r3,:32], r1
128
+        vst1.32         {d29[0]}, [r3,:32], r1
129
+        vst1.32         {d28[1]}, [r3,:32], r1
130
+        vst1.32         {d29[1]}, [r3,:32], r1
122 131
         bx              lr
123 132
 endfunc
124 133
 
... ...
@@ -127,12 +147,10 @@ function ff_rv34_inv_transform_noround_dc_neon, export=1
127 127
         vld1.16         {d28[]}, [r0,:16]       @ block[0]
128 128
         vmov.i16        d4,  #251
129 129
         vorr.s16        d4,  #256               @ 13^2 * 3
130
-        mov             r1,  #16
131 130
         vmull.s16       q3,  d28, d4
132 131
         vshrn.s32       d0,  q3,  #11
133
-        vst1.64         {d0}, [r0,:64], r1
134
-        vst1.64         {d0}, [r0,:64], r1
135
-        vst1.64         {d0}, [r0,:64], r1
136
-        vst1.64         {d0}, [r0,:64], r1
132
+        vmov.i16        d1,  d0
133
+        vst1.64         {q0}, [r0,:128]!
134
+        vst1.64         {q0}, [r0,:128]!
137 135
         bx              lr
138 136
 endfunc
... ...
@@ -761,6 +761,11 @@ typedef struct RcOverride{
  * Encoders:
  * The encoder needs to be fed with NULL data at the end of encoding until the
  * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ *       flag also means that the encoder must set the pts and duration for
+ *       each output packet. If this flag is not set, the pts and duration will
+ *       be determined by libavcodec from the input frame.
  */
 #define CODEC_CAP_DELAY           0x0020
 /**
... ...
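As a hedged illustration of the CODEC_CAP_DELAY note above: with the new audio API, draining a delayed encoder looks roughly like the sketch below. The enc context and the packet handling are placeholders, not part of the patch; flush_encoders() in the avconv.c/ffmpeg.c hunks earlier does the same thing.

AVPacket pkt;
int got_packet;

for (;;) {
    av_init_packet(&pkt);
    pkt.data = NULL;            /* let the encoder allocate the packet */
    pkt.size = 0;
    got_packet = 0;
    if (avcodec_encode_audio2(enc, &pkt, NULL, &got_packet) < 0)
        break;                  /* encoding error */
    if (!got_packet)
        break;                  /* encoder fully drained */
    /* ... mux the packet here ... */
    av_free_packet(&pkt);
}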
@@ -816,6 +821,10 @@ typedef struct RcOverride{
  */
 #define CODEC_CAP_AUTO_THREADS     0x8000
 /**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
+/**
  * Codec is lossless.
  */
 #define CODEC_CAP_LOSSLESS         0x80000000
... ...
@@ -3314,6 +3323,19 @@ typedef struct AVCodec {
      * Initialize codec static data, called from avcodec_register().
      */
     void (*init_static_data)(struct AVCodec *codec);
+
+    /**
+     * Encode data to an AVPacket.
+     *
+     * @param      avctx          codec context
+     * @param      avpkt          output AVPacket (may contain a user-provided buffer)
+     * @param[in]  frame          AVFrame containing the raw data to be encoded
+     * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+     *                            non-empty packet was returned in avpkt.
+     * @return 0 on success, negative error code on failure
+     */
+    int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+                   int *got_packet_ptr);
 } AVCodec;
 
 /**
... ...
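To make the new callback concrete, here is a rough, illustrative encode2() skeleton modelled on the pcm.c conversion later in this merge; the encoder name and the sample-copying step are placeholders rather than code from the patch.

static int foo_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    int ret;
    int out_size = frame->nb_samples * avctx->channels *
                   av_get_bytes_per_sample(avctx->sample_fmt);

    /* ff_alloc_packet() (added to libavcodec/internal.h in this merge)
     * validates a user-supplied buffer or allocates one of this size. */
    if ((ret = ff_alloc_packet(avpkt, out_size)) < 0)
        return ret;

    /* ... encode frame->data[0] into avpkt->data ... */

    avpkt->size     = out_size;
    *got_packet_ptr = 1;
    return 0;
}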
@@ -4331,9 +4353,12 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
  */
 void avsubtitle_free(AVSubtitle *sub);
 
+#if FF_API_OLD_ENCODE_AUDIO
 /**
  * Encode an audio frame from samples into buf.
  *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
  * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
  * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
  * will know how much space is needed because it depends on the value passed
... ...
@@ -4353,8 +4378,71 @@ void avsubtitle_free(AVSubtitle *sub);
  * @return On error a negative value is returned, on success zero or the number
  * of bytes used to encode the data read from the input buffer.
  */
-int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
-                         const short *samples);
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+                                              uint8_t *buf, int buf_size,
+                                              const short *samples);
+#endif
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx     codec context
+ * @param avpkt     output AVPacket.
+ *                  The user can supply an output buffer by setting
+ *                  avpkt->data and avpkt->size prior to calling the
+ *                  function, but if the size of the user-provided data is not
+ *                  large enough, encoding will fail. All other AVPacket fields
+ *                  will be reset by the encoder using av_init_packet(). If
+ *                  avpkt->data is NULL, the encoder will allocate it.
+ *                  The encoder will set avpkt->size to the size of the
+ *                  output packet.
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ *                  May be NULL when flushing an encoder that has the
+ *                  CODEC_CAP_DELAY capability set.
+ *                  There are 2 codec capabilities that affect the allowed
+ *                  values of frame->nb_samples.
+ *                  If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final
+ *                  frame may be smaller than avctx->frame_size, and all other
+ *                  frames must be equal to avctx->frame_size.
+ *                  If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ *                  can have any number of samples.
+ *                  If neither is set, frame->nb_samples must be equal to
+ *                  avctx->frame_size for all frames.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ *                            output packet is non-empty, and to 0 if it is
+ *                            empty. If the function returns an error, the
+ *                            packet can be assumed to be invalid, and the
+ *                            value of got_packet_ptr is undefined and should
+ *                            not be used.
+ * @return          0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+                          const AVFrame *frame, int *got_packet_ptr);
+
+/**
+ * Fill audio frame data and linesize.
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame       the AVFrame
+ *                    frame->nb_samples must be set prior to calling the
+ *                    function. This function fills in frame->data,
+ *                    frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt  sample format
+ * @param buf         buffer to use for frame data
+ * @param buf_size    size of buffer
+ * @param align       plane size sample alignment
+ * @return            0 on success, negative error code on failure
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+                             enum AVSampleFormat sample_fmt, const uint8_t *buf,
+                             int buf_size, int align);
 
 /**
  * Encode a video frame from pict into buf.
... ...
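A hedged usage sketch for the two new public functions documented above; avctx, samples and nb_samples are assumed to come from the caller, and error handling is abbreviated.

AVFrame *frame = avcodec_alloc_frame();
AVPacket pkt;
int got_packet = 0, ret;

frame->nb_samples = nb_samples;                 /* must be set before filling */
ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt,
                               samples,         /* interleaved input buffer   */
                               nb_samples * avctx->channels *
                               av_get_bytes_per_sample(avctx->sample_fmt),
                               1);
if (ret < 0)
    return ret;

av_init_packet(&pkt);
pkt.data = NULL;                                /* encoder allocates the data */
pkt.size = 0;
if (avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) == 0 && got_packet) {
    /* ... write pkt.data / pkt.size to the output, then ... */
    av_free_packet(&pkt);
}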
@@ -61,6 +61,14 @@ typedef struct AVCodecInternal {
      * should be freed from the original context only.
      */
     int is_copy;
+
+#if FF_API_OLD_DECODE_AUDIO
+    /**
+     * Internal sample count used by avcodec_encode_audio() to fabricate pts.
+     * Can be removed along with avcodec_encode_audio().
+     */
+    int sample_count;
+#endif
 } AVCodecInternal;
 
 struct AVCodecDefault {
... ...
@@ -111,4 +119,21 @@ int avpriv_unlock_avformat(void);
  */
 #define FF_MAX_EXTRADATA_SIZE ((1 << 28) - FF_INPUT_BUFFER_PADDING_SIZE)
 
+/**
+ * Check AVPacket size and/or allocate data.
+ *
+ * Encoders supporting AVCodec.encode2() can use this as a convenience to
+ * ensure the output packet data is large enough, whether provided by the user
+ * or allocated in this function.
+ *
+ * @param avpkt   the AVPacket
+ *                If avpkt->data is already set, avpkt->size is checked
+ *                to ensure it is large enough.
+ *                If avpkt->data is NULL, a new buffer is allocated.
+ *                All other AVPacket fields will be reset with av_init_packet().
+ * @param size    the minimum required packet size
+ * @return        0 on success, negative error code on failure
+ */
+int ff_alloc_packet(AVPacket *avpkt, int size);
+
 #endif /* AVCODEC_INTERNAL_H */
... ...
@@ -27,6 +27,7 @@
27 27
 #include "avcodec.h"
28 28
 #include "libavutil/common.h" /* for av_reverse */
29 29
 #include "bytestream.h"
30
+#include "internal.h"
30 31
 #include "pcm_tablegen.h"
31 32
 
32 33
 #define MAX_CHANNELS 64
... ...
@@ -77,10 +78,10 @@ static av_cold int pcm_encode_close(AVCodecContext *avctx)
77 77
         bytestream_put_##endian(&dst, v); \
78 78
     }
79 79
 
80
-static int pcm_encode_frame(AVCodecContext *avctx,
81
-                            unsigned char *frame, int buf_size, void *data)
80
+static int pcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
81
+                            const AVFrame *frame, int *got_packet_ptr)
82 82
 {
83
-    int n, sample_size, v;
83
+    int n, sample_size, v, ret;
84 84
     const short *samples;
85 85
     unsigned char *dst;
86 86
     const uint8_t *srcu8;
... ...
@@ -91,9 +92,14 @@ static int pcm_encode_frame(AVCodecContext *avctx,
91 91
     const uint32_t *samples_uint32_t;
92 92
 
93 93
     sample_size = av_get_bits_per_sample(avctx->codec->id)/8;
94
-    n = buf_size / sample_size;
95
-    samples = data;
96
-    dst = frame;
94
+    n           = frame->nb_samples * avctx->channels;
95
+    samples     = (const short *)frame->data[0];
96
+
97
+    if ((ret = ff_alloc_packet(avpkt, n * sample_size))) {
98
+        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
99
+        return ret;
100
+    }
101
+    dst = avpkt->data;
97 102
 
98 103
     switch(avctx->codec->id) {
99 104
     case CODEC_ID_PCM_U32LE:
... ...
@@ -130,7 +136,7 @@ static int pcm_encode_frame(AVCodecContext *avctx,
130 130
         ENCODE(uint16_t, be16, samples, dst, n, 0, 0x8000)
131 131
         break;
132 132
     case CODEC_ID_PCM_S8:
133
-        srcu8= data;
133
+        srcu8 = frame->data[0];
134 134
         for(;n>0;n--) {
135 135
             v = *srcu8++;
136 136
             *dst++ = v - 128;
... ...
@@ -186,9 +192,10 @@ static int pcm_encode_frame(AVCodecContext *avctx,
186 186
     default:
187 187
         return -1;
188 188
     }
189
-    //avctx->frame_size = (dst - frame) / (sample_size * avctx->channels);
190 189
 
191
-    return dst - frame;
190
+    avpkt->size = frame->nb_samples * avctx->channels * sample_size;
191
+    *got_packet_ptr = 1;
192
+    return 0;
192 193
 }
193 194
 
194 195
 typedef struct PCMDecode {
... ...
@@ -474,8 +481,9 @@ AVCodec ff_ ## name_ ## _encoder = {            \
474 474
     .type        = AVMEDIA_TYPE_AUDIO,          \
475 475
     .id          = id_,                         \
476 476
     .init        = pcm_encode_init,             \
477
-    .encode      = pcm_encode_frame,            \
477
+    .encode2     = pcm_encode_frame,            \
478 478
     .close       = pcm_encode_close,            \
479
+    .capabilities = CODEC_CAP_VARIABLE_FRAME_SIZE, \
479 480
     .sample_fmts = (const enum AVSampleFormat[]){sample_fmt_,AV_SAMPLE_FMT_NONE}, \
480 481
     .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
481 482
 }
... ...
@@ -33,6 +33,7 @@ typedef struct PicContext {
33 33
     AVFrame frame;
34 34
     int width, height;
35 35
     int nb_planes;
36
+    GetByteContext g;
36 37
 } PicContext;
37 38
 
38 39
 static void picmemset_8bpp(PicContext *s, int value, int run, int *x, int *y)
... ...
@@ -55,7 +56,8 @@ static void picmemset_8bpp(PicContext *s, int value, int run, int *x, int *y)
55 55
     }
56 56
 }
57 57
 
58
-static void picmemset(PicContext *s, int value, int run, int *x, int *y, int *plane, int bits_per_plane)
58
+static void picmemset(PicContext *s, int value, int run,
59
+                      int *x, int *y, int *plane, int bits_per_plane)
59 60
 {
60 61
     uint8_t *d;
61 62
     int shift = *plane * bits_per_plane;
... ...
@@ -107,34 +109,35 @@ static int decode_frame(AVCodecContext *avctx,
107 107
                         AVPacket *avpkt)
108 108
 {
109 109
     PicContext *s = avctx->priv_data;
110
-    int buf_size = avpkt->size;
111
-    const uint8_t *buf = avpkt->data;
112
-    const uint8_t *buf_end = avpkt->data + buf_size;
113 110
     uint32_t *palette;
114
-    int bits_per_plane, bpp, etype, esize, npal;
115
-    int i, x, y, plane;
111
+    int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
112
+    int i, x, y, plane, tmp;
116 113
 
117
-    if (buf_size < 11)
114
+    bytestream2_init(&s->g, avpkt->data, avpkt->size);
115
+
116
+    if (bytestream2_get_bytes_left(&s->g) < 11)
118 117
         return AVERROR_INVALIDDATA;
119 118
 
120
-    if (bytestream_get_le16(&buf) != 0x1234)
119
+    if (bytestream2_get_le16u(&s->g) != 0x1234)
121 120
         return AVERROR_INVALIDDATA;
122
-    s->width  = bytestream_get_le16(&buf);
123
-    s->height = bytestream_get_le16(&buf);
124
-    buf += 4;
125
-    bits_per_plane    = *buf & 0xF;
126
-    s->nb_planes      = (*buf++ >> 4) + 1;
127
-    bpp               = s->nb_planes ? bits_per_plane*s->nb_planes : bits_per_plane;
121
+
122
+    s->width       = bytestream2_get_le16u(&s->g);
123
+    s->height      = bytestream2_get_le16u(&s->g);
124
+    bytestream2_skip(&s->g, 4);
125
+    tmp            = bytestream2_get_byteu(&s->g);
126
+    bits_per_plane = tmp & 0xF;
127
+    s->nb_planes   = (tmp >> 4) + 1;
128
+    bpp            = bits_per_plane * s->nb_planes;
128 129
     if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
129 130
         av_log_ask_for_sample(avctx, "unsupported bit depth\n");
130 131
         return AVERROR_INVALIDDATA;
131 132
     }
132 133
 
133
-    if (*buf == 0xFF || bpp == 8) {
134
-        buf += 2;
135
-        etype  = bytestream_get_le16(&buf);
136
-        esize  = bytestream_get_le16(&buf);
137
-        if (buf_end - buf < esize)
134
+    if (bytestream2_peek_byte(&s->g) == 0xFF || bpp == 8) {
135
+        bytestream2_skip(&s->g, 2);
136
+        etype = bytestream2_get_le16(&s->g);
137
+        esize = bytestream2_get_le16(&s->g);
138
+        if (bytestream2_get_bytes_left(&s->g) < esize)
138 139
             return AVERROR_INVALIDDATA;
139 140
     } else {
140 141
         etype = -1;
... ...
@@ -159,25 +162,30 @@ static int decode_frame(AVCodecContext *avctx,
159 159
     s->frame.pict_type           = AV_PICTURE_TYPE_I;
160 160
     s->frame.palette_has_changed = 1;
161 161
 
162
+    pos_after_pal = bytestream2_tell(&s->g) + esize;
162 163
     palette = (uint32_t*)s->frame.data[1];
163
-    if (etype == 1 && esize > 1 && *buf < 6) {
164
-        int idx = *buf;
164
+    if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) {
165
+        int idx = bytestream2_get_byte(&s->g);
165 166
         npal = 4;
166 167
         for (i = 0; i < npal; i++)
167 168
             palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
168 169
     } else if (etype == 2) {
169 170
         npal = FFMIN(esize, 16);
170
-        for (i = 0; i < npal; i++)
171
-            palette[i] = ff_cga_palette[ FFMIN(buf[i], 16)];
171
+        for (i = 0; i < npal; i++) {
172
+            int pal_idx = bytestream2_get_byte(&s->g);
173
+            palette[i]  = ff_cga_palette[FFMIN(pal_idx, 16)];
174
+        }
172 175
     } else if (etype == 3) {
173 176
         npal = FFMIN(esize, 16);
174
-        for (i = 0; i < npal; i++)
175
-            palette[i] = ff_ega_palette[ FFMIN(buf[i], 63)];
177
+        for (i = 0; i < npal; i++) {
178
+            int pal_idx = bytestream2_get_byte(&s->g);
179
+            palette[i]  = ff_ega_palette[FFMIN(pal_idx, 63)];
180
+        }
176 181
     } else if (etype == 4 || etype == 5) {
177 182
         npal = FFMIN(esize / 3, 256);
178 183
         for (i = 0; i < npal; i++) {
179
-            palette[i] = AV_RB24(buf + i*3) << 2;
180
-            palette[i] |= 0xFF << 24 | palette[i] >> 6 & 0x30303;
184
+            palette[i] = bytestream2_get_be24(&s->g) << 2;
185
+            palette[i] |= 0xFFU << 24 | palette[i] >> 6 & 0x30303;
181 186
         }
182 187
     } else {
183 188
         if (bpp == 1) {
... ...
@@ -195,29 +203,34 @@ static int decode_frame(AVCodecContext *avctx,
195 195
     }
196 196
     // fill remaining palette entries
197 197
     memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
198
-    buf += esize;
199
-
198
+    // skip remaining palette bytes
199
+    bytestream2_seek(&s->g, pos_after_pal, SEEK_SET);
200 200
 
201 201
     y = s->height - 1;
202
-    if (bytestream_get_le16(&buf)) {
202
+    if (bytestream2_get_le16(&s->g)) {
203 203
         x = 0;
204 204
         plane = 0;
205
-        while (y >= 0 && buf_end - buf >= 6) {
206
-            const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf);
207
-            //ignore uncompressed block size reported at buf[2]
208
-            int marker = buf[4];
209
-            buf += 5;
205
+        while (y >= 0 && bytestream2_get_bytes_left(&s->g) >= 6) {
206
+            int stop_size, marker, t1, t2;
207
+
208
+            t1        = bytestream2_get_bytes_left(&s->g);
209
+            t2        = bytestream2_get_le16(&s->g);
210
+            stop_size = t1 - FFMIN(t1, t2);
211
+            // ignore uncompressed block size
212
+            bytestream2_skip(&s->g, 2);
213
+            marker    = bytestream2_get_byte(&s->g);
210 214
 
211
-            while (plane < s->nb_planes && y >= 0 && buf_pend - buf >= 1) {
215
+            while (plane < s->nb_planes && y >= 0 &&
216
+                   bytestream2_get_bytes_left(&s->g) > stop_size) {
212 217
                 int run = 1;
213
-                int val = *buf++;
218
+                int val = bytestream2_get_byte(&s->g);
214 219
                 if (val == marker) {
215
-                    run = *buf++;
220
+                    run = bytestream2_get_byte(&s->g);
216 221
                     if (run == 0)
217
-                        run = bytestream_get_le16(&buf);
218
-                    val = *buf++;
222
+                        run = bytestream2_get_le16(&s->g);
223
+                    val = bytestream2_get_byte(&s->g);
219 224
                 }
220
-                if (buf > buf_end)
225
+                if (!bytestream2_get_bytes_left(&s->g))
221 226
                     break;
222 227
 
223 228
                 if (bits_per_plane == 8) {
... ...
@@ -228,16 +241,16 @@ static int decode_frame(AVCodecContext *avctx,
228 228
             }
229 229
         }
230 230
     } else {
231
-        while (y >= 0 && buf < buf_end) {
232
-            memcpy(s->frame.data[0] + y * s->frame.linesize[0], buf, FFMIN(avctx->width, buf_end - buf));
233
-            buf += avctx->width;
231
+        while (y >= 0 && bytestream2_get_bytes_left(&s->g) > 0) {
232
+            memcpy(s->frame.data[0] + y * s->frame.linesize[0], s->g.buffer, FFMIN(avctx->width, bytestream2_get_bytes_left(&s->g)));
233
+            bytestream2_skip(&s->g, avctx->width);
234 234
             y--;
235 235
         }
236 236
     }
237 237
 
238 238
     *data_size = sizeof(AVFrame);
239 239
     *(AVFrame*)data = s->frame;
240
-    return buf_size;
240
+    return avpkt->size;
241 241
 }
242 242
 
243 243
 static av_cold int decode_end(AVCodecContext *avctx)
... ...
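The pictordec hunks above swap raw buf/buf_end pointer walking for the bounds-checked bytestream2 reader on a GetByteContext, so short packets are clamped instead of relying on manual end checks. A minimal sketch of the reader pattern, assuming the in-tree header libavcodec/bytestream.h; parse_chunk and its tag/size layout are illustrative, not part of the commit:

    #include "libavcodec/bytestream.h"

    /* Illustrative: read a little-endian tag/size pair and skip the payload,
     * the way the decoder now walks its extra data.  Every read is bounded
     * by the GetByteContext, so a short packet cannot cause an over-read. */
    static int parse_chunk(GetByteContext *g, int *tag, int *size)
    {
        if (bytestream2_get_bytes_left(g) < 4)
            return -1;                    /* not even a complete header left */
        *tag  = bytestream2_get_le16(g);  /* get_* reads advance the cursor  */
        *size = bytestream2_get_le16(g);
        if (bytestream2_get_bytes_left(g) < *size)
            return -1;                    /* payload truncated               */
        bytestream2_skip(g, *size);       /* skip is clamped to the buffer   */
        return 0;
    }

    /* usage: GetByteContext g; bytestream2_init(&g, avpkt->data, avpkt->size); */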
@@ -240,15 +240,15 @@ static inline void decode_subblock(DCTELEM *dst, int code, const int is_block2,
240 240
 {
241 241
     int flags = modulo_three_table[code];
242 242
 
243
-    decode_coeff(    dst+0, (flags >> 6)    , 3, gb, vlc, q);
243
+    decode_coeff(    dst+0*4+0, (flags >> 6)    , 3, gb, vlc, q);
244 244
     if(is_block2){
245
-        decode_coeff(dst+8, (flags >> 4) & 3, 2, gb, vlc, q);
246
-        decode_coeff(dst+1, (flags >> 2) & 3, 2, gb, vlc, q);
245
+        decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
246
+        decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
247 247
     }else{
248
-        decode_coeff(dst+1, (flags >> 4) & 3, 2, gb, vlc, q);
249
-        decode_coeff(dst+8, (flags >> 2) & 3, 2, gb, vlc, q);
248
+        decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
249
+        decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
250 250
     }
251
-    decode_coeff(    dst+9, (flags >> 0) & 3, 2, gb, vlc, q);
251
+    decode_coeff(    dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
252 252
 }
253 253
 
254 254
 /**
... ...
@@ -265,15 +265,15 @@ static inline void decode_subblock3(DCTELEM *dst, int code, const int is_block2,
265 265
 {
266 266
     int flags = modulo_three_table[code];
267 267
 
268
-    decode_coeff(    dst+0, (flags >> 6)    , 3, gb, vlc, q_dc);
268
+    decode_coeff(    dst+0*4+0, (flags >> 6)    , 3, gb, vlc, q_dc);
269 269
     if(is_block2){
270
-        decode_coeff(dst+8, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
271
-        decode_coeff(dst+1, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
270
+        decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
271
+        decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
272 272
     }else{
273
-        decode_coeff(dst+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
274
-        decode_coeff(dst+8, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
273
+        decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
274
+        decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
275 275
     }
276
-    decode_coeff(    dst+9, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
276
+    decode_coeff(    dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
277 277
 }
278 278
 
279 279
 /**
... ...
@@ -308,15 +308,15 @@ static inline int rv34_decode_block(DCTELEM *dst, GetBitContext *gb, RV34VLC *rv
308 308
 
309 309
     if(pattern & 4){
310 310
         code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
311
-        decode_subblock(dst + 2, code, 0, gb, &rvlc->coefficient, q_ac2);
311
+        decode_subblock(dst + 4*0+2, code, 0, gb, &rvlc->coefficient, q_ac2);
312 312
     }
313 313
     if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
314 314
         code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2);
315
-        decode_subblock(dst + 8*2, code, 1, gb, &rvlc->coefficient, q_ac2);
315
+        decode_subblock(dst + 4*2+0, code, 1, gb, &rvlc->coefficient, q_ac2);
316 316
     }
317 317
     if(pattern & 1){
318 318
         code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2);
319
-        decode_subblock(dst + 8*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
319
+        decode_subblock(dst + 4*2+2, code, 0, gb, &rvlc->coefficient, q_ac2);
320 320
     }
321 321
     return has_ac || pattern;
322 322
 }
... ...
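The offset rewrites above (dst+8 becoming dst+1*4+0, dst+9 becoming dst+1*4+1, 8*2 becoming 4*2) follow from storing each coded block as a dense 16-coefficient 4x4 array instead of the corner of a 64-entry 8x8 block: a coefficient at row r, column c now sits at r*4 + c rather than r*8 + c. A small illustrative mapping (not code from the commit):

    /* Same (row, col) coefficient, two storage strides. */
    static inline int coef_off_8x8(int row, int col) { return row * 8 + col; } /* old: 4x4 data inside a 64-entry 8x8 block */
    static inline int coef_off_4x4(int row, int col) { return row * 4 + col; } /* new: dense 16-entry 4x4 block             */

    /* e.g. the second-row, second-column coefficient:
     * old coef_off_8x8(1, 1) == 9  (the former dst+9)
     * new coef_off_4x4(1, 1) == 5  (the new dst+1*4+1) */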
@@ -351,44 +351,70 @@ static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
351 351
 }
352 352
 
353 353
 /**
354
- * Decode macroblock header and return CBP in case of success, -1 otherwise.
354
+ * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
355 355
  */
356
-static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
356
+static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
357 357
 {
358 358
     MpegEncContext *s = &r->s;
359 359
     GetBitContext *gb = &s->gb;
360 360
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
361
-    int i, t;
361
+    int t;
362 362
 
363
-    if(!r->si.type){
364
-        r->is16 = get_bits1(gb);
365
-        if(!r->is16 && !r->rv30){
363
+    r->is16 = get_bits1(gb);
364
+    if(r->is16){
365
+        s->current_picture_ptr->f.mb_type[mb_pos] = MB_TYPE_INTRA16x16;
366
+        r->block_type = RV34_MB_TYPE_INTRA16x16;
367
+        t = get_bits(gb, 2);
368
+        fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
369
+        r->luma_vlc   = 2;
370
+    }else{
371
+        if(!r->rv30){
366 372
             if(!get_bits1(gb))
367 373
                 av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
368 374
         }
369
-        s->current_picture_ptr->f.mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
370
-        r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA;
371
-    }else{
372
-        r->block_type = r->decode_mb_info(r);
373
-        if(r->block_type == -1)
375
+        s->current_picture_ptr->f.mb_type[mb_pos] = MB_TYPE_INTRA;
376
+        r->block_type = RV34_MB_TYPE_INTRA;
377
+        if(r->decode_intra_types(r, gb, intra_types) < 0)
374 378
             return -1;
375
-        s->current_picture_ptr->f.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
376
-        r->mb_type[mb_pos] = r->block_type;
377
-        if(r->block_type == RV34_MB_SKIP){
378
-            if(s->pict_type == AV_PICTURE_TYPE_P)
379
-                r->mb_type[mb_pos] = RV34_MB_P_16x16;
380
-            if(s->pict_type == AV_PICTURE_TYPE_B)
381
-                r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
382
-        }
383
-        r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->f.mb_type[mb_pos]);
384
-        rv34_decode_mv(r, r->block_type);
385
-        if(r->block_type == RV34_MB_SKIP){
386
-            fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
387
-            return 0;
388
-        }
389
-        r->chroma_vlc = 1;
390
-        r->luma_vlc   = 0;
379
+        r->luma_vlc   = 1;
380
+    }
381
+
382
+    r->chroma_vlc = 0;
383
+    r->cur_vlcs   = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
384
+
385
+    return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
386
+}
387
+
388
+/**
389
+ * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
390
+ */
391
+static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
392
+{
393
+    MpegEncContext *s = &r->s;
394
+    GetBitContext *gb = &s->gb;
395
+    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
396
+    int i, t;
397
+
398
+    r->block_type = r->decode_mb_info(r);
399
+    if(r->block_type == -1)
400
+        return -1;
401
+    s->current_picture_ptr->f.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
402
+    r->mb_type[mb_pos] = r->block_type;
403
+    if(r->block_type == RV34_MB_SKIP){
404
+        if(s->pict_type == AV_PICTURE_TYPE_P)
405
+            r->mb_type[mb_pos] = RV34_MB_P_16x16;
406
+        if(s->pict_type == AV_PICTURE_TYPE_B)
407
+            r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
391 408
     }
409
+    r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->f.mb_type[mb_pos]);
410
+    rv34_decode_mv(r, r->block_type);
411
+    if(r->block_type == RV34_MB_SKIP){
412
+        fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
413
+        return 0;
414
+    }
415
+    r->chroma_vlc = 1;
416
+    r->luma_vlc   = 0;
417
+
392 418
     if(IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){
393 419
         if(r->is16){
394 420
             t = get_bits(gb, 2);
... ...
@@ -956,15 +982,6 @@ static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int
956 956
     r->h.pred4x4[itype](dst, prev, stride);
957 957
 }
958 958
 
959
-/** add_pixels_clamped for 4x4 block */
960
-static void rv34_add_4x4_block(uint8_t *dst, int stride, DCTELEM block[64], int off)
961
-{
962
-    int x, y;
963
-    for(y = 0; y < 4; y++)
964
-        for(x = 0; x < 4; x++)
965
-            dst[x + y*stride] = av_clip_uint8(dst[x + y*stride] + block[off + x+y*8]);
966
-}
967
-
968 959
 static inline int adjust_pred16(int itype, int up, int left)
969 960
 {
970 961
     if(!up && !left)
... ...
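The deleted rv34_add_4x4_block helper is no longer needed once reconstruction happens inside the DSP layer: rv34_idct_add performs the transform and the clamped add in one step. For reference, a sketch of the clamped add it used to perform, restated for the new dense 4x4 coefficient layout (assumes 8-bit samples and av_clip_uint8 from libavutil/common.h; this is an illustration, not the commit's code):

    #include <stdint.h>
    #include "libavutil/common.h"

    /* Clamped add of a dense 4x4 residual (stride 4) into an 8-bit plane --
     * the job rv34_add_4x4_block did, now folded into rv34_idct_add(). */
    static void add_residual_4x4(uint8_t *dst, int stride, const int16_t *residual)
    {
        int x, y;
        for (y = 0; y < 4; y++, dst += stride, residual += 4)
            for (x = 0; x < 4; x++)
                dst[x] = av_clip_uint8(dst[x] + residual[x]);
    }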
@@ -981,15 +998,35 @@ static inline int adjust_pred16(int itype, int up, int left)
981 981
     return itype;
982 982
 }
983 983
 
984
-static void rv34_output_macroblock(RV34DecContext *r, int8_t *intra_types, int cbp, int is16)
984
+static inline void rv34_process_block(RV34DecContext *r,
985
+                                      uint8_t *pdst, int stride,
986
+                                      int fc, int sc, int q_dc, int q_ac)
985 987
 {
986 988
     MpegEncContext *s = &r->s;
987
-    DSPContext *dsp = &s->dsp;
988
-    int i, j;
989
-    uint8_t *Y, *U, *V;
990
-    int itype;
991
-    int avail[6*8] = {0};
992
-    int idx;
989
+    DCTELEM *ptr = s->block[0];
990
+    int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
991
+                                   fc, sc, q_dc, q_ac, q_ac);
992
+    if(has_ac){
993
+        r->rdsp.rv34_idct_add(pdst, stride, ptr);
994
+    }else{
995
+        r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
996
+        ptr[0] = 0;
997
+    }
998
+}
999
+
1000
+static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1001
+{
1002
+    LOCAL_ALIGNED_16(DCTELEM, block16, [16]);
1003
+    MpegEncContext *s    = &r->s;
1004
+    GetBitContext  *gb   = &s->gb;
1005
+    int             q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1006
+                    q_ac = rv34_qscale_tab[s->qscale];
1007
+    uint8_t        *dst  = s->dest[0];
1008
+    DCTELEM        *ptr  = s->block[0];
1009
+    int       avail[6*8] = {0};
1010
+    int i, j, itype, has_ac;
1011
+
1012
+    memset(block16, 0, 16 * sizeof(*block16));
993 1013
 
994 1014
     // Set neighbour information.
995 1015
     if(r->avail_cache[1])
... ...
@@ -1005,80 +1042,118 @@ static void rv34_output_macroblock(RV34DecContext *r, int8_t *intra_types, int c
1005 1005
     if(r->avail_cache[9])
1006 1006
         avail[24] = avail[32] = 1;
1007 1007
 
1008
-    Y = s->dest[0];
1009
-    U = s->dest[1];
1010
-    V = s->dest[2];
1011
-    if(!is16){
1012
-        for(j = 0; j < 4; j++){
1013
-            idx = 9 + j*8;
1014
-            for(i = 0; i < 4; i++, cbp >>= 1, Y += 4, idx++){
1015
-                rv34_pred_4x4_block(r, Y, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1016
-                avail[idx] = 1;
1017
-                if(cbp & 1)
1018
-                    rv34_add_4x4_block(Y, s->linesize, s->block[(i>>1)+(j&2)], (i&1)*4+(j&1)*32);
1019
-            }
1020
-            Y += s->linesize * 4 - 4*4;
1021
-            intra_types += r->intra_types_stride;
1008
+    has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1009
+    if(has_ac)
1010
+        r->rdsp.rv34_inv_transform(block16);
1011
+    else
1012
+        r->rdsp.rv34_inv_transform_dc(block16);
1013
+
1014
+    itype = ittrans16[intra_types[0]];
1015
+    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1016
+    r->h.pred16x16[itype](dst, s->linesize);
1017
+
1018
+    for(j = 0; j < 4; j++){
1019
+        for(i = 0; i < 4; i++, cbp >>= 1){
1020
+            int dc = block16[i + j*4];
1021
+
1022
+            if(cbp & 1){
1023
+                has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1024
+            }else
1025
+                has_ac = 0;
1026
+
1027
+            if(has_ac){
1028
+                ptr[0] = dc;
1029
+                r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1030
+            }else
1031
+                r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1022 1032
         }
1023
-        intra_types -= r->intra_types_stride * 4;
1024
-        fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1025
-        for(j = 0; j < 2; j++){
1026
-            idx = 6 + j*4;
1027
-            for(i = 0; i < 2; i++, cbp >>= 1, idx++){
1028
-                rv34_pred_4x4_block(r, U + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*r->intra_types_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]);
1029
-                rv34_pred_4x4_block(r, V + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*r->intra_types_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]);
1030
-                r->avail_cache[idx] = 1;
1031
-                if(cbp & 0x01)
1032
-                    rv34_add_4x4_block(U + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[4], i*4+j*32);
1033
-                if(cbp & 0x10)
1034
-                    rv34_add_4x4_block(V + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[5], i*4+j*32);
1035
-            }
1033
+
1034
+        dst += 4*s->linesize;
1035
+    }
1036
+
1037
+    itype = ittrans16[intra_types[0]];
1038
+    if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1039
+    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1040
+
1041
+    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1042
+    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1043
+
1044
+    for(j = 1; j < 3; j++){
1045
+        dst = s->dest[j];
1046
+        r->h.pred8x8[itype](dst, s->uvlinesize);
1047
+        for(i = 0; i < 4; i++, cbp >>= 1){
1048
+            uint8_t *pdst;
1049
+            if(!(cbp & 1)) continue;
1050
+            pdst   = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1051
+
1052
+            rv34_process_block(r, pdst, s->uvlinesize,
1053
+                               r->chroma_vlc, 1, q_dc, q_ac);
1036 1054
         }
1037
-    }else{
1038
-        itype = ittrans16[intra_types[0]];
1039
-        itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1040
-        r->h.pred16x16[itype](Y, s->linesize);
1041
-        dsp->add_pixels_clamped(s->block[0], Y,     s->linesize);
1042
-        dsp->add_pixels_clamped(s->block[1], Y + 8, s->linesize);
1043
-        Y += s->linesize * 8;
1044
-        dsp->add_pixels_clamped(s->block[2], Y,     s->linesize);
1045
-        dsp->add_pixels_clamped(s->block[3], Y + 8, s->linesize);
1046
-
1047
-        itype = ittrans16[intra_types[0]];
1048
-        if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1049
-        itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1050
-        r->h.pred8x8[itype](U, s->uvlinesize);
1051
-        dsp->add_pixels_clamped(s->block[4], U, s->uvlinesize);
1052
-        r->h.pred8x8[itype](V, s->uvlinesize);
1053
-        dsp->add_pixels_clamped(s->block[5], V, s->uvlinesize);
1054 1055
     }
1055 1056
 }
1056 1057
 
1057
-/**
1058
- * mask for retrieving all bits in coded block pattern
1059
- * corresponding to one 8x8 block
1060
- */
1061
-#define LUMA_CBP_BLOCK_MASK 0x33
1058
+static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1059
+{
1060
+    MpegEncContext *s   = &r->s;
1061
+    uint8_t        *dst = s->dest[0];
1062
+    int      avail[6*8] = {0};
1063
+    int i, j, k;
1064
+    int idx, q_ac, q_dc;
1062 1065
 
1063
-#define U_CBP_MASK 0x0F0000
1064
-#define V_CBP_MASK 0xF00000
1066
+    // Set neighbour information.
1067
+    if(r->avail_cache[1])
1068
+        avail[0] = 1;
1069
+    if(r->avail_cache[2])
1070
+        avail[1] = avail[2] = 1;
1071
+    if(r->avail_cache[3])
1072
+        avail[3] = avail[4] = 1;
1073
+    if(r->avail_cache[4])
1074
+        avail[5] = 1;
1075
+    if(r->avail_cache[5])
1076
+        avail[8] = avail[16] = 1;
1077
+    if(r->avail_cache[9])
1078
+        avail[24] = avail[32] = 1;
1065 1079
 
1066
-/** @} */ // recons group
1080
+    q_ac = rv34_qscale_tab[s->qscale];
1081
+    for(j = 0; j < 4; j++){
1082
+        idx = 9 + j*8;
1083
+        for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1084
+            rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1085
+            avail[idx] = 1;
1086
+            if(!(cbp & 1)) continue;
1087
+
1088
+            rv34_process_block(r, dst, s->linesize,
1089
+                               r->luma_vlc, 0, q_ac, q_ac);
1090
+        }
1091
+        dst += s->linesize * 4 - 4*4;
1092
+        intra_types += r->intra_types_stride;
1093
+    }
1067 1094
 
1095
+    intra_types -= r->intra_types_stride * 4;
1068 1096
 
1069
-static void rv34_apply_differences(RV34DecContext *r, int cbp)
1070
-{
1071
-    static const int shifts[4] = { 0, 2, 8, 10 };
1072
-    MpegEncContext *s = &r->s;
1073
-    int i;
1097
+    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1098
+    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1099
+
1100
+    for(k = 0; k < 2; k++){
1101
+        dst = s->dest[1+k];
1102
+        fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1103
+
1104
+        for(j = 0; j < 2; j++){
1105
+            int* acache = r->avail_cache + 6 + j*4;
1106
+            for(i = 0; i < 2; i++, cbp >>= 1, acache++){
1107
+                int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1108
+                rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1109
+                acache[0] = 1;
1074 1110
 
1075
-    for(i = 0; i < 4; i++)
1076
-        if((cbp & (LUMA_CBP_BLOCK_MASK << shifts[i])) || r->block_type == RV34_MB_P_MIX16x16)
1077
-            s->dsp.add_pixels_clamped(s->block[i], s->dest[0] + (i & 1)*8 + (i&2)*4*s->linesize, s->linesize);
1078
-    if(cbp & U_CBP_MASK)
1079
-        s->dsp.add_pixels_clamped(s->block[4], s->dest[1], s->uvlinesize);
1080
-    if(cbp & V_CBP_MASK)
1081
-        s->dsp.add_pixels_clamped(s->block[5], s->dest[2], s->uvlinesize);
1111
+                if(!(cbp&1)) continue;
1112
+
1113
+                rv34_process_block(r, dst + 4*i, s->uvlinesize,
1114
+                                   r->chroma_vlc, 1, q_dc, q_ac);
1115
+            }
1116
+
1117
+            dst += 4*s->uvlinesize;
1118
+        }
1119
+    }
1082 1120
 }
1083 1121
 
1084 1122
 static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
... ...
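Both output functions above address the four 4x4 chroma sub-blocks with dst + (i&1)*4 + (i&2)*2*s->uvlinesize, i.e. bit 0 of the CBP index selects the horizontal half and bit 1 the vertical half of the 8x8 chroma block. A tiny illustrative helper showing the mapping (not part of the commit):

    #include <stdint.h>

    /* CBP bit index i (0..3) -> origin of a 4x4 sub-block in the 8x8 chroma block:
     * i = 0 -> (0,0), i = 1 -> (4,0), i = 2 -> (0,4), i = 3 -> (4,4) */
    static uint8_t *chroma_subblock(uint8_t *dst, int uvlinesize, int i)
    {
        return dst + (i & 1) * 4 + (i & 2) * 2 * uvlinesize;
    }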
@@ -1123,17 +1198,17 @@ static int rv34_set_deblock_coef(RV34DecContext *r)
1123 1123
     return hmvmask | vmvmask;
1124 1124
 }
1125 1125
 
1126
-static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
1126
+static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1127 1127
 {
1128
-    MpegEncContext *s = &r->s;
1129
-    GetBitContext *gb = &s->gb;
1128
+    MpegEncContext *s   = &r->s;
1129
+    GetBitContext  *gb  = &s->gb;
1130
+    uint8_t        *dst = s->dest[0];
1131
+    DCTELEM        *ptr = s->block[0];
1132
+    int          mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1130 1133
     int cbp, cbp2;
1131 1134
     int q_dc, q_ac, has_ac;
1132
-    int i, blknum, blkoff;
1133
-    LOCAL_ALIGNED_16(DCTELEM, block16, [64]);
1134
-    int luma_dc_quant;
1135
+    int i, j;
1135 1136
     int dist;
1136
-    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1137 1137
 
1138 1138
     // Calculate which neighbours are available. Maybe it's worth optimizing too.
1139 1139
     memset(r->avail_cache, 0, sizeof(r->avail_cache));
... ...
@@ -1151,70 +1226,126 @@ static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
1151 1151
         r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1];
1152 1152
 
1153 1153
     s->qscale = r->si.quant;
1154
-    cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
1154
+    cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1155 1155
     r->cbp_luma  [mb_pos] = cbp;
1156 1156
     r->cbp_chroma[mb_pos] = cbp >> 16;
1157
-    if(s->pict_type == AV_PICTURE_TYPE_I)
1158
-        r->deblock_coefs[mb_pos] = 0xFFFF;
1159
-    else
1160
-        r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1157
+    r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1161 1158
     s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
1162 1159
 
1163 1160
     if(cbp == -1)
1164 1161
         return -1;
1165 1162
 
1166
-    luma_dc_quant = r->block_type == RV34_MB_P_MIX16x16 ? r->luma_dc_quant_p[s->qscale] : r->luma_dc_quant_i[s->qscale];
1163
+    if (IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos])){
1164
+        if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1165
+        else        rv34_output_intra(r, intra_types, cbp);
1166
+        return 0;
1167
+    }
1168
+
1167 1169
     if(r->is16){
1168
-        q_dc = rv34_qscale_tab[luma_dc_quant];
1170
+        // Only for RV34_MB_P_MIX16x16
1171
+        LOCAL_ALIGNED_16(DCTELEM, block16, [16]);
1172
+        memset(block16, 0, 16 * sizeof(*block16));
1173
+        q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1169 1174
         q_ac = rv34_qscale_tab[s->qscale];
1170
-        s->dsp.clear_block(block16);
1171 1175
         if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1172
-            r->rdsp.rv34_inv_transform_tab[1](block16);
1176
+            r->rdsp.rv34_inv_transform(block16);
1173 1177
         else
1174
-            r->rdsp.rv34_inv_transform_dc_tab[1](block16);
1175
-    }
1178
+            r->rdsp.rv34_inv_transform_dc(block16);
1179
+
1180
+        q_ac = rv34_qscale_tab[s->qscale];
1181
+
1182
+        for(j = 0; j < 4; j++){
1183
+            for(i = 0; i < 4; i++, cbp >>= 1){
1184
+                int      dc   = block16[i + j*4];
1185
+
1186
+                if(cbp & 1){
1187
+                    has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1188
+                }else
1189
+                    has_ac = 0;
1190
+
1191
+                if(has_ac){
1192
+                    ptr[0] = dc;
1193
+                    r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1194
+                }else
1195
+                    r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1196
+            }
1197
+
1198
+            dst += 4*s->linesize;
1199
+        }
1176 1200
 
1177
-    q_ac = rv34_qscale_tab[s->qscale];
1178
-    for(i = 0; i < 16; i++, cbp >>= 1){
1179
-        DCTELEM *ptr;
1180
-        if(!r->is16 && !(cbp & 1)) continue;
1181
-        blknum = ((i & 2) >> 1) + ((i & 8) >> 2);
1182
-        blkoff = ((i & 1) << 2) + ((i & 4) << 3);
1183
-        ptr    = s->block[blknum] + blkoff;
1184
-        if(cbp & 1)
1185
-            has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1186
-        else
1187
-            has_ac = 0;
1188
-        if(r->is16) //FIXME: optimize
1189
-            ptr[0] = block16[(i & 3) | ((i & 0xC) << 1)];
1190
-        if(has_ac)
1191
-            r->rdsp.rv34_inv_transform_tab[0](ptr);
1192
-        else
1193
-            r->rdsp.rv34_inv_transform_dc_tab[0](ptr);
1194
-    }
1195
-    if(r->block_type == RV34_MB_P_MIX16x16)
1196 1201
         r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1202
+    }else{
1203
+        q_ac = rv34_qscale_tab[s->qscale];
1204
+
1205
+        for(j = 0; j < 4; j++){
1206
+            for(i = 0; i < 4; i++, cbp >>= 1){
1207
+                if(!(cbp & 1)) continue;
1208
+
1209
+                rv34_process_block(r, dst + 4*i, s->linesize,
1210
+                                   r->luma_vlc, 0, q_ac, q_ac);
1211
+            }
1212
+            dst += 4*s->linesize;
1213
+        }
1214
+    }
1215
+
1197 1216
     q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1198 1217
     q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1199
-    for(; i < 24; i++, cbp >>= 1){
1200
-        DCTELEM *ptr;
1201
-        if(!(cbp & 1)) continue;
1202
-        blknum = ((i & 4) >> 2) + 4;
1203
-        blkoff = ((i & 1) << 2) + ((i & 2) << 4);
1204
-        ptr    = s->block[blknum] + blkoff;
1205
-        if (rv34_decode_block(ptr, gb, r->cur_vlcs, r->chroma_vlc, 1, q_dc, q_ac, q_ac))
1206
-            r->rdsp.rv34_inv_transform_tab[0](ptr);
1207
-        else
1208
-            r->rdsp.rv34_inv_transform_dc_tab[0](ptr);
1218
+
1219
+    for(j = 1; j < 3; j++){
1220
+        dst = s->dest[j];
1221
+        for(i = 0; i < 4; i++, cbp >>= 1){
1222
+            uint8_t *pdst;
1223
+            if(!(cbp & 1)) continue;
1224
+            pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1225
+
1226
+            rv34_process_block(r, pdst, s->uvlinesize,
1227
+                               r->chroma_vlc, 1, q_dc, q_ac);
1228
+        }
1209 1229
     }
1210
-    if (IS_INTRA(s->current_picture_ptr->f.mb_type[mb_pos]))
1211
-        rv34_output_macroblock(r, intra_types, cbp2, r->is16);
1212
-    else
1213
-        rv34_apply_differences(r, cbp2);
1214 1230
 
1215 1231
     return 0;
1216 1232
 }
1217 1233
 
1234
+static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1235
+{
1236
+    MpegEncContext *s = &r->s;
1237
+    int cbp, dist;
1238
+    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1239
+
1240
+    // Calculate which neighbours are available. Maybe it's worth optimizing too.
1241
+    memset(r->avail_cache, 0, sizeof(r->avail_cache));
1242
+    fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1243
+    dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1244
+    if(s->mb_x && dist)
1245
+        r->avail_cache[5] =
1246
+        r->avail_cache[9] = s->current_picture_ptr->f.mb_type[mb_pos - 1];
1247
+    if(dist >= s->mb_width)
1248
+        r->avail_cache[2] =
1249
+        r->avail_cache[3] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride];
1250
+    if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1251
+        r->avail_cache[4] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride + 1];
1252
+    if(s->mb_x && dist > s->mb_width)
1253
+        r->avail_cache[1] = s->current_picture_ptr->f.mb_type[mb_pos - s->mb_stride - 1];
1254
+
1255
+    s->qscale = r->si.quant;
1256
+    cbp = rv34_decode_intra_mb_header(r, intra_types);
1257
+    r->cbp_luma  [mb_pos] = cbp;
1258
+    r->cbp_chroma[mb_pos] = cbp >> 16;
1259
+    r->deblock_coefs[mb_pos] = 0xFFFF;
1260
+    s->current_picture_ptr->f.qscale_table[mb_pos] = s->qscale;
1261
+
1262
+    if(cbp == -1)
1263
+        return -1;
1264
+
1265
+    if(r->is16){
1266
+        rv34_output_i16x16(r, intra_types, cbp);
1267
+        return 0;
1268
+    }
1269
+
1270
+    rv34_output_intra(r, intra_types, cbp);
1271
+    return 0;
1272
+}
1273
+
1218 1274
 static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
1219 1275
 {
1220 1276
     int bits;
... ...
@@ -1326,9 +1457,12 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
1326 1326
     ff_init_block_index(s);
1327 1327
     while(!check_slice_end(r, s)) {
1328 1328
         ff_update_block_index(s);
1329
-        s->dsp.clear_blocks(s->block[0]);
1330 1329
 
1331
-        if(rv34_decode_macroblock(r, r->intra_types + s->mb_x * 4 + 4) < 0){
1330
+        if(r->si.type)
1331
+            res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1332
+        else
1333
+            res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1334
+        if(res < 0){
1332 1335
             ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1333 1336
             return -1;
1334 1337
         }
... ...
@@ -37,10 +37,10 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
37 37
     int i;
38 38
 
39 39
     for(i = 0; i < 4; i++){
40
-        const int z0 = 13*(block[i+8*0] +    block[i+8*2]);
41
-        const int z1 = 13*(block[i+8*0] -    block[i+8*2]);
42
-        const int z2 =  7* block[i+8*1] - 17*block[i+8*3];
43
-        const int z3 = 17* block[i+8*1] +  7*block[i+8*3];
40
+        const int z0 = 13*(block[i+4*0] +    block[i+4*2]);
41
+        const int z1 = 13*(block[i+4*0] -    block[i+4*2]);
42
+        const int z2 =  7* block[i+4*1] - 17*block[i+4*3];
43
+        const int z3 = 17* block[i+4*1] +  7*block[i+4*3];
44 44
 
45 45
         temp[4*i+0] = z0 + z3;
46 46
         temp[4*i+1] = z1 + z2;
... ...
@@ -50,14 +50,16 @@ static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block)
50 50
 }
51 51
 
52 52
 /**
53
- * Real Video 3.0/4.0 inverse transform
53
+ * Real Video 3.0/4.0 inverse transform + sample reconstruction
54 54
  * Code is almost the same as in SVQ3, only scaling is different.
55 55
  */
56
-static void rv34_inv_transform_c(DCTELEM *block){
57
-    int temp[16];
58
-    int i;
56
+static void rv34_idct_add_c(uint8_t *dst, int stride, DCTELEM *block){
57
+    int      temp[16];
58
+    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
59
+    int      i;
59 60
 
60 61
     rv34_row_transform(temp, block);
62
+    memset(block, 0, 16*sizeof(DCTELEM));
61 63
 
62 64
     for(i = 0; i < 4; i++){
63 65
         const int z0 = 13*(temp[4*0+i] +    temp[4*2+i]) + 0x200;
... ...
@@ -65,10 +67,12 @@ static void rv34_inv_transform_c(DCTELEM *block){
65 65
         const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
66 66
         const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];
67 67
 
68
-        block[i*8+0] = (z0 + z3) >> 10;
69
-        block[i*8+1] = (z1 + z2) >> 10;
70
-        block[i*8+2] = (z1 - z2) >> 10;
71
-        block[i*8+3] = (z0 - z3) >> 10;
68
+        dst[0] = cm[ dst[0] + ( (z0 + z3) >> 10 ) ];
69
+        dst[1] = cm[ dst[1] + ( (z1 + z2) >> 10 ) ];
70
+        dst[2] = cm[ dst[2] + ( (z1 - z2) >> 10 ) ];
71
+        dst[3] = cm[ dst[3] + ( (z0 - z3) >> 10 ) ];
72
+
73
+        dst  += stride;
72 74
     }
73 75
 }
74 76
 
... ...
@@ -90,21 +94,27 @@ static void rv34_inv_transform_noround_c(DCTELEM *block){
90 90
         const int z2 =  7* temp[4*1+i] - 17*temp[4*3+i];
91 91
         const int z3 = 17* temp[4*1+i] +  7*temp[4*3+i];
92 92
 
93
-        block[i*8+0] = ((z0 + z3) * 3) >> 11;
94
-        block[i*8+1] = ((z1 + z2) * 3) >> 11;
95
-        block[i*8+2] = ((z1 - z2) * 3) >> 11;
96
-        block[i*8+3] = ((z0 - z3) * 3) >> 11;
93
+        block[i*4+0] = ((z0 + z3) * 3) >> 11;
94
+        block[i*4+1] = ((z1 + z2) * 3) >> 11;
95
+        block[i*4+2] = ((z1 - z2) * 3) >> 11;
96
+        block[i*4+3] = ((z0 - z3) * 3) >> 11;
97 97
     }
98 98
 }
99 99
 
100
-static void rv34_inv_transform_dc_c(DCTELEM *block)
100
+static void rv34_idct_dc_add_c(uint8_t *dst, int stride, int dc)
101 101
 {
102
-    DCTELEM dc = (13 * 13 * block[0] + 0x200) >> 10;
102
+    const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
103 103
     int i, j;
104 104
 
105
-    for (i = 0; i < 4; i++, block += 8)
105
+    cm += (13*13*dc + 0x200) >> 10;
106
+
107
+    for (i = 0; i < 4; i++)
108
+    {
106 109
         for (j = 0; j < 4; j++)
107
-            block[j] = dc;
110
+            dst[j] = cm[ dst[j] ];
111
+
112
+        dst += stride;
113
+    }
108 114
 }
109 115
 
110 116
 static void rv34_inv_transform_dc_noround_c(DCTELEM *block)
... ...
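rv34_idct_dc_add_c above applies a DC-only block by offsetting the ff_cropTbl midpoint pointer by the scaled DC, so the lookup cm[dst[j]] becomes a clamped "dst[j] + dc". A scalar sketch of the same operation, using the identical (13*13*dc + 0x200) >> 10 scaling; for example dc_coef = 32 yields (169*32 + 512) >> 10 = 5 added to every pixel of the 4x4 block:

    #include <stdint.h>
    #include "libavutil/common.h"

    /* Scalar equivalent of the biased-crop-table lookup in rv34_idct_dc_add_c(). */
    static uint8_t add_dc_clamped(uint8_t pix, int dc_coef)
    {
        int dc = (13 * 13 * dc_coef + 0x200) >> 10;  /* same rounding as the full IDCT */
        return av_clip_uint8(pix + dc);
    }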
@@ -112,7 +122,7 @@ static void rv34_inv_transform_dc_noround_c(DCTELEM *block)
112 112
     DCTELEM dc = (13 * 13 * 3 * block[0]) >> 11;
113 113
     int i, j;
114 114
 
115
-    for (i = 0; i < 4; i++, block += 8)
115
+    for (i = 0; i < 4; i++, block += 4)
116 116
         for (j = 0; j < 4; j++)
117 117
             block[j] = dc;
118 118
 }
... ...
@@ -121,10 +131,11 @@ static void rv34_inv_transform_dc_noround_c(DCTELEM *block)
121 121
 
122 122
 
123 123
 av_cold void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp) {
124
-    c->rv34_inv_transform_tab[0] = rv34_inv_transform_c;
125
-    c->rv34_inv_transform_tab[1] = rv34_inv_transform_noround_c;
126
-    c->rv34_inv_transform_dc_tab[0]  = rv34_inv_transform_dc_c;
127
-    c->rv34_inv_transform_dc_tab[1]  = rv34_inv_transform_dc_noround_c;
124
+    c->rv34_inv_transform    = rv34_inv_transform_noround_c;
125
+    c->rv34_inv_transform_dc = rv34_inv_transform_dc_noround_c;
126
+
127
+    c->rv34_idct_add    = rv34_idct_add_c;
128
+    c->rv34_idct_dc_add = rv34_idct_dc_add_c;
128 129
 
129 130
     if (HAVE_NEON)
130 131
         ff_rv34dsp_init_neon(c, dsp);
... ...
@@ -36,6 +36,10 @@ typedef void (*rv40_weight_func)(uint8_t *dst/*align width (8 or 16)*/,
36 36
 
37 37
 typedef void (*rv34_inv_transform_func)(DCTELEM *block);
38 38
 
39
+typedef void (*rv34_idct_add_func)(uint8_t *dst, int stride, DCTELEM *block);
40
+typedef void (*rv34_idct_dc_add_func)(uint8_t *dst, int stride,
41
+                                      int   dc);
42
+
39 43
 typedef void (*rv40_weak_loop_filter_func)(uint8_t *src, int stride,
40 44
                                            int filter_p1, int filter_q1,
41 45
                                            int alpha, int beta,
... ...
@@ -55,8 +59,10 @@ typedef struct RV34DSPContext {
55 55
     h264_chroma_mc_func put_chroma_pixels_tab[3];
56 56
     h264_chroma_mc_func avg_chroma_pixels_tab[3];
57 57
     rv40_weight_func rv40_weight_pixels_tab[2];
58
-    rv34_inv_transform_func rv34_inv_transform_tab[2];
59
-    void (*rv34_inv_transform_dc_tab[2])(DCTELEM *block);
58
+    rv34_inv_transform_func rv34_inv_transform;
59
+    rv34_inv_transform_func rv34_inv_transform_dc;
60
+    rv34_idct_add_func rv34_idct_add;
61
+    rv34_idct_dc_add_func rv34_idct_dc_add;
60 62
     rv40_weak_loop_filter_func rv40_weak_loop_filter[2];
61 63
     rv40_strong_loop_filter_func rv40_strong_loop_filter[2];
62 64
     rv40_loop_filter_strength_func rv40_loop_filter_strength[2];
... ...
@@ -25,6 +25,7 @@
25 25
  * utils.
26 26
  */
27 27
 
28
+#include "libavutil/avassert.h"
28 29
 #include "libavutil/avstring.h"
29 30
 #include "libavutil/crc.h"
30 31
 #include "libavutil/mathematics.h"
... ...
@@ -102,6 +103,16 @@ void avcodec_init(void)
102 102
     dsputil_static_init();
103 103
 }
104 104
 
105
+static av_always_inline int codec_is_encoder(AVCodec *codec)
106
+{
107
+    return codec && (codec->encode || codec->encode2);
108
+}
109
+
110
+static av_always_inline int codec_is_decoder(AVCodec *codec)
111
+{
112
+    return codec && codec->decode;
113
+}
114
+
105 115
 void avcodec_register(AVCodec *codec)
106 116
 {
107 117
     AVCodec **p;
... ...
@@ -260,11 +271,47 @@ void ff_init_buffer_info(AVCodecContext *s, AVFrame *pic)
260 260
     pic->format              = s->pix_fmt;
261 261
 }
262 262
 
263
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
264
+                             enum AVSampleFormat sample_fmt, const uint8_t *buf,
265
+                             int buf_size, int align)
266
+{
267
+    int ch, planar, needed_size, ret = 0;
268
+
269
+    needed_size = av_samples_get_buffer_size(NULL, nb_channels,
270
+                                             frame->nb_samples, sample_fmt,
271
+                                             align);
272
+    if (buf_size < needed_size)
273
+        return AVERROR(EINVAL);
274
+
275
+    planar = av_sample_fmt_is_planar(sample_fmt);
276
+    if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
277
+        if (!(frame->extended_data = av_mallocz(nb_channels *
278
+                                                sizeof(*frame->extended_data))))
279
+            return AVERROR(ENOMEM);
280
+    } else {
281
+        frame->extended_data = frame->data;
282
+    }
283
+
284
+    if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
285
+                                      buf, nb_channels, frame->nb_samples,
286
+                                      sample_fmt, align)) < 0) {
287
+        if (frame->extended_data != frame->data)
288
+            av_free(frame->extended_data);
289
+        return ret;
290
+    }
291
+    if (frame->extended_data != frame->data) {
292
+        for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
293
+            frame->data[ch] = frame->extended_data[ch];
294
+    }
295
+
296
+    return ret;
297
+}
298
+
263 299
 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
264 300
 {
265 301
     AVCodecInternal *avci = avctx->internal;
266 302
     InternalBuffer *buf;
267
-    int buf_size, ret, i, needs_extended_data;
303
+    int buf_size, ret;
268 304
 
269 305
     buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
270 306
                                           frame->nb_samples, avctx->sample_fmt,
... ...
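avcodec_fill_audio_frame() above only sets up frame->data/extended_data/linesize over a caller-owned sample buffer; the only allocation it may make is the extended_data pointer array for planar formats with more channels than AV_NUM_DATA_POINTERS. A hedged usage sketch (wrap_samples is illustrative; align 1 matches what the compatibility wrapper later in this diff passes):

    #include "libavcodec/avcodec.h"
    #include "libavutil/samplefmt.h"

    /* Wrap an existing buffer of enc->frame_size samples per channel, laid out
     * for enc->sample_fmt, in an AVFrame so it can be handed to the encoder. */
    static int wrap_samples(AVCodecContext *enc, AVFrame *frame, const uint8_t *samples)
    {
        int size;

        avcodec_get_frame_defaults(frame);
        frame->nb_samples = enc->frame_size;
        size = av_samples_get_buffer_size(NULL, enc->channels, frame->nb_samples,
                                          enc->sample_fmt, 1);
        if (size < 0)
            return size;
        return avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
                                        samples, size, 1);
    }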
@@ -272,9 +319,6 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
272 272
     if (buf_size < 0)
273 273
         return AVERROR(EINVAL);
274 274
 
275
-    needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) &&
276
-                          avctx->channels > AV_NUM_DATA_POINTERS;
277
-
278 275
     /* allocate InternalBuffer if needed */
279 276
     if (!avci->buffer) {
280 277
         avci->buffer = av_mallocz(sizeof(InternalBuffer));
... ...
@@ -306,48 +350,31 @@ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
306 306
     /* if there is no previous buffer or the previous buffer cannot be used
307 307
        as-is, allocate a new buffer and/or rearrange the channel pointers */
308 308
     if (!buf->extended_data) {
309
-        /* if the channel pointers will fit, just set extended_data to data,
310
-           otherwise allocate the extended_data channel pointers */
311
-        if (needs_extended_data) {
312
-            buf->extended_data = av_mallocz(avctx->channels *
313
-                                            sizeof(*buf->extended_data));
314
-            if (!buf->extended_data)
309
+        if (!buf->data[0]) {
310
+            if (!(buf->data[0] = av_mallocz(buf_size)))
315 311
                 return AVERROR(ENOMEM);
316
-        } else {
317
-            buf->extended_data = buf->data;
318
-        }
319
-
320
-        /* if there is a previous buffer and it is large enough, reuse it and
321
-           just fill-in new channel pointers and linesize, otherwise allocate
322
-           a new buffer */
323
-        if (buf->extended_data[0]) {
324
-            ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
325
-                                         buf->extended_data[0], avctx->channels,
326
-                                         frame->nb_samples, avctx->sample_fmt,
327
-                                         32);
328
-        } else {
329
-            ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
330
-                                   avctx->channels, frame->nb_samples,
331
-                                   avctx->sample_fmt, 32);
312
+            buf->audio_data_size = buf_size;
332 313
         }
333
-        if (ret)
314
+        if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
315
+                                            avctx->sample_fmt, buf->data[0],
316
+                                            buf->audio_data_size, 32)))
334 317
             return ret;
335 318
 
336
-        /* if data was not used for extended_data, we need to copy as many of
337
-           the extended_data channel pointers as will fit */
338
-        if (needs_extended_data) {
339
-            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
340
-                buf->data[i] = buf->extended_data[i];
341
-        }
342
-        buf->audio_data_size = buf_size;
343
-        buf->nb_channels     = avctx->channels;
319
+        if (frame->extended_data == frame->data)
320
+            buf->extended_data = buf->data;
321
+        else
322
+            buf->extended_data = frame->extended_data;
323
+        memcpy(buf->data, frame->data, sizeof(frame->data));
324
+        buf->linesize[0] = frame->linesize[0];
325
+        buf->nb_channels = avctx->channels;
326
+    } else {
327
+        /* copy InternalBuffer info to the AVFrame */
328
+        frame->extended_data = buf->extended_data;
329
+        frame->linesize[0]   = buf->linesize[0];
330
+        memcpy(frame->data, buf->data, sizeof(frame->data));
344 331
     }
345 332
 
346
-    /* copy InternalBuffer info to the AVFrame */
347 333
     frame->type          = FF_BUFFER_TYPE_INTERNAL;
348
-    frame->extended_data = buf->extended_data;
349
-    frame->linesize[0]   = buf->linesize[0];
350
-    memcpy(frame->data, buf->data, sizeof(frame->data));
351 334
 
352 335
     if (avctx->pkt) {
353 336
         frame->pkt_pts = avctx->pkt->pts;
... ...
@@ -732,7 +759,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
732 732
 
733 733
     /* if the decoder init function was already called previously,
734 734
        free the already allocated subtitle_header before overwriting it */
735
-    if (codec->decode)
735
+    if (codec_is_decoder(codec))
736 736
         av_freep(&avctx->subtitle_header);
737 737
 
738 738
 #define SANE_NB_CHANNELS 128U
... ...
@@ -789,7 +816,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVD
789 789
         ret = AVERROR(EINVAL);
790 790
         goto free_and_end;
791 791
     }
792
-    if (avctx->codec->encode) {
792
+    if (codec_is_encoder(avctx->codec)) {
793 793
         int i;
794 794
         if (avctx->codec->sample_fmts) {
795 795
             for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
... ...
@@ -870,21 +897,225 @@ free_and_end:
870 870
     goto end;
871 871
 }
872 872
 
873
-int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
874
-                         const short *samples)
873
+int ff_alloc_packet(AVPacket *avpkt, int size)
875 874
 {
876
-    if(buf_size < FF_MIN_BUFFER_SIZE && 0){
877
-        av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
878
-        return -1;
875
+    if (size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE)
876
+        return AVERROR(EINVAL);
877
+
878
+    if (avpkt->data) {
879
+        uint8_t *pkt_data;
880
+        int pkt_size;
881
+
882
+        if (avpkt->size < size)
883
+            return AVERROR(EINVAL);
884
+
885
+        pkt_data = avpkt->data;
886
+        pkt_size = avpkt->size;
887
+        av_init_packet(avpkt);
888
+        avpkt->data = pkt_data;
889
+        avpkt->size = pkt_size;
890
+        return 0;
891
+    } else {
892
+        return av_new_packet(avpkt, size);
879 893
     }
880
-    if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){
881
-        int ret = avctx->codec->encode(avctx, buf, buf_size, samples);
882
-        avctx->frame_number++;
883
-        return ret;
884
-    }else
894
+}
895
+
896
+int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
897
+                                              AVPacket *avpkt,
898
+                                              const AVFrame *frame,
899
+                                              int *got_packet_ptr)
900
+{
901
+    int ret;
902
+    int user_packet = !!avpkt->data;
903
+    int nb_samples;
904
+
905
+    if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
906
+        av_init_packet(avpkt);
907
+        avpkt->size = 0;
885 908
         return 0;
909
+    }
910
+
911
+    /* check for valid frame size */
912
+    if (frame) {
913
+        nb_samples = frame->nb_samples;
914
+        if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
915
+            if (nb_samples > avctx->frame_size)
916
+                return AVERROR(EINVAL);
917
+        } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
918
+            if (nb_samples != avctx->frame_size)
919
+                return AVERROR(EINVAL);
920
+        }
921
+    } else {
922
+        nb_samples = avctx->frame_size;
923
+    }
924
+
925
+    if (avctx->codec->encode2) {
926
+        *got_packet_ptr = 0;
927
+        ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
928
+        if (!ret && *got_packet_ptr &&
929
+            !(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
930
+            avpkt->pts = frame->pts;
931
+            avpkt->duration = av_rescale_q(frame->nb_samples,
932
+                                           (AVRational){ 1, avctx->sample_rate },
933
+                                           avctx->time_base);
934
+        }
935
+    } else {
936
+        /* for compatibility with encoders not supporting encode2(), we need to
937
+           allocate a packet buffer if the user has not provided one or check
938
+           the size otherwise */
939
+        int fs_tmp   = 0;
940
+        int buf_size = avpkt->size;
941
+        if (!user_packet) {
942
+            if (avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
943
+                av_assert0(av_get_bits_per_sample(avctx->codec_id) != 0);
944
+                buf_size = nb_samples * avctx->channels *
945
+                           av_get_bits_per_sample(avctx->codec_id) / 8;
946
+            } else {
947
+                /* this is a guess as to the required size.
948
+                   if an encoder needs more than this, it should probably
949
+                   implement encode2() */
950
+                buf_size = 2 * avctx->frame_size * avctx->channels *
951
+                           av_get_bytes_per_sample(avctx->sample_fmt);
952
+                buf_size += FF_MIN_BUFFER_SIZE;
953
+            }
954
+        }
955
+        if ((ret = ff_alloc_packet(avpkt, buf_size)))
956
+            return ret;
957
+
958
+        /* Encoders using AVCodec.encode() that support
959
+           CODEC_CAP_SMALL_LAST_FRAME require avctx->frame_size to be set to
960
+           the smaller size when encoding the last frame.
961
+           This code can be removed once all encoders supporting
962
+           CODEC_CAP_SMALL_LAST_FRAME use encode2() */
963
+        if ((avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) &&
964
+            nb_samples < avctx->frame_size) {
965
+            fs_tmp = avctx->frame_size;
966
+            avctx->frame_size = nb_samples;
967
+        }
968
+
969
+        /* encode the frame */
970
+        ret = avctx->codec->encode(avctx, avpkt->data, avpkt->size,
971
+                                   frame ? frame->data[0] : NULL);
972
+        if (ret >= 0) {
973
+            if (!ret) {
974
+                /* no output. if the packet data was allocated by libavcodec,
975
+                   free it */
976
+                if (!user_packet)
977
+                    av_freep(&avpkt->data);
978
+            } else {
979
+                if (avctx->coded_frame)
980
+                    avpkt->pts = avctx->coded_frame->pts;
981
+                /* Set duration for final small packet. This can be removed
982
+                   once all encoders supporting CODEC_CAP_SMALL_LAST_FRAME use
983
+                   encode2() */
984
+                if (fs_tmp) {
985
+                    avpkt->duration = av_rescale_q(avctx->frame_size,
986
+                                                   (AVRational){ 1, avctx->sample_rate },
987
+                                                   avctx->time_base);
988
+                }
989
+            }
990
+            avpkt->size = ret;
991
+            *got_packet_ptr = (ret > 0);
992
+            ret = 0;
993
+        }
994
+
995
+        if (fs_tmp)
996
+            avctx->frame_size = fs_tmp;
997
+    }
998
+    if (!ret)
999
+        avctx->frame_number++;
1000
+
1001
+    /* NOTE: if we add any audio encoders which output non-keyframe packets,
1002
+             this needs to be moved to the encoders, but for now we can do it
1003
+             here to simplify things */
1004
+    avpkt->flags |= AV_PKT_FLAG_KEY;
1005
+
1006
+    return ret;
886 1007
 }
887 1008
 
1009
+#if FF_API_OLD_ENCODE_AUDIO
1010
+int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
1011
+                                             uint8_t *buf, int buf_size,
1012
+                                             const short *samples)
1013
+{
1014
+    AVPacket pkt;
1015
+    AVFrame frame0;
1016
+    AVFrame *frame;
1017
+    int ret, samples_size, got_packet;
1018
+
1019
+    av_init_packet(&pkt);
1020
+    pkt.data = buf;
1021
+    pkt.size = buf_size;
1022
+
1023
+    if (samples) {
1024
+        frame = &frame0;
1025
+        avcodec_get_frame_defaults(frame);
1026
+
1027
+        if (avctx->frame_size) {
1028
+            frame->nb_samples = avctx->frame_size;
1029
+        } else {
1030
+            /* if frame_size is not set, the number of samples must be
1031
+               calculated from the buffer size */
1032
+            int64_t nb_samples;
1033
+            if (!av_get_bits_per_sample(avctx->codec_id)) {
1034
+                av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
1035
+                       "support this codec\n");
1036
+                return AVERROR(EINVAL);
1037
+            }
1038
+            nb_samples = (int64_t)buf_size * 8 /
1039
+                         (av_get_bits_per_sample(avctx->codec_id) *
1040
+                         avctx->channels);
1041
+            if (nb_samples >= INT_MAX)
1042
+                return AVERROR(EINVAL);
1043
+            frame->nb_samples = nb_samples;
1044
+        }
1045
+
1046
+        /* it is assumed that the samples buffer is large enough based on the
1047
+           relevant parameters */
1048
+        samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
1049
+                                                  frame->nb_samples,
1050
+                                                  avctx->sample_fmt, 1);
1051
+        if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
1052
+                                            avctx->sample_fmt,
1053
+                                            samples, samples_size, 1)))
1054
+            return ret;
1055
+
1056
+        /* fabricate frame pts from sample count.
1057
+           this is needed because the avcodec_encode_audio() API does not have
1058
+           a way for the user to provide pts */
1059
+        if(avctx->sample_rate && avctx->time_base.num)
1060
+            frame->pts = av_rescale_q(avctx->internal->sample_count,
1061
+                                  (AVRational){ 1, avctx->sample_rate },
1062
+                                  avctx->time_base);
1063
+        else
1064
+            frame->pts = AV_NOPTS_VALUE;
1065
+        avctx->internal->sample_count += frame->nb_samples;
1066
+    } else {
1067
+        frame = NULL;
1068
+    }
1069
+
1070
+    got_packet = 0;
1071
+    ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
1072
+    if (!ret && got_packet && avctx->coded_frame) {
1073
+        avctx->coded_frame->pts       = pkt.pts;
1074
+        avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
1075
+    }
1076
+    /* free any side data since we cannot return it */
1077
+    if (pkt.side_data_elems > 0) {
1078
+        int i;
1079
+        for (i = 0; i < pkt.side_data_elems; i++)
1080
+            av_free(pkt.side_data[i].data);
1081
+        av_freep(&pkt.side_data);
1082
+        pkt.side_data_elems = 0;
1083
+    }
1084
+
1085
+    if (frame && frame->extended_data != frame->data)
1086
+        av_free(frame->extended_data);
1087
+
1088
+    return ret ? ret : pkt.size;
1089
+}
1090
+#endif
1091
+
888 1092
 int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
889 1093
                          const AVFrame *pict)
890 1094
 {
... ...
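With avcodec_encode_audio2() the caller passes an initialized AVPacket (data == NULL lets libavcodec allocate the buffer), receives output via *got_packet_ptr, and flushes CODEC_CAP_DELAY encoders by passing frame == NULL. A minimal calling sketch under those assumptions; write_packet() is a placeholder for whatever the caller does with finished packets:

    #include "libavcodec/avcodec.h"

    /* One encode step with the new API; pass frame = NULL at end of stream to
     * drain encoders that set CODEC_CAP_DELAY. */
    static int encode_step(AVCodecContext *enc, const AVFrame *frame,
                           int (*write_packet)(AVPacket *pkt))
    {
        AVPacket pkt;
        int ret, got_packet = 0;

        av_init_packet(&pkt);
        pkt.data = NULL;   /* let the encoder allocate the packet buffer */
        pkt.size = 0;

        ret = avcodec_encode_audio2(enc, &pkt, frame, &got_packet);
        if (ret < 0)
            return ret;
        if (got_packet) {
            ret = write_packet(&pkt);
            av_free_packet(&pkt);
        }
        return ret;
    }

Compared with the old sample-buffer call, the packet-based API lets the encoder size its own output and report pts/duration, which is why the compatibility wrapper above has to guess a buffer size for encode()-only codecs.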
@@ -1187,7 +1418,7 @@ av_cold int avcodec_close(AVCodecContext *avctx)
1187 1187
         av_opt_free(avctx->priv_data);
1188 1188
     av_opt_free(avctx);
1189 1189
     av_freep(&avctx->priv_data);
1190
-    if(avctx->codec && avctx->codec->encode)
1190
+    if (codec_is_encoder(avctx->codec))
1191 1191
         av_freep(&avctx->extradata);
1192 1192
     avctx->codec = NULL;
1193 1193
     avctx->active_thread_type = 0;
... ...
@@ -1216,7 +1447,7 @@ AVCodec *avcodec_find_encoder(enum CodecID id)
1216 1216
     p = first_avcodec;
1217 1217
     id= remap_deprecated_codec_id(id);
1218 1218
     while (p) {
1219
-        if (p->encode != NULL && p->id == id) {
1219
+        if (codec_is_encoder(p) && p->id == id) {
1220 1220
             if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) {
1221 1221
                 experimental = p;
1222 1222
             } else
... ...
@@ -1234,7 +1465,7 @@ AVCodec *avcodec_find_encoder_by_name(const char *name)
1234 1234
         return NULL;
1235 1235
     p = first_avcodec;
1236 1236
     while (p) {
1237
-        if (p->encode != NULL && strcmp(name,p->name) == 0)
1237
+        if (codec_is_encoder(p) && strcmp(name,p->name) == 0)
1238 1238
             return p;
1239 1239
         p = p->next;
1240 1240
     }
... ...
@@ -1247,7 +1478,7 @@ AVCodec *avcodec_find_decoder(enum CodecID id)
1247 1247
     p = first_avcodec;
1248 1248
     id= remap_deprecated_codec_id(id);
1249 1249
     while (p) {
1250
-        if (p->decode != NULL && p->id == id) {
1250
+        if (codec_is_decoder(p) && p->id == id) {
1251 1251
             if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) {
1252 1252
                 experimental = p;
1253 1253
             } else
... ...
@@ -1265,7 +1496,7 @@ AVCodec *avcodec_find_decoder_by_name(const char *name)
1265 1265
         return NULL;
1266 1266
     p = first_avcodec;
1267 1267
     while (p) {
1268
-        if (p->decode != NULL && strcmp(name,p->name) == 0)
1268
+        if (codec_is_decoder(p) && strcmp(name,p->name) == 0)
1269 1269
             return p;
1270 1270
         p = p->next;
1271 1271
     }
... ...
@@ -21,7 +21,7 @@
21 21
 #define AVCODEC_VERSION_H
22 22
 
23 23
 #define LIBAVCODEC_VERSION_MAJOR 53
24
-#define LIBAVCODEC_VERSION_MINOR 55
24
+#define LIBAVCODEC_VERSION_MINOR 56
25 25
 #define LIBAVCODEC_VERSION_MICRO 105
26 26
 
27 27
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
... ...
@@ -123,5 +123,8 @@
123 123
 #ifndef FF_API_AVFRAME_AGE
124 124
 #define FF_API_AVFRAME_AGE (LIBAVCODEC_VERSION_MAJOR < 54)
125 125
 #endif
126
+#ifndef FF_API_OLD_ENCODE_AUDIO
127
+#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
128
+#endif
126 129
 
127 130
 #endif /* AVCODEC_VERSION_H */
... ...
@@ -35,21 +35,84 @@ SECTION .text
35 35
     sar    %1, 10
36 36
 %endmacro
37 37
 
38
-%macro rv34_idct_dequant4x4_dc 1
39
-cglobal rv34_idct_dequant4x4_%1_mmx2, 1, 2, 0
38
+%macro rv34_idct 1
39
+cglobal rv34_idct_%1_mmx2, 1, 2, 0
40 40
     movsx   r1, word [r0]
41 41
     IDCT_DC r1
42
-    movd    mm0, r1d
43
-    pshufw  mm0, mm0, 0
44
-    movq    [r0+ 0], mm0
45
-    movq    [r0+16], mm0
46
-    movq    [r0+32], mm0
47
-    movq    [r0+48], mm0
42
+    movd    m0, r1d
43
+    pshufw  m0, m0, 0
44
+    movq    [r0+ 0], m0
45
+    movq    [r0+ 8], m0
46
+    movq    [r0+16], m0
47
+    movq    [r0+24], m0
48 48
     REP_RET
49 49
 %endmacro
50 50
 
51 51
 INIT_MMX
52 52
 %define IDCT_DC IDCT_DC_ROUND
53
-rv34_idct_dequant4x4_dc dc
53
+rv34_idct dc
54 54
 %define IDCT_DC IDCT_DC_NOROUND
55
-rv34_idct_dequant4x4_dc dc_noround
55
+rv34_idct dc_noround
56
+
57
+; ff_rv34_idct_dc_add_mmx(uint8_t *dst, int stride, int dc);
58
+cglobal rv34_idct_dc_add_mmx, 3, 3
59
+    ; calculate DC
60
+    IDCT_DC_ROUND r2
61
+    pxor       m1, m1
62
+    movd       m0, r2
63
+    psubw      m1, m0
64
+    packuswb   m0, m0
65
+    packuswb   m1, m1
66
+    punpcklbw  m0, m0
67
+    punpcklbw  m1, m1
68
+    punpcklwd  m0, m0
69
+    punpcklwd  m1, m1
70
+
71
+    ; add DC
72
+    lea        r2, [r0+r1*2]
73
+    movh       m2, [r0]
74
+    movh       m3, [r0+r1]
75
+    movh       m4, [r2]
76
+    movh       m5, [r2+r1]
77
+    paddusb    m2, m0
78
+    paddusb    m3, m0
79
+    paddusb    m4, m0
80
+    paddusb    m5, m0
81
+    psubusb    m2, m1
82
+    psubusb    m3, m1
83
+    psubusb    m4, m1
84
+    psubusb    m5, m1
85
+    movh       [r0], m2
86
+    movh       [r0+r1], m3
87
+    movh       [r2], m4
88
+    movh       [r2+r1], m5
89
+    RET
90
+
91
+; ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc);
92
+INIT_XMM
93
+cglobal rv34_idct_dc_add_sse4, 3, 3, 6
94
+    ; round/scale the DC coefficient
95
+    IDCT_DC_ROUND r2
96
+    pxor       m1, m1
97
+
98
+    ; broadcast DC and load the 4x4 block
99
+    movd       m0, r2
100
+    lea        r2, [r0+r1*2]
101
+    movd       m2, [r0]
102
+    movd       m3, [r0+r1]
103
+    pshuflw    m0, m0, 0
104
+    movd       m4, [r2]
105
+    movd       m5, [r2+r1]
106
+    punpcklqdq m0, m0
107
+    punpckldq  m2, m3
108
+    punpckldq  m4, m5
109
+    punpcklbw  m2, m1
110
+    punpcklbw  m4, m1
111
+    paddw      m2, m0
112
+    paddw      m4, m0
113
+    packuswb   m2, m4
114
+    movd      [r0], m2
115
+    pextrd [r0+r1], m2, 1
116
+    pextrd    [r2], m2, 2
117
+    pextrd [r2+r1], m2, 3
118
+    RET
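Both routines above implement the same operation: add a rounded DC term to every pixel of a 4x4 block with unsigned saturation. A hedged C equivalent, assuming `dc` has already been through the scaling that the partially shown IDCT_DC_ROUND macro performs:

    /* av_clip_uint8() comes from libavutil/common.h. */
    static void rv34_idct_dc_add_ref(uint8_t *dst, int stride, int dc)
    {
        int x, y;

        for (y = 0; y < 4; y++, dst += stride)
            for (x = 0; x < 4; x++)
                dst[x] = av_clip_uint8(dst[x] + dc);
    }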
... ...
@@ -24,17 +24,22 @@
24 24
 #include "libavcodec/dsputil.h"
25 25
 #include "libavcodec/rv34dsp.h"
26 26
 
27
-void ff_rv34_idct_dequant4x4_dc_mmx2(DCTELEM *block);
28
-void ff_rv34_idct_dequant4x4_dc_noround_mmx2(DCTELEM *block);
27
+void ff_rv34_idct_dc_mmx2(DCTELEM *block);
28
+void ff_rv34_idct_dc_noround_mmx2(DCTELEM *block);
29
+void ff_rv34_idct_dc_add_mmx(uint8_t *dst, int stride, int dc);
30
+void ff_rv34_idct_dc_add_sse4(uint8_t *dst, int stride, int dc);
29 31
 
30 32
 av_cold void ff_rv34dsp_init_x86(RV34DSPContext* c, DSPContext *dsp)
31 33
 {
32 34
 #if HAVE_YASM
33 35
     int mm_flags = av_get_cpu_flags();
34 36
 
37
+    if (mm_flags & AV_CPU_FLAG_MMX)
38
+        c->rv34_idct_dc_add = ff_rv34_idct_dc_add_mmx;
35 39
     if (mm_flags & AV_CPU_FLAG_MMX2) {
36
-        c->rv34_inv_transform_dc_tab[0] = ff_rv34_idct_dequant4x4_dc_mmx2;
37
-        c->rv34_inv_transform_dc_tab[1] = ff_rv34_idct_dequant4x4_dc_noround_mmx2;
40
+        c->rv34_inv_transform_dc = ff_rv34_idct_dc_noround_mmx2;
38 41
     }
42
+    if (mm_flags & AV_CPU_FLAG_SSE4)
43
+        c->rv34_idct_dc_add = ff_rv34_idct_dc_add_sse4;
39 44
 #endif
40 45
 }
... ...
@@ -291,7 +291,8 @@ OBJS-$(CONFIG_SEGMENT_MUXER)             += segment.o
291 291
 OBJS-$(CONFIG_SHORTEN_DEMUXER)           += rawdec.o
292 292
 OBJS-$(CONFIG_SIFF_DEMUXER)              += siff.o
293 293
 OBJS-$(CONFIG_SMACKER_DEMUXER)           += smacker.o
294
-OBJS-$(CONFIG_SMJPEG_DEMUXER)            += smjpeg.o
294
+OBJS-$(CONFIG_SMJPEG_DEMUXER)            += smjpegdec.o smjpeg.o
295
+OBJS-$(CONFIG_SMJPEG_MUXER)              += smjpegenc.o smjpeg.o
295 296
 OBJS-$(CONFIG_SOL_DEMUXER)               += sol.o pcm.o
296 297
 OBJS-$(CONFIG_SOX_DEMUXER)               += soxdec.o pcm.o
297 298
 OBJS-$(CONFIG_SOX_MUXER)                 += soxenc.o
... ...
@@ -212,7 +212,7 @@ void av_register_all(void)
212 212
     REGISTER_DEMUXER  (SHORTEN, shorten);
213 213
     REGISTER_DEMUXER  (SIFF, siff);
214 214
     REGISTER_DEMUXER  (SMACKER, smacker);
215
-    REGISTER_DEMUXER  (SMJPEG, smjpeg);
215
+    REGISTER_MUXDEMUX (SMJPEG, smjpeg);
216 216
     REGISTER_DEMUXER  (SOL, sol);
217 217
     REGISTER_MUXDEMUX (SOX, sox);
218 218
     REGISTER_MUXDEMUX (SPDIF, spdif);
... ...
@@ -1,6 +1,6 @@
1 1
 /*
2
- * SMJPEG demuxer
3
- * Copyright (c) 2011 Paul B Mahol
2
+ * SMJPEG common code
3
+ * Copyright (c) 2011-2012 Paul B Mahol
4 4
  *
5 5
  * This file is part of FFmpeg.
6 6
  *
... ...
@@ -21,167 +21,20 @@
21 21
 
22 22
 /**
23 23
  * @file
24
- * This is a demuxer for Loki SDL Motion JPEG files
24
+ * SMJPEG common code
25 25
  */
26 26
 
27 27
 #include "avformat.h"
28 28
 #include "internal.h"
29
-#include "riff.h"
29
+#include "smjpeg.h"
30 30
 
31
-static const AVCodecTag codec_smjpeg_video_tags[] = {
31
+const AVCodecTag ff_codec_smjpeg_video_tags[] = {
32 32
     { CODEC_ID_MJPEG,             MKTAG('J', 'F', 'I', 'F') },
33 33
     { CODEC_ID_NONE, 0 },
34 34
 };
35 35
 
36
-static const AVCodecTag codec_smjpeg_audio_tags[] = {
36
+const AVCodecTag ff_codec_smjpeg_audio_tags[] = {
37 37
     { CODEC_ID_ADPCM_IMA_SMJPEG,  MKTAG('A', 'P', 'C', 'M') },
38 38
     { CODEC_ID_PCM_S16LE,         MKTAG('N', 'O', 'N', 'E') },
39 39
     { CODEC_ID_NONE, 0 },
40 40
 };
41
-
42
-typedef struct SMJPEGContext {
43
-    int audio_stream_index;
44
-    int video_stream_index;
45
-} SMJPEGContext;
46
-
47
-static int smjpeg_probe(AVProbeData *p)
48
-{
49
-    if (!memcmp(p->buf, "\x0\xaSMJPEG", 8))
50
-        return AVPROBE_SCORE_MAX;
51
-    return 0;
52
-}
53
-
54
-static int smjpeg_read_header(AVFormatContext *s, AVFormatParameters *ap)
55
-{
56
-    SMJPEGContext *sc = s->priv_data;
57
-    AVStream *ast = NULL, *vst = NULL;
58
-    AVIOContext *pb = s->pb;
59
-    uint32_t version, htype, hlength, duration;
60
-    char *comment;
61
-
62
-    avio_skip(pb, 8); // magic
63
-    version = avio_rb32(pb);
64
-    if (version)
65
-        av_log_ask_for_sample(s, "unknown version %d\n", version);
66
-
67
-    duration = avio_rb32(pb); // in msec
68
-
69
-    while (!pb->eof_reached) {
70
-        htype = avio_rl32(pb);
71
-        switch (htype) {
72
-        case MKTAG('_', 'T', 'X', 'T'):
73
-            hlength = avio_rb32(pb);
74
-            if (!hlength || hlength > 512)
75
-                return AVERROR_INVALIDDATA;
76
-            comment = av_malloc(hlength + 1);
77
-            if (!comment)
78
-                return AVERROR(ENOMEM);
79
-            if (avio_read(pb, comment, hlength) != hlength) {
80
-                av_freep(&comment);
81
-                av_log(s, AV_LOG_ERROR, "error when reading comment\n");
82
-                return AVERROR_INVALIDDATA;
83
-            }
84
-            comment[hlength] = 0;
85
-            av_dict_set(&s->metadata, "comment", comment,
86
-                        AV_DICT_DONT_STRDUP_VAL);
87
-            break;
88
-        case MKTAG('_', 'S', 'N', 'D'):
89
-            if (ast) {
90
-                av_log_ask_for_sample(s, "multiple audio streams not supported\n");
91
-                return AVERROR_INVALIDDATA;
92
-            }
93
-            hlength = avio_rb32(pb);
94
-            if (hlength < 8)
95
-                return AVERROR_INVALIDDATA;
96
-            ast = avformat_new_stream(s, 0);
97
-            if (!ast)
98
-                return AVERROR(ENOMEM);
99
-            ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
100
-            ast->codec->sample_rate = avio_rb16(pb);
101
-            ast->codec->bits_per_coded_sample = avio_r8(pb);
102
-            ast->codec->channels    = avio_r8(pb);
103
-            ast->codec->codec_tag   = avio_rl32(pb);
104
-            ast->codec->codec_id    = ff_codec_get_id(codec_smjpeg_audio_tags,
105
-                                                      ast->codec->codec_tag);
106
-            ast->duration           = duration;
107
-            sc->audio_stream_index  = ast->index;
108
-            avpriv_set_pts_info(ast, 32, 1, 1000);
109
-            avio_skip(pb, hlength - 8);
110
-            break;
111
-        case MKTAG('_', 'V', 'I', 'D'):
112
-            if (vst) {
113
-                av_log_ask_for_sample(s, "multiple video streams not supported\n");
114
-                return AVERROR_INVALIDDATA;
115
-            }
116
-            hlength = avio_rb32(pb);
117
-            if (hlength < 12)
118
-                return AVERROR_INVALIDDATA;
119
-            avio_skip(pb, 4); // number of frames
120
-            vst = avformat_new_stream(s, 0);
121
-            if (!vst)
122
-                return AVERROR(ENOMEM);
123
-            vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
124
-            vst->codec->width      = avio_rb16(pb);
125
-            vst->codec->height     = avio_rb16(pb);
126
-            vst->codec->codec_tag  = avio_rl32(pb);
127
-            vst->codec->codec_id   = ff_codec_get_id(codec_smjpeg_video_tags,
128
-                                                     vst->codec->codec_tag);
129
-            vst->duration          = duration;
130
-            sc->video_stream_index = vst->index;
131
-            avpriv_set_pts_info(vst, 32, 1, 1000);
132
-            avio_skip(pb, hlength - 12);
133
-            break;
134
-        case MKTAG('H', 'E', 'N', 'D'):
135
-            return 0;
136
-        default:
137
-            av_log(s, AV_LOG_ERROR, "unknown header %x\n", htype);
138
-            return AVERROR_INVALIDDATA;
139
-        }
140
-    }
141
-
142
-    return AVERROR_EOF;
143
-}
144
-
145
-static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
146
-{
147
-    SMJPEGContext *sc = s->priv_data;
148
-    uint32_t dtype, ret, size, timestamp;
149
-
150
-    if (s->pb->eof_reached)
151
-        return AVERROR_EOF;
152
-    dtype = avio_rl32(s->pb);
153
-    switch (dtype) {
154
-    case MKTAG('s', 'n', 'd', 'D'):
155
-        timestamp = avio_rb32(s->pb);
156
-        size = avio_rb32(s->pb);
157
-        ret = av_get_packet(s->pb, pkt, size);
158
-        pkt->stream_index = sc->audio_stream_index;
159
-        pkt->pts = timestamp;
160
-        break;
161
-    case MKTAG('v', 'i', 'd', 'D'):
162
-        timestamp = avio_rb32(s->pb);
163
-        size = avio_rb32(s->pb);
164
-        ret = av_get_packet(s->pb, pkt, size);
165
-        pkt->stream_index = sc->video_stream_index;
166
-        pkt->pts = timestamp;
167
-        break;
168
-    case MKTAG('D', 'O', 'N', 'E'):
169
-        ret = AVERROR_EOF;
170
-        break;
171
-    default:
172
-        av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
173
-        ret = AVERROR_INVALIDDATA;
174
-        break;
175
-    }
176
-    return ret;
177
-}
178
-
179
-AVInputFormat ff_smjpeg_demuxer = {
180
-    .name           = "smjpeg",
181
-    .long_name      = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
182
-    .priv_data_size = sizeof(SMJPEGContext),
183
-    .read_probe     = smjpeg_probe,
184
-    .read_header    = smjpeg_read_header,
185
-    .read_packet    = smjpeg_read_packet,
186
-    .extensions     = "mjpg",
187
-};
188 41
new file mode 100644
... ...
@@ -0,0 +1,45 @@
0
+/*
1
+ * SMJPEG common code
2
+ * Copyright (c) 2011-2012 Paul B Mahol
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * SMJPEG common code
24
+ */
25
+
26
+#ifndef AVFORMAT_SMJPEG_H
27
+#define AVFORMAT_SMJPEG_H
28
+
29
+#include "internal.h"
30
+
31
+#define SMJPEG_MAGIC "\x0\xaSMJPEG"
32
+
33
+#define SMJPEG_DONE     MKTAG('D', 'O', 'N', 'E')
34
+#define SMJPEG_HEND     MKTAG('H', 'E', 'N', 'D')
35
+#define SMJPEG_SND      MKTAG('_', 'S', 'N', 'D')
36
+#define SMJPEG_SNDD     MKTAG('s', 'n', 'd', 'D')
37
+#define SMJPEG_TXT      MKTAG('_', 'T', 'X', 'T')
38
+#define SMJPEG_VID      MKTAG('_', 'V', 'I', 'D')
39
+#define SMJPEG_VIDD     MKTAG('v', 'i', 'd', 'D')
40
+
41
+extern const AVCodecTag ff_codec_smjpeg_video_tags[];
42
+extern const AVCodecTag ff_codec_smjpeg_audio_tags[];
43
+
44
+#endif /* AVFORMAT_SMJPEG_H */
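The chunk identifiers above are ordinary MKTAG values (MKTAG is from libavutil/common.h and packs its first argument into the lowest byte), which is why both the demuxer and the muxer below read and write them with avio_rl32()/avio_wl32(): the little-endian 32-bit value keeps the characters in file order. For example:

    uint32_t txt = MKTAG('_', 'T', 'X', 'T');
    /* txt == '_' | 'T' << 8 | 'X' << 16 | 'T' << 24, so avio_wl32() emits
     * the bytes '_', 'T', 'X', 'T' exactly as they appear in the file. */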
0 45
new file mode 100644
... ...
@@ -0,0 +1,177 @@
0
+/*
1
+ * SMJPEG demuxer
2
+ * Copyright (c) 2011 Paul B Mahol
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * This is a demuxer for Loki SDL Motion JPEG files
24
+ */
25
+
26
+#include "avformat.h"
27
+#include "internal.h"
28
+#include "riff.h"
29
+#include "smjpeg.h"
30
+
31
+typedef struct SMJPEGContext {
32
+    int audio_stream_index;
33
+    int video_stream_index;
34
+} SMJPEGContext;
35
+
36
+static int smjpeg_probe(AVProbeData *p)
37
+{
38
+    if (!memcmp(p->buf, SMJPEG_MAGIC, 8))
39
+        return AVPROBE_SCORE_MAX;
40
+    return 0;
41
+}
42
+
43
+static int smjpeg_read_header(AVFormatContext *s, AVFormatParameters *ap)
44
+{
45
+    SMJPEGContext *sc = s->priv_data;
46
+    AVStream *ast = NULL, *vst = NULL;
47
+    AVIOContext *pb = s->pb;
48
+    uint32_t version, htype, hlength, duration;
49
+    char *comment;
50
+
51
+    avio_skip(pb, 8); // magic
52
+    version = avio_rb32(pb);
53
+    if (version)
54
+        av_log_ask_for_sample(s, "unknown version %d\n", version);
55
+
56
+    duration = avio_rb32(pb); // in msec
57
+
58
+    while (!pb->eof_reached) {
59
+        htype = avio_rl32(pb);
60
+        switch (htype) {
61
+        case SMJPEG_TXT:
62
+            hlength = avio_rb32(pb);
63
+            if (!hlength || hlength > 512)
64
+                return AVERROR_INVALIDDATA;
65
+            comment = av_malloc(hlength + 1);
66
+            if (!comment)
67
+                return AVERROR(ENOMEM);
68
+            if (avio_read(pb, comment, hlength) != hlength) {
69
+                av_freep(&comment);
70
+                av_log(s, AV_LOG_ERROR, "error when reading comment\n");
71
+                return AVERROR_INVALIDDATA;
72
+            }
73
+            comment[hlength] = 0;
74
+            av_dict_set(&s->metadata, "comment", comment,
75
+                        AV_DICT_DONT_STRDUP_VAL);
76
+            break;
77
+        case SMJPEG_SND:
78
+            if (ast) {
79
+                av_log_ask_for_sample(s, "multiple audio streams not supported\n");
80
+                return AVERROR_INVALIDDATA;
81
+            }
82
+            hlength = avio_rb32(pb);
83
+            if (hlength < 8)
84
+                return AVERROR_INVALIDDATA;
85
+            ast = avformat_new_stream(s, 0);
86
+            if (!ast)
87
+                return AVERROR(ENOMEM);
88
+            ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
89
+            ast->codec->sample_rate = avio_rb16(pb);
90
+            ast->codec->bits_per_coded_sample = avio_r8(pb);
91
+            ast->codec->channels    = avio_r8(pb);
92
+            ast->codec->codec_tag   = avio_rl32(pb);
93
+            ast->codec->codec_id    = ff_codec_get_id(ff_codec_smjpeg_audio_tags,
94
+                                                      ast->codec->codec_tag);
95
+            ast->duration           = duration;
96
+            sc->audio_stream_index  = ast->index;
97
+            avpriv_set_pts_info(ast, 32, 1, 1000);
98
+            avio_skip(pb, hlength - 8);
99
+            break;
100
+        case SMJPEG_VID:
101
+            if (vst) {
102
+                av_log_ask_for_sample(s, "multiple video streams not supported\n");
103
+                return AVERROR_INVALIDDATA;
104
+            }
105
+            hlength = avio_rb32(pb);
106
+            if (hlength < 12)
107
+                return AVERROR_INVALIDDATA;
108
+            avio_skip(pb, 4); // number of frames
109
+            vst = avformat_new_stream(s, 0);
110
+            if (!vst)
111
+                return AVERROR(ENOMEM);
112
+            vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
113
+            vst->codec->width      = avio_rb16(pb);
114
+            vst->codec->height     = avio_rb16(pb);
115
+            vst->codec->codec_tag  = avio_rl32(pb);
116
+            vst->codec->codec_id   = ff_codec_get_id(ff_codec_smjpeg_video_tags,
117
+                                                     vst->codec->codec_tag);
118
+            vst->duration          = duration;
119
+            sc->video_stream_index = vst->index;
120
+            avpriv_set_pts_info(vst, 32, 1, 1000);
121
+            avio_skip(pb, hlength - 12);
122
+            break;
123
+        case SMJPEG_HEND:
124
+            return 0;
125
+        default:
126
+            av_log(s, AV_LOG_ERROR, "unknown header %x\n", htype);
127
+            return AVERROR_INVALIDDATA;
128
+        }
129
+    }
130
+
131
+    return AVERROR_EOF;
132
+}
133
+
134
+static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
135
+{
136
+    SMJPEGContext *sc = s->priv_data;
137
+    uint32_t dtype, ret, size, timestamp;
138
+
139
+    if (s->pb->eof_reached)
140
+        return AVERROR_EOF;
141
+    dtype = avio_rl32(s->pb);
142
+    switch (dtype) {
143
+    case SMJPEG_SNDD:
144
+        timestamp = avio_rb32(s->pb);
145
+        size = avio_rb32(s->pb);
146
+        ret = av_get_packet(s->pb, pkt, size);
147
+        pkt->stream_index = sc->audio_stream_index;
148
+        pkt->pts = timestamp;
149
+        break;
150
+    case SMJPEG_VIDD:
151
+        timestamp = avio_rb32(s->pb);
152
+        size = avio_rb32(s->pb);
153
+        ret = av_get_packet(s->pb, pkt, size);
154
+        pkt->stream_index = sc->video_stream_index;
155
+        pkt->pts = timestamp;
156
+        break;
157
+    case SMJPEG_DONE:
158
+        ret = AVERROR_EOF;
159
+        break;
160
+    default:
161
+        av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
162
+        ret = AVERROR_INVALIDDATA;
163
+        break;
164
+    }
165
+    return ret;
166
+}
167
+
168
+AVInputFormat ff_smjpeg_demuxer = {
169
+    .name           = "smjpeg",
170
+    .long_name      = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
171
+    .priv_data_size = sizeof(SMJPEGContext),
172
+    .read_probe     = smjpeg_probe,
173
+    .read_header    = smjpeg_read_header,
174
+    .read_packet    = smjpeg_read_packet,
175
+    .extensions     = "mjpg",
176
+};
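A minimal sketch of exercising the demuxer through the public libavformat API of this version; "clip.mjpg" is only a placeholder name and most error handling is omitted:

    #include "libavformat/avformat.h"

    static int dump_smjpeg(void)
    {
        AVFormatContext *ic = NULL;
        AVPacket pkt;

        av_register_all();
        if (avformat_open_input(&ic, "clip.mjpg", NULL, NULL) < 0)
            return -1;
        avformat_find_stream_info(ic, NULL);

        while (av_read_frame(ic, &pkt) >= 0) {
            /* pkt.pts is in milliseconds: the demuxer sets a 1/1000 time base */
            av_free_packet(&pkt);
        }

        avformat_close_input(&ic);
        return 0;
    }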
0 177
new file mode 100644
... ...
@@ -0,0 +1,149 @@
0
+/*
1
+ * SMJPEG muxer
2
+ * Copyright (c) 2012 Paul B Mahol
3
+ *
4
+ * This file is part of Libav.
5
+ *
6
+ * Libav is free software; you can redistribute it and/or
7
+ * modify it under the terms of the GNU Lesser General Public
8
+ * License as published by the Free Software Foundation; either
9
+ * version 2.1 of the License, or (at your option) any later version.
10
+ *
11
+ * Libav is distributed in the hope that it will be useful,
12
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
+ * Lesser General Public License for more details.
15
+ *
16
+ * You should have received a copy of the GNU Lesser General Public
17
+ * License along with Libav; if not, write to the Free Software
18
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
+ */
20
+
21
+/**
22
+ * @file
23
+ * This is a muxer for Loki SDL Motion JPEG files
24
+ */
25
+
26
+#include "avformat.h"
27
+#include "internal.h"
28
+#include "riff.h"
29
+#include "smjpeg.h"
30
+
31
+typedef struct SMJPEGMuxContext {
32
+    uint32_t duration;
33
+} SMJPEGMuxContext;
34
+
35
+static int smjpeg_write_header(AVFormatContext *s)
36
+{
37
+    AVDictionaryEntry *t = NULL;
38
+    AVIOContext *pb = s->pb;
39
+    int n, tag;
40
+
41
+    if (s->nb_streams > 2) {
42
+        av_log(s, AV_LOG_ERROR, "more than two streams are not supported\n");
43
+        return AVERROR(EINVAL);
44
+    }
45
+    avio_write(pb, SMJPEG_MAGIC, 8);
46
+    avio_wb32(pb, 0);
47
+    avio_wb32(pb, 0);
48
+
49
+    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
50
+        avio_wl32(pb, SMJPEG_TXT);
51
+        avio_wb32(pb, strlen(t->key) + strlen(t->value) + 3);
52
+        avio_write(pb, t->key, strlen(t->key));
53
+        avio_write(pb, " = ", 3);
54
+        avio_write(pb, t->value, strlen(t->value));
55
+    }
56
+
57
+    for (n = 0; n < s->nb_streams; n++) {
58
+        AVStream *st = s->streams[n];
59
+        AVCodecContext *codec = st->codec;
60
+        if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
61
+            tag = ff_codec_get_tag(ff_codec_smjpeg_audio_tags, codec->codec_id);
62
+            if (!tag) {
63
+                av_log(s, AV_LOG_ERROR, "unsupported audio codec\n");
64
+                return AVERROR(EINVAL);
65
+            }
66
+            avio_wl32(pb, SMJPEG_SND);
67
+            avio_wb32(pb, 8);
68
+            avio_wb16(pb, codec->sample_rate);
69
+            avio_w8(pb, av_get_bits_per_sample(codec->codec_id));
70
+            avio_w8(pb, codec->channels);
71
+            avio_wl32(pb, tag);
72
+            avpriv_set_pts_info(st, 32, 1, 1000);
73
+        } else if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
74
+            tag = ff_codec_get_tag(ff_codec_smjpeg_video_tags, codec->codec_id);
75
+            if (!tag) {
76
+                av_log(s, AV_LOG_ERROR, "unsupported video codec\n");
77
+                return AVERROR(EINVAL);
78
+            }
79
+            avio_wl32(pb, SMJPEG_VID);
80
+            avio_wb32(pb, 12);
81
+            avio_wb32(pb, 0);
82
+            avio_wb16(pb, codec->width);
83
+            avio_wb16(pb, codec->height);
84
+            avio_wl32(pb, tag);
85
+            avpriv_set_pts_info(st, 32, 1, 1000);
86
+        }
87
+    }
88
+
89
+    avio_wl32(pb, SMJPEG_HEND);
90
+    avio_flush(pb);
91
+
92
+    return 0;
93
+}
94
+
95
+static int smjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
96
+{
97
+    SMJPEGMuxContext *smc = s->priv_data;
98
+    AVIOContext *pb = s->pb;
99
+    AVStream *st = s->streams[pkt->stream_index];
100
+    AVCodecContext *codec = st->codec;
101
+
102
+    if (codec->codec_type == AVMEDIA_TYPE_AUDIO)
103
+        avio_wl32(pb, SMJPEG_SNDD);
104
+    else if (codec->codec_type == AVMEDIA_TYPE_VIDEO)
105
+        avio_wl32(pb, SMJPEG_VIDD);
106
+    else
107
+        return 0;
108
+
109
+    avio_wb32(pb, pkt->pts);
110
+    avio_wb32(pb, pkt->size);
111
+    avio_write(pb, pkt->data, pkt->size);
112
+    avio_flush(pb);
113
+
114
+    smc->duration = FFMAX(smc->duration, pkt->pts + pkt->duration);
115
+    return 0;
116
+}
117
+
118
+static int smjpeg_write_trailer(AVFormatContext *s)
119
+{
120
+    SMJPEGMuxContext *smc = s->priv_data;
121
+    AVIOContext *pb = s->pb;
122
+    int64_t currentpos;
123
+
124
+    if (pb->seekable) {
125
+        currentpos = avio_tell(pb);
126
+        avio_seek(pb, 12, SEEK_SET);
127
+        avio_wb32(pb, smc->duration);
128
+        avio_seek(pb, currentpos, SEEK_SET);
129
+    }
130
+
131
+    avio_wl32(pb, SMJPEG_DONE);
132
+    avio_flush(pb);
133
+
134
+    return 0;
135
+}
136
+
137
+AVOutputFormat ff_smjpeg_muxer = {
138
+    .name           = "smjpeg",
139
+    .long_name      = NULL_IF_CONFIG_SMALL("Loki SDL MJPEG"),
140
+    .priv_data_size = sizeof(SMJPEGMuxContext),
141
+    .audio_codec    = CODEC_ID_PCM_S16LE,
142
+    .video_codec    = CODEC_ID_MJPEG,
143
+    .write_header   = smjpeg_write_header,
144
+    .write_packet   = smjpeg_write_packet,
145
+    .write_trailer  = smjpeg_write_trailer,
146
+    .flags          = AVFMT_GLOBALHEADER,
147
+    .codec_tag      = (const AVCodecTag *const []){ ff_codec_smjpeg_video_tags, ff_codec_smjpeg_audio_tags, 0 },
148
+};
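And a corresponding sketch of setting the muxer up from the API side (placeholder file name and stream parameters; the encode/write loop, avio error checks and cleanup are left out):

    #include "libavformat/avformat.h"

    static int open_smjpeg_output(AVFormatContext **out)
    {
        AVFormatContext *oc;
        AVStream *vst, *ast;

        av_register_all();
        avformat_alloc_output_context2(&oc, NULL, "smjpeg", "out.mjpg");

        vst = avformat_new_stream(oc, NULL);
        vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        vst->codec->codec_id   = CODEC_ID_MJPEG;      /* only JFIF-tagged video is accepted */
        vst->codec->width      = 320;
        vst->codec->height     = 240;

        ast = avformat_new_stream(oc, NULL);
        ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id    = CODEC_ID_PCM_S16LE; /* or CODEC_ID_ADPCM_IMA_SMJPEG */
        ast->codec->sample_rate = 22050;
        ast->codec->channels    = 1;

        avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE);
        avformat_write_header(oc, NULL);   /* emits the _SND/_VID headers shown above */

        *out = oc;
        return 0;
    }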
... ...
@@ -30,7 +30,7 @@
30 30
 #include "libavutil/avutil.h"
31 31
 
32 32
 #define LIBAVFORMAT_VERSION_MAJOR 53
33
-#define LIBAVFORMAT_VERSION_MINOR 29
33
+#define LIBAVFORMAT_VERSION_MINOR 30
34 34
 #define LIBAVFORMAT_VERSION_MICRO 100
35 35
 
36 36
 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \