Autodetected by default. Encode using -codec:v h264_videotoolbox.
Signed-off-by: Rick Kern <kernrj@gmail.com>
Signed-off-by: wm4 <nfxjfg@googlemail.com>
... | ... |
@@ -155,7 +155,6 @@ Hardware accelerators: |
155 | 155 |
--disable-vaapi disable VAAPI code [autodetect] |
156 | 156 |
--disable-vda disable VDA code [autodetect] |
157 | 157 |
--disable-vdpau disable VDPAU code [autodetect] |
158 |
- --enable-videotoolbox enable VideoToolbox code [autodetect] |
|
159 | 158 |
|
160 | 159 |
Individual component options: |
161 | 160 |
--disable-everything disable all components listed below |
... | ... |
@@ -289,6 +288,7 @@ External library support: |
289 | 289 |
--disable-sdl disable sdl [autodetect] |
290 | 290 |
--disable-securetransport disable Secure Transport, needed for TLS support |
291 | 291 |
on OSX if openssl and gnutls are not used [autodetect] |
292 |
+ --disable-videotoolbox disable VideoToolbox code [autodetect] |
|
292 | 293 |
--enable-x11grab enable X11 grabbing (legacy) [no] |
293 | 294 |
--disable-xlib disable xlib [autodetect] |
294 | 295 |
--disable-zlib disable zlib [autodetect] |
... | ... |
@@ -1509,6 +1509,7 @@ EXTERNAL_LIBRARY_LIST=" |
1509 | 1509 |
schannel |
1510 | 1510 |
sdl |
1511 | 1511 |
securetransport |
1512 |
+ videotoolbox |
|
1512 | 1513 |
x11grab |
1513 | 1514 |
xlib |
1514 | 1515 |
zlib |
... | ... |
@@ -1540,7 +1541,7 @@ HWACCEL_LIST=" |
1540 | 1540 |
vaapi |
1541 | 1541 |
vda |
1542 | 1542 |
vdpau |
1543 |
- videotoolbox |
|
1543 |
+ videotoolbox_hwaccel |
|
1544 | 1544 |
xvmc |
1545 | 1545 |
" |
1546 | 1546 |
|
... | ... |
@@ -2484,11 +2485,13 @@ crystalhd_deps="libcrystalhd_libcrystalhd_if_h" |
2484 | 2484 |
d3d11va_deps="d3d11_h dxva_h ID3D11VideoDecoder ID3D11VideoContext" |
2485 | 2485 |
dxva2_deps="dxva2api_h DXVA2_ConfigPictureDecode" |
2486 | 2486 |
vaapi_deps="va_va_h" |
2487 |
-vda_deps="VideoDecodeAcceleration_VDADecoder_h pthreads" |
|
2488 |
-vda_extralibs="-framework CoreFoundation -framework VideoDecodeAcceleration -framework QuartzCore" |
|
2487 |
+vda_framework_deps="VideoDecodeAcceleration_VDADecoder_h" |
|
2488 |
+vda_framework_extralibs="-framework VideoDecodeAcceleration" |
|
2489 |
+vda_deps="vda_framework pthreads" |
|
2490 |
+vda_extralibs="-framework CoreFoundation -framework QuartzCore" |
|
2489 | 2491 |
vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h" |
2490 |
-videotoolbox_deps="VideoToolbox_VideoToolbox_h pthreads" |
|
2491 |
-videotoolbox_extralibs="-framework CoreFoundation -framework VideoToolbox -framework CoreMedia -framework QuartzCore -framework CoreVideo" |
|
2492 |
+videotoolbox_hwaccel_deps="videotoolbox pthreads" |
|
2493 |
+videotoolbox_hwaccel_extralibs="-framework QuartzCore" |
|
2492 | 2494 |
xvmc_deps="X11_extensions_XvMClib_h" |
2493 | 2495 |
|
2494 | 2496 |
h263_vaapi_hwaccel_deps="vaapi" |
... | ... |
@@ -2611,6 +2614,8 @@ mjpeg2jpeg_bsf_select="jpegtables" |
2611 | 2611 |
|
2612 | 2612 |
# external libraries |
2613 | 2613 |
chromaprint_muxer_deps="chromaprint" |
2614 |
+h264_videotoolbox_encoder_deps="videotoolbox_encoder pthreads" |
|
2615 |
+h264_videotoolbox_encoder_select="bzlib zlib iconv" |
|
2614 | 2616 |
libcelt_decoder_deps="libcelt" |
2615 | 2617 |
libdcadec_decoder_deps="libdcadec" |
2616 | 2618 |
libfaac_encoder_deps="libfaac" |
... | ... |
@@ -2672,6 +2677,10 @@ libzvbi_teletext_decoder_deps="libzvbi" |
2672 | 2672 |
nvenc_encoder_deps="nvenc" |
2673 | 2673 |
nvenc_h264_encoder_deps="nvenc" |
2674 | 2674 |
nvenc_hevc_encoder_deps="nvenc" |
2675 |
+videotoolbox_deps="VideoToolbox_VideoToolbox_h" |
|
2676 |
+videotoolbox_extralibs="-framework CoreFoundation -framework VideoToolbox -framework CoreMedia -framework CoreVideo" |
|
2677 |
+videotoolbox_encoder_deps="videotoolbox VTCompressionSessionPrepareToEncodeFrames" |
|
2678 |
+videotoolbox_encoder_suggest="vda_framework" |
|
2675 | 2679 |
|
2676 | 2680 |
# demuxers / muxers |
2677 | 2681 |
ac3_demuxer_select="ac3_parser" |
... | ... |
@@ -3052,9 +3061,11 @@ sws_max_filter_size_default=256 |
3052 | 3052 |
set_default sws_max_filter_size |
3053 | 3053 |
|
3054 | 3054 |
# Enable hwaccels by default. |
3055 |
-enable d3d11va dxva2 vaapi vda vdpau videotoolbox xvmc |
|
3055 |
+enable d3d11va dxva2 vaapi vda vdpau videotoolbox_hwaccel xvmc |
|
3056 | 3056 |
enable xlib |
3057 | 3057 |
|
3058 |
+enable vda_framework videotoolbox videotoolbox_encoder |
|
3059 |
+ |
|
3058 | 3060 |
# build settings |
3059 | 3061 |
SHFLAGS='-shared -Wl,-soname,$$(@F)' |
3060 | 3062 |
LIBPREF="lib" |
... | ... |
@@ -5449,6 +5460,7 @@ check_header vdpau/vdpau.h |
5449 | 5449 |
check_header vdpau/vdpau_x11.h |
5450 | 5450 |
check_header VideoDecodeAcceleration/VDADecoder.h |
5451 | 5451 |
check_header VideoToolbox/VideoToolbox.h |
5452 |
+check_func_headers VideoToolbox/VTCompressionSession.h VTCompressionSessionPrepareToEncodeFrames -framework VideoToolbox |
|
5452 | 5453 |
check_header windows.h |
5453 | 5454 |
check_header X11/extensions/XvMClib.h |
5454 | 5455 |
check_header asm/types.h |
... | ... |
@@ -69,6 +69,7 @@ OBJS-$(CONFIG_H264CHROMA) += h264chroma.o |
69 | 69 |
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o |
70 | 70 |
OBJS-$(CONFIG_H264PRED) += h264pred.o |
71 | 71 |
OBJS-$(CONFIG_H264QPEL) += h264qpel.o |
72 |
+OBJS-$(CONFIG_H264_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o |
|
72 | 73 |
OBJS-$(CONFIG_HPELDSP) += hpeldsp.o |
73 | 74 |
OBJS-$(CONFIG_HUFFMAN) += huffman.o |
74 | 75 |
OBJS-$(CONFIG_HUFFYUVDSP) += huffyuvdsp.o |
... | ... |
@@ -604,6 +604,7 @@ void avcodec_register_all(void) |
604 | 604 |
* above is available */ |
605 | 605 |
REGISTER_ENCODER(LIBOPENH264, libopenh264); |
606 | 606 |
REGISTER_ENCODER(H264_QSV, h264_qsv); |
607 |
+ REGISTER_ENCODER(H264_VIDEOTOOLBOX, h264_videotoolbox); |
|
607 | 608 |
REGISTER_ENCODER(NVENC, nvenc); |
608 | 609 |
REGISTER_ENCODER(NVENC_H264, nvenc_h264); |
609 | 610 |
REGISTER_ENCODER(NVENC_HEVC, nvenc_hevc); |
610 | 611 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,1339 @@ |
0 |
+/* |
|
1 |
+ * copyright (c) 2015 Rick Kern <kernrj@gmail.com> |
|
2 |
+ * |
|
3 |
+ * This file is part of FFmpeg. |
|
4 |
+ * |
|
5 |
+ * FFmpeg is free software; you can redistribute it and/or |
|
6 |
+ * modify it under the terms of the GNU Lesser General Public |
|
7 |
+ * License as published by the Free Software Foundation; either |
|
8 |
+ * version 2.1 of the License, or (at your option) any later version. |
|
9 |
+ * |
|
10 |
+ * FFmpeg is distributed in the hope that it will be useful, |
|
11 |
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 |
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
13 |
+ * Lesser General Public License for more details. |
|
14 |
+ * |
|
15 |
+ * You should have received a copy of the GNU Lesser General Public |
|
16 |
+ * License along with FFmpeg; if not, write to the Free Software |
|
17 |
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
|
18 |
+ */ |
|
19 |
+ |
|
20 |
+#include <VideoToolbox/VideoToolbox.h> |
|
21 |
+#include <CoreVideo/CoreVideo.h> |
|
22 |
+#include <CoreMedia/CoreMedia.h> |
|
23 |
+#include <TargetConditionals.h> |
|
24 |
+#include <Availability.h> |
|
25 |
+#include "avcodec.h" |
|
26 |
+#include "libavutil/opt.h" |
|
27 |
+#include "libavutil/avassert.h" |
|
28 |
+#include "libavutil/atomic.h" |
|
29 |
+#include "libavutil/avstring.h" |
|
30 |
+#include "libavcodec/avcodec.h" |
|
31 |
+#include "internal.h" |
|
32 |
+#include <pthread.h> |
|
33 |
+ |
|
34 |
/* H.264 profile choices exposed through the encoder's "profile" option. */
typedef enum VT_H264Profile {
    H264_PROF_AUTO,     /* let VideoToolbox pick based on stream properties */
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_COUNT     /* number of valid profile values */
} VT_H264Profile;
|
41 |
+ |
|
42 |
/* Annex B start code prepended to each NAL unit and parameter set. */
static const uint8_t start_code[] = { 0, 0, 0, 1 };
|
43 |
+ |
|
44 |
+typedef struct BufNode { |
|
45 |
+ CMSampleBufferRef cm_buffer; |
|
46 |
+ struct BufNode* next; |
|
47 |
+ int error; |
|
48 |
+} BufNode; |
|
49 |
+ |
|
50 |
+typedef struct VTEncContext { |
|
51 |
+ AVClass *class; |
|
52 |
+ VTCompressionSessionRef session; |
|
53 |
+ |
|
54 |
+ pthread_mutex_t lock; |
|
55 |
+ pthread_cond_t cv_sample_sent; |
|
56 |
+ |
|
57 |
+ int async_error; |
|
58 |
+ |
|
59 |
+ BufNode *q_head; |
|
60 |
+ BufNode *q_tail; |
|
61 |
+ |
|
62 |
+ int64_t frame_ct_out; |
|
63 |
+ int64_t frame_ct_in; |
|
64 |
+ |
|
65 |
+ int64_t first_pts; |
|
66 |
+ int64_t dts_delta; |
|
67 |
+ |
|
68 |
+ int64_t profile; |
|
69 |
+ int64_t level; |
|
70 |
+ |
|
71 |
+ bool flushing; |
|
72 |
+ bool has_b_frames; |
|
73 |
+ bool warned_color_range; |
|
74 |
+} VTEncContext; |
|
75 |
+ |
|
76 |
+static void set_async_error(VTEncContext *vtctx, int err) |
|
77 |
+{ |
|
78 |
+ BufNode *info; |
|
79 |
+ |
|
80 |
+ pthread_mutex_lock(&vtctx->lock); |
|
81 |
+ |
|
82 |
+ vtctx->async_error = err; |
|
83 |
+ |
|
84 |
+ info = vtctx->q_head; |
|
85 |
+ vtctx->q_head = vtctx->q_tail = NULL; |
|
86 |
+ |
|
87 |
+ while (info) { |
|
88 |
+ BufNode *next = info->next; |
|
89 |
+ CFRelease(info->cm_buffer); |
|
90 |
+ av_free(info); |
|
91 |
+ info = next; |
|
92 |
+ } |
|
93 |
+ |
|
94 |
+ pthread_mutex_unlock(&vtctx->lock); |
|
95 |
+} |
|
96 |
+ |
|
97 |
+static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf) |
|
98 |
+{ |
|
99 |
+ BufNode *info; |
|
100 |
+ |
|
101 |
+ pthread_mutex_lock(&vtctx->lock); |
|
102 |
+ |
|
103 |
+ if (vtctx->async_error) { |
|
104 |
+ pthread_mutex_unlock(&vtctx->lock); |
|
105 |
+ return vtctx->async_error; |
|
106 |
+ } |
|
107 |
+ |
|
108 |
+ if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) { |
|
109 |
+ *buf = NULL; |
|
110 |
+ |
|
111 |
+ pthread_mutex_unlock(&vtctx->lock); |
|
112 |
+ return 0; |
|
113 |
+ } |
|
114 |
+ |
|
115 |
+ while (!vtctx->q_head && !vtctx->async_error && wait) { |
|
116 |
+ pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock); |
|
117 |
+ } |
|
118 |
+ |
|
119 |
+ if (!vtctx->q_head) { |
|
120 |
+ pthread_mutex_unlock(&vtctx->lock); |
|
121 |
+ *buf = NULL; |
|
122 |
+ return 0; |
|
123 |
+ } |
|
124 |
+ |
|
125 |
+ info = vtctx->q_head; |
|
126 |
+ vtctx->q_head = vtctx->q_head->next; |
|
127 |
+ if (!vtctx->q_head) { |
|
128 |
+ vtctx->q_tail = NULL; |
|
129 |
+ } |
|
130 |
+ |
|
131 |
+ pthread_mutex_unlock(&vtctx->lock); |
|
132 |
+ |
|
133 |
+ *buf = info->cm_buffer; |
|
134 |
+ av_free(info); |
|
135 |
+ |
|
136 |
+ vtctx->frame_ct_out++; |
|
137 |
+ |
|
138 |
+ return 0; |
|
139 |
+} |
|
140 |
+ |
|
141 |
+static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer) |
|
142 |
+{ |
|
143 |
+ BufNode *info = av_malloc(sizeof(BufNode)); |
|
144 |
+ if (!info) { |
|
145 |
+ set_async_error(vtctx, AVERROR(ENOMEM)); |
|
146 |
+ return; |
|
147 |
+ } |
|
148 |
+ |
|
149 |
+ CFRetain(buffer); |
|
150 |
+ info->cm_buffer = buffer; |
|
151 |
+ info->next = NULL; |
|
152 |
+ |
|
153 |
+ pthread_mutex_lock(&vtctx->lock); |
|
154 |
+ pthread_cond_signal(&vtctx->cv_sample_sent); |
|
155 |
+ |
|
156 |
+ if (!vtctx->q_head) { |
|
157 |
+ vtctx->q_head = info; |
|
158 |
+ } else { |
|
159 |
+ vtctx->q_tail->next = info; |
|
160 |
+ } |
|
161 |
+ |
|
162 |
+ vtctx->q_tail = info; |
|
163 |
+ |
|
164 |
+ pthread_mutex_unlock(&vtctx->lock); |
|
165 |
+} |
|
166 |
+ |
|
167 |
+static CMVideoCodecType get_cm_codec_type(enum AVCodecID id) |
|
168 |
+{ |
|
169 |
+ switch (id) { |
|
170 |
+ case AV_CODEC_ID_H264: return kCMVideoCodecType_H264; |
|
171 |
+ default: return 0; |
|
172 |
+ } |
|
173 |
+} |
|
174 |
+ |
|
175 |
+static void vtenc_free_block(void *opaque, uint8_t *data) |
|
176 |
+{ |
|
177 |
+ CMBlockBufferRef block = opaque; |
|
178 |
+ CFRelease(block); |
|
179 |
+} |
|
180 |
+ |
|
181 |
+/** |
|
182 |
+ * Get the parameter sets from a CMSampleBufferRef. |
|
183 |
+ * @param dst If *dst isn't NULL, the parameters are copied into existing |
|
184 |
+ * memory. *dst_size must be set accordingly when *dst != NULL. |
|
185 |
+ * If *dst is NULL, it will be allocated. |
|
186 |
+ * In all cases, *dst_size is set to the number of bytes used starting |
|
187 |
+ * at *dst. |
|
188 |
+ */ |
|
189 |
+static int get_params_size( |
|
190 |
+ AVCodecContext *avctx, |
|
191 |
+ CMVideoFormatDescriptionRef vid_fmt, |
|
192 |
+ size_t *size) |
|
193 |
+{ |
|
194 |
+ size_t total_size = 0; |
|
195 |
+ size_t ps_count; |
|
196 |
+ size_t i; |
|
197 |
+ int status; |
|
198 |
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, |
|
199 |
+ 0, |
|
200 |
+ NULL, |
|
201 |
+ NULL, |
|
202 |
+ &ps_count, |
|
203 |
+ NULL); |
|
204 |
+ if (status) { |
|
205 |
+ av_log(avctx, AV_LOG_ERROR, "Error getting parameter set count: %d\n", status); |
|
206 |
+ return AVERROR_EXTERNAL; |
|
207 |
+ } |
|
208 |
+ |
|
209 |
+ for(i = 0; i < ps_count; i++){ |
|
210 |
+ const uint8_t *ps; |
|
211 |
+ size_t ps_size; |
|
212 |
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, |
|
213 |
+ i, |
|
214 |
+ &ps, |
|
215 |
+ &ps_size, |
|
216 |
+ NULL, |
|
217 |
+ NULL); |
|
218 |
+ if(status){ |
|
219 |
+ av_log(avctx, AV_LOG_ERROR, "Error getting parameter set size for index %zd: %d\n", i, status); |
|
220 |
+ return AVERROR_EXTERNAL; |
|
221 |
+ } |
|
222 |
+ |
|
223 |
+ total_size += ps_size + sizeof(start_code); |
|
224 |
+ } |
|
225 |
+ |
|
226 |
+ *size = total_size; |
|
227 |
+ return 0; |
|
228 |
+} |
|
229 |
+ |
|
230 |
+static int copy_param_sets( |
|
231 |
+ AVCodecContext *avctx, |
|
232 |
+ CMVideoFormatDescriptionRef vid_fmt, |
|
233 |
+ uint8_t *dst, |
|
234 |
+ size_t dst_size) |
|
235 |
+{ |
|
236 |
+ size_t ps_count; |
|
237 |
+ int status; |
|
238 |
+ size_t offset = 0; |
|
239 |
+ size_t i; |
|
240 |
+ |
|
241 |
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, |
|
242 |
+ 0, |
|
243 |
+ NULL, |
|
244 |
+ NULL, |
|
245 |
+ &ps_count, |
|
246 |
+ NULL); |
|
247 |
+ if (status) { |
|
248 |
+ av_log(avctx, AV_LOG_ERROR, "Error getting parameter set count for copying: %d\n", status); |
|
249 |
+ return AVERROR_EXTERNAL; |
|
250 |
+ } |
|
251 |
+ |
|
252 |
+ for (i = 0; i < ps_count; i++) { |
|
253 |
+ const uint8_t *ps; |
|
254 |
+ size_t ps_size; |
|
255 |
+ size_t next_offset; |
|
256 |
+ |
|
257 |
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, |
|
258 |
+ i, |
|
259 |
+ &ps, |
|
260 |
+ &ps_size, |
|
261 |
+ NULL, |
|
262 |
+ NULL); |
|
263 |
+ if (status) { |
|
264 |
+ av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data for index %zd: %d\n", i, status); |
|
265 |
+ return AVERROR_EXTERNAL; |
|
266 |
+ } |
|
267 |
+ |
|
268 |
+ next_offset = offset + sizeof(start_code) + ps_size; |
|
269 |
+ if (dst_size < next_offset) { |
|
270 |
+ av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n"); |
|
271 |
+ return AVERROR_BUFFER_TOO_SMALL; |
|
272 |
+ } |
|
273 |
+ |
|
274 |
+ memcpy(dst + offset, start_code, sizeof(start_code)); |
|
275 |
+ offset += sizeof(start_code); |
|
276 |
+ |
|
277 |
+ memcpy(dst + offset, ps, ps_size); |
|
278 |
+ offset = next_offset; |
|
279 |
+ } |
|
280 |
+ |
|
281 |
+ return 0; |
|
282 |
+} |
|
283 |
+ |
|
284 |
+static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer) |
|
285 |
+{ |
|
286 |
+ CMVideoFormatDescriptionRef vid_fmt; |
|
287 |
+ size_t total_size; |
|
288 |
+ int status; |
|
289 |
+ |
|
290 |
+ vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer); |
|
291 |
+ if (!vid_fmt) { |
|
292 |
+ av_log(avctx, AV_LOG_ERROR, "No video format.\n"); |
|
293 |
+ return AVERROR_EXTERNAL; |
|
294 |
+ } |
|
295 |
+ |
|
296 |
+ status = get_params_size(avctx, vid_fmt, &total_size); |
|
297 |
+ if (status) { |
|
298 |
+ av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n"); |
|
299 |
+ return status; |
|
300 |
+ } |
|
301 |
+ |
|
302 |
+ avctx->extradata = av_malloc(total_size); |
|
303 |
+ if (!avctx->extradata) { |
|
304 |
+ return AVERROR(ENOMEM); |
|
305 |
+ } |
|
306 |
+ avctx->extradata_size = total_size; |
|
307 |
+ |
|
308 |
+ status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size); |
|
309 |
+ |
|
310 |
+ if (status) { |
|
311 |
+ av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n"); |
|
312 |
+ return status; |
|
313 |
+ } |
|
314 |
+ |
|
315 |
+ return 0; |
|
316 |
+} |
|
317 |
+ |
|
318 |
+static void vtenc_output_callback( |
|
319 |
+ void *CM_NULLABLE ctx, |
|
320 |
+ void *sourceFrameCtx, |
|
321 |
+ OSStatus status, |
|
322 |
+ VTEncodeInfoFlags flags, |
|
323 |
+ CM_NULLABLE CMSampleBufferRef sample_buffer) |
|
324 |
+{ |
|
325 |
+ AVCodecContext *avctx = ctx; |
|
326 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
327 |
+ |
|
328 |
+ if (vtctx->async_error) { |
|
329 |
+ if(sample_buffer) CFRelease(sample_buffer); |
|
330 |
+ return; |
|
331 |
+ } |
|
332 |
+ |
|
333 |
+ if (status || !sample_buffer) { |
|
334 |
+ av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status); |
|
335 |
+ set_async_error(vtctx, AVERROR_EXTERNAL); |
|
336 |
+ return; |
|
337 |
+ } |
|
338 |
+ |
|
339 |
+ if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) { |
|
340 |
+ int set_status = set_extradata(avctx, sample_buffer); |
|
341 |
+ if (set_status) { |
|
342 |
+ set_async_error(vtctx, set_status); |
|
343 |
+ return; |
|
344 |
+ } |
|
345 |
+ } |
|
346 |
+ |
|
347 |
+ vtenc_q_push(vtctx, sample_buffer); |
|
348 |
+} |
|
349 |
+ |
|
350 |
+static int get_length_code_size( |
|
351 |
+ AVCodecContext *avctx, |
|
352 |
+ CMSampleBufferRef sample_buffer, |
|
353 |
+ size_t *size) |
|
354 |
+{ |
|
355 |
+ CMVideoFormatDescriptionRef vid_fmt; |
|
356 |
+ int isize; |
|
357 |
+ int status; |
|
358 |
+ |
|
359 |
+ vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer); |
|
360 |
+ if (!vid_fmt) { |
|
361 |
+ av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n"); |
|
362 |
+ return AVERROR_EXTERNAL; |
|
363 |
+ } |
|
364 |
+ |
|
365 |
+ status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, |
|
366 |
+ 0, |
|
367 |
+ NULL, |
|
368 |
+ NULL, |
|
369 |
+ NULL, |
|
370 |
+ &isize); |
|
371 |
+ if (status) { |
|
372 |
+ av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status); |
|
373 |
+ return AVERROR_EXTERNAL; |
|
374 |
+ } |
|
375 |
+ |
|
376 |
+ *size = isize; |
|
377 |
+ return 0; |
|
378 |
+} |
|
379 |
+ |
|
380 |
+/* |
|
381 |
+ * Returns true on success. |
|
382 |
+ * |
|
383 |
+ * If profile_level_val is NULL and this method returns true, don't specify the |
|
384 |
+ * profile/level to the encoder. |
|
385 |
+ */ |
|
386 |
+static bool get_vt_profile_level(AVCodecContext *avctx, |
|
387 |
+ CFStringRef *profile_level_val) |
|
388 |
+{ |
|
389 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
390 |
+ int64_t profile = vtctx->profile; |
|
391 |
+ |
|
392 |
+ if (profile == H264_PROF_AUTO && vtctx->level) { |
|
393 |
+ //Need to pick a profile if level is not auto-selected. |
|
394 |
+ profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE; |
|
395 |
+ } |
|
396 |
+ |
|
397 |
+ *profile_level_val = NULL; |
|
398 |
+ |
|
399 |
+ switch (profile) { |
|
400 |
+ case H264_PROF_AUTO: |
|
401 |
+ return true; |
|
402 |
+ |
|
403 |
+ case H264_PROF_BASELINE: |
|
404 |
+ switch (vtctx->level) { |
|
405 |
+ case 0: *profile_level_val = kVTProfileLevel_H264_Baseline_AutoLevel; break; |
|
406 |
+ case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break; |
|
407 |
+ case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break; |
|
408 |
+ case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break; |
|
409 |
+ case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break; |
|
410 |
+ case 40: *profile_level_val = kVTProfileLevel_H264_Baseline_4_0; break; |
|
411 |
+ case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break; |
|
412 |
+ case 42: *profile_level_val = kVTProfileLevel_H264_Baseline_4_2; break; |
|
413 |
+ case 50: *profile_level_val = kVTProfileLevel_H264_Baseline_5_0; break; |
|
414 |
+ case 51: *profile_level_val = kVTProfileLevel_H264_Baseline_5_1; break; |
|
415 |
+ case 52: *profile_level_val = kVTProfileLevel_H264_Baseline_5_2; break; |
|
416 |
+ } |
|
417 |
+ break; |
|
418 |
+ |
|
419 |
+ case H264_PROF_MAIN: |
|
420 |
+ switch (vtctx->level) { |
|
421 |
+ case 0: *profile_level_val = kVTProfileLevel_H264_Main_AutoLevel; break; |
|
422 |
+ case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break; |
|
423 |
+ case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break; |
|
424 |
+ case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break; |
|
425 |
+ case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break; |
|
426 |
+ case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break; |
|
427 |
+ case 42: *profile_level_val = kVTProfileLevel_H264_Main_4_2; break; |
|
428 |
+ case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break; |
|
429 |
+ case 51: *profile_level_val = kVTProfileLevel_H264_Main_5_1; break; |
|
430 |
+ case 52: *profile_level_val = kVTProfileLevel_H264_Main_5_2; break; |
|
431 |
+ } |
|
432 |
+ break; |
|
433 |
+ |
|
434 |
+ case H264_PROF_HIGH: |
|
435 |
+ switch (vtctx->level) { |
|
436 |
+ case 0: *profile_level_val = kVTProfileLevel_H264_High_AutoLevel; break; |
|
437 |
+ case 30: *profile_level_val = kVTProfileLevel_H264_High_3_0; break; |
|
438 |
+ case 31: *profile_level_val = kVTProfileLevel_H264_High_3_1; break; |
|
439 |
+ case 32: *profile_level_val = kVTProfileLevel_H264_High_3_2; break; |
|
440 |
+ case 40: *profile_level_val = kVTProfileLevel_H264_High_4_0; break; |
|
441 |
+ case 41: *profile_level_val = kVTProfileLevel_H264_High_4_1; break; |
|
442 |
+ case 42: *profile_level_val = kVTProfileLevel_H264_High_4_2; break; |
|
443 |
+ case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0; break; |
|
444 |
+ case 51: *profile_level_val = kVTProfileLevel_H264_High_5_1; break; |
|
445 |
+ case 52: *profile_level_val = kVTProfileLevel_H264_High_5_2; break; |
|
446 |
+ } |
|
447 |
+ break; |
|
448 |
+ } |
|
449 |
+ |
|
450 |
+ if (!*profile_level_val) { |
|
451 |
+ av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n"); |
|
452 |
+ return false; |
|
453 |
+ } |
|
454 |
+ |
|
455 |
+ return true; |
|
456 |
+} |
|
457 |
+ |
|
458 |
+static av_cold int vtenc_init(AVCodecContext *avctx) |
|
459 |
+{ |
|
460 |
+ CFMutableDictionaryRef enc_info; |
|
461 |
+ CMVideoCodecType codec_type; |
|
462 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
463 |
+ CFStringRef profile_level; |
|
464 |
+ SInt32 bit_rate = avctx->bit_rate; |
|
465 |
+ CFNumberRef bit_rate_num; |
|
466 |
+ int status; |
|
467 |
+ |
|
468 |
+ codec_type = get_cm_codec_type(avctx->codec_id); |
|
469 |
+ if (!codec_type) { |
|
470 |
+ av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id); |
|
471 |
+ return AVERROR(EINVAL); |
|
472 |
+ } |
|
473 |
+ |
|
474 |
+ vtctx->has_b_frames = avctx->has_b_frames || avctx->max_b_frames > 0; |
|
475 |
+ if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){ |
|
476 |
+ av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n"); |
|
477 |
+ vtctx->has_b_frames = false; |
|
478 |
+ } |
|
479 |
+ |
|
480 |
+ if (!get_vt_profile_level(avctx, &profile_level)) return AVERROR(EINVAL); |
|
481 |
+ |
|
482 |
+ vtctx->session = NULL; |
|
483 |
+ |
|
484 |
+ enc_info = CFDictionaryCreateMutable( |
|
485 |
+ kCFAllocatorDefault, |
|
486 |
+ 20, |
|
487 |
+ &kCFCopyStringDictionaryKeyCallBacks, |
|
488 |
+ &kCFTypeDictionaryValueCallBacks |
|
489 |
+ ); |
|
490 |
+ |
|
491 |
+ if (!enc_info) return AVERROR(ENOMEM); |
|
492 |
+ |
|
493 |
+#if !TARGET_OS_IPHONE |
|
494 |
+ CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder, kCFBooleanTrue); |
|
495 |
+ CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, kCFBooleanTrue); |
|
496 |
+#endif |
|
497 |
+ |
|
498 |
+ status = VTCompressionSessionCreate( |
|
499 |
+ kCFAllocatorDefault, |
|
500 |
+ avctx->width, |
|
501 |
+ avctx->height, |
|
502 |
+ codec_type, |
|
503 |
+ enc_info, |
|
504 |
+ NULL, |
|
505 |
+ kCFAllocatorDefault, |
|
506 |
+ vtenc_output_callback, |
|
507 |
+ avctx, |
|
508 |
+ &vtctx->session |
|
509 |
+ ); |
|
510 |
+ |
|
511 |
+#if !TARGET_OS_IPHONE |
|
512 |
+ if (status != 0 || !vtctx->session) { |
|
513 |
+ CFDictionaryRemoveValue(enc_info, kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder); |
|
514 |
+ |
|
515 |
+ status = VTCompressionSessionCreate( |
|
516 |
+ kCFAllocatorDefault, |
|
517 |
+ avctx->width, |
|
518 |
+ avctx->height, |
|
519 |
+ codec_type, |
|
520 |
+ enc_info, |
|
521 |
+ NULL, |
|
522 |
+ kCFAllocatorDefault, |
|
523 |
+ vtenc_output_callback, |
|
524 |
+ avctx, |
|
525 |
+ &vtctx->session |
|
526 |
+ ); |
|
527 |
+ } |
|
528 |
+#endif |
|
529 |
+ |
|
530 |
+ CFRelease(enc_info); |
|
531 |
+ |
|
532 |
+ if (status || !vtctx->session) { |
|
533 |
+ av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status); |
|
534 |
+ return AVERROR_EXTERNAL; |
|
535 |
+ } |
|
536 |
+ |
|
537 |
+ bit_rate_num = CFNumberCreate(kCFAllocatorDefault, |
|
538 |
+ kCFNumberSInt32Type, |
|
539 |
+ &bit_rate); |
|
540 |
+ if (!bit_rate_num) return AVERROR(ENOMEM); |
|
541 |
+ |
|
542 |
+ status = VTSessionSetProperty(vtctx->session, |
|
543 |
+ kVTCompressionPropertyKey_AverageBitRate, |
|
544 |
+ bit_rate_num); |
|
545 |
+ CFRelease(bit_rate_num); |
|
546 |
+ |
|
547 |
+ if (status) { |
|
548 |
+ av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status); |
|
549 |
+ return AVERROR_EXTERNAL; |
|
550 |
+ } |
|
551 |
+ |
|
552 |
+ if (profile_level) { |
|
553 |
+ status = VTSessionSetProperty(vtctx->session, |
|
554 |
+ kVTCompressionPropertyKey_ProfileLevel, |
|
555 |
+ profile_level); |
|
556 |
+ if (status) { |
|
557 |
+ av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status); |
|
558 |
+ return AVERROR_EXTERNAL; |
|
559 |
+ } |
|
560 |
+ } |
|
561 |
+ |
|
562 |
+ if (avctx->gop_size > 0) { |
|
563 |
+ CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault, |
|
564 |
+ kCFNumberIntType, |
|
565 |
+ &avctx->gop_size); |
|
566 |
+ status = VTSessionSetProperty(vtctx->session, |
|
567 |
+ kVTCompressionPropertyKey_MaxKeyFrameInterval, |
|
568 |
+ interval); |
|
569 |
+ |
|
570 |
+ if (status) { |
|
571 |
+ av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status); |
|
572 |
+ return AVERROR_EXTERNAL; |
|
573 |
+ } |
|
574 |
+ } |
|
575 |
+ |
|
576 |
+ if (!vtctx->has_b_frames) { |
|
577 |
+ status = VTSessionSetProperty(vtctx->session, |
|
578 |
+ kVTCompressionPropertyKey_AllowFrameReordering, |
|
579 |
+ kCFBooleanFalse); |
|
580 |
+ |
|
581 |
+ if (status) { |
|
582 |
+ av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status); |
|
583 |
+ return AVERROR_EXTERNAL; |
|
584 |
+ } |
|
585 |
+ } |
|
586 |
+ |
|
587 |
+ status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session); |
|
588 |
+ if (status) { |
|
589 |
+ av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status); |
|
590 |
+ return AVERROR_EXTERNAL; |
|
591 |
+ } |
|
592 |
+ |
|
593 |
+ pthread_mutex_init(&vtctx->lock, NULL); |
|
594 |
+ pthread_cond_init(&vtctx->cv_sample_sent, NULL); |
|
595 |
+ vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0; |
|
596 |
+ |
|
597 |
+ return 0; |
|
598 |
+} |
|
599 |
+ |
|
600 |
+static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame) |
|
601 |
+{ |
|
602 |
+ CFArrayRef attachments; |
|
603 |
+ CFDictionaryRef attachment; |
|
604 |
+ CFBooleanRef not_sync; |
|
605 |
+ CFIndex len; |
|
606 |
+ |
|
607 |
+ attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false); |
|
608 |
+ len = !attachments ? 0 : CFArrayGetCount(attachments); |
|
609 |
+ |
|
610 |
+ if (!len) { |
|
611 |
+ *is_key_frame = true; |
|
612 |
+ return; |
|
613 |
+ } |
|
614 |
+ |
|
615 |
+ attachment = CFArrayGetValueAtIndex(attachments, 0); |
|
616 |
+ |
|
617 |
+ if (CFDictionaryGetValueIfPresent(attachment, |
|
618 |
+ kCMSampleAttachmentKey_NotSync, |
|
619 |
+ (const void **)¬_sync)) |
|
620 |
+ { |
|
621 |
+ *is_key_frame = !CFBooleanGetValue(not_sync); |
|
622 |
+ } else { |
|
623 |
+ *is_key_frame = true; |
|
624 |
+ } |
|
625 |
+} |
|
626 |
+ |
|
627 |
+/** |
|
628 |
+ * Replaces length codes with H.264 Annex B start codes. |
|
629 |
+ * length_code_size must equal sizeof(start_code). |
|
630 |
+ * On failure, the contents of data may have been modified. |
|
631 |
+ * |
|
632 |
+ * @param length_code_size Byte length of each length code |
|
633 |
+ * @param data Call with NAL units prefixed with length codes. |
|
634 |
+ * On success, the length codes are replace with |
|
635 |
+ * start codes. |
|
636 |
+ * @param size Length of data, excluding any padding. |
|
637 |
+ * @return 0 on success |
|
638 |
+ * AVERROR_BUFFER_TOO_SMALL if length code size is smaller |
|
639 |
+ * than a start code or if a length_code in data specifies |
|
640 |
+ * data beyond the end of its buffer. |
|
641 |
+ */ |
|
642 |
+static int replace_length_codes(size_t length_code_size, |
|
643 |
+ uint8_t *data, |
|
644 |
+ size_t size) |
|
645 |
+{ |
|
646 |
+ size_t remaining_size = size; |
|
647 |
+ |
|
648 |
+ if (length_code_size != sizeof(start_code)) { |
|
649 |
+ av_log(NULL, AV_LOG_ERROR, "Start code size and length code size not equal.\n"); |
|
650 |
+ return AVERROR_BUFFER_TOO_SMALL; |
|
651 |
+ } |
|
652 |
+ |
|
653 |
+ while (remaining_size > 0) { |
|
654 |
+ size_t box_len = 0; |
|
655 |
+ size_t i; |
|
656 |
+ |
|
657 |
+ for (i = 0; i < length_code_size; i++) { |
|
658 |
+ box_len <<= 8; |
|
659 |
+ box_len |= data[i]; |
|
660 |
+ } |
|
661 |
+ |
|
662 |
+ if (remaining_size < box_len + sizeof(start_code)) { |
|
663 |
+ av_log(NULL, AV_LOG_ERROR, "Length is out of range.\n"); |
|
664 |
+ AVERROR_BUFFER_TOO_SMALL; |
|
665 |
+ } |
|
666 |
+ |
|
667 |
+ memcpy(data, start_code, sizeof(start_code)); |
|
668 |
+ data += box_len + sizeof(start_code); |
|
669 |
+ remaining_size -= box_len + sizeof(start_code); |
|
670 |
+ } |
|
671 |
+ |
|
672 |
+ return 0; |
|
673 |
+} |
|
674 |
+ |
|
675 |
+/** |
|
676 |
+ * Copies NAL units and replaces length codes with |
|
677 |
+ * H.264 Annex B start codes. On failure, the contents of |
|
678 |
+ * dst_data may have been modified. |
|
679 |
+ * |
|
680 |
+ * @param length_code_size Byte length of each length code |
|
681 |
+ * @param src_data NAL units prefixed with length codes. |
|
682 |
+ * @param src_size Length of buffer, excluding any padding. |
|
683 |
+ * @param dst_data Must be zeroed before calling this function. |
|
684 |
+ * Contains the copied NAL units prefixed with |
|
685 |
+ * start codes when the function returns |
|
686 |
+ * successfully. |
|
687 |
+ * @param dst_size Length of dst_data |
|
688 |
+ * @return 0 on success |
|
689 |
+ * AVERROR_INVALIDDATA if length_code_size is invalid |
|
690 |
+ * AVERROR_BUFFER_TOO_SMALL if dst_data is too small |
|
691 |
+ * or if a length_code in src_data specifies data beyond |
|
692 |
+ * the end of its buffer. |
|
693 |
+ */ |
|
694 |
+static int copy_replace_length_codes( |
|
695 |
+ size_t length_code_size, |
|
696 |
+ const uint8_t *src_data, |
|
697 |
+ size_t src_size, |
|
698 |
+ uint8_t *dst_data, |
|
699 |
+ size_t dst_size) |
|
700 |
+{ |
|
701 |
+ size_t remaining_src_size = src_size; |
|
702 |
+ size_t remaining_dst_size = dst_size; |
|
703 |
+ |
|
704 |
+ if (length_code_size > 4) { |
|
705 |
+ return AVERROR_INVALIDDATA; |
|
706 |
+ } |
|
707 |
+ |
|
708 |
+ while (remaining_src_size > 0) { |
|
709 |
+ size_t curr_src_len; |
|
710 |
+ size_t curr_dst_len; |
|
711 |
+ size_t box_len = 0; |
|
712 |
+ size_t i; |
|
713 |
+ |
|
714 |
+ uint8_t *dst_box; |
|
715 |
+ const uint8_t *src_box; |
|
716 |
+ |
|
717 |
+ for (i = 0; i < length_code_size; i++) { |
|
718 |
+ box_len <<= 8; |
|
719 |
+ box_len |= src_data[i]; |
|
720 |
+ } |
|
721 |
+ |
|
722 |
+ curr_src_len = box_len + length_code_size; |
|
723 |
+ curr_dst_len = box_len + sizeof(start_code); |
|
724 |
+ |
|
725 |
+ if (remaining_src_size < curr_src_len) { |
|
726 |
+ return AVERROR_BUFFER_TOO_SMALL; |
|
727 |
+ } |
|
728 |
+ |
|
729 |
+ if (remaining_dst_size < curr_dst_len) { |
|
730 |
+ return AVERROR_BUFFER_TOO_SMALL; |
|
731 |
+ } |
|
732 |
+ |
|
733 |
+ dst_box = dst_data + sizeof(start_code); |
|
734 |
+ src_box = src_data + length_code_size; |
|
735 |
+ |
|
736 |
+ memcpy(dst_data, start_code, sizeof(start_code)); |
|
737 |
+ memcpy(dst_box, src_box, box_len); |
|
738 |
+ |
|
739 |
+ src_data += curr_src_len; |
|
740 |
+ dst_data += curr_dst_len; |
|
741 |
+ |
|
742 |
+ remaining_src_size -= curr_src_len; |
|
743 |
+ remaining_dst_size -= curr_dst_len; |
|
744 |
+ } |
|
745 |
+ |
|
746 |
+ return 0; |
|
747 |
+} |
|
748 |
+ |
|
749 |
+static int vtenc_cm_to_avpacket( |
|
750 |
+ AVCodecContext *avctx, |
|
751 |
+ CMSampleBufferRef sample_buffer, |
|
752 |
+ AVPacket *pkt) |
|
753 |
+{ |
|
754 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
755 |
+ |
|
756 |
+ int status; |
|
757 |
+ bool is_key_frame; |
|
758 |
+ bool add_header; |
|
759 |
+ char *buf_data; |
|
760 |
+ size_t length_code_size; |
|
761 |
+ size_t header_size = 0; |
|
762 |
+ size_t in_buf_size; |
|
763 |
+ int64_t dts_delta; |
|
764 |
+ int64_t time_base_num; |
|
765 |
+ CMTime pts; |
|
766 |
+ CMTime dts; |
|
767 |
+ |
|
768 |
+ CMBlockBufferRef block; |
|
769 |
+ CMVideoFormatDescriptionRef vid_fmt; |
|
770 |
+ |
|
771 |
+ |
|
772 |
+ vtenc_get_frame_info(sample_buffer, &is_key_frame); |
|
773 |
+ status = get_length_code_size(avctx, sample_buffer, &length_code_size); |
|
774 |
+ if (status) return status; |
|
775 |
+ |
|
776 |
+ add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER); |
|
777 |
+ |
|
778 |
+ if (add_header) { |
|
779 |
+ vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer); |
|
780 |
+ if (!vid_fmt) { |
|
781 |
+ av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n"); |
|
782 |
+ } |
|
783 |
+ |
|
784 |
+ int status = get_params_size(avctx, vid_fmt, &header_size); |
|
785 |
+ if (status) return status; |
|
786 |
+ } |
|
787 |
+ |
|
788 |
+ block = CMSampleBufferGetDataBuffer(sample_buffer); |
|
789 |
+ if (!block) { |
|
790 |
+ av_log(avctx, AV_LOG_ERROR, "Could not get block buffer from sample buffer.\n"); |
|
791 |
+ return AVERROR_EXTERNAL; |
|
792 |
+ } |
|
793 |
+ |
|
794 |
+ |
|
795 |
+ status = CMBlockBufferGetDataPointer(block, 0, &in_buf_size, NULL, &buf_data); |
|
796 |
+ if (status) { |
|
797 |
+ av_log(avctx, AV_LOG_ERROR, "Error: cannot get data pointer: %d\n", status); |
|
798 |
+ return AVERROR_EXTERNAL; |
|
799 |
+ } |
|
800 |
+ |
|
801 |
+ size_t out_buf_size = header_size + in_buf_size; |
|
802 |
+ bool can_reuse_cmbuffer = !add_header && |
|
803 |
+ !pkt->data && |
|
804 |
+ length_code_size == sizeof(start_code); |
|
805 |
+ |
|
806 |
+ av_init_packet(pkt); |
|
807 |
+ |
|
808 |
+ if (can_reuse_cmbuffer) { |
|
809 |
+ AVBufferRef* buf_ref = av_buffer_create( |
|
810 |
+ buf_data, |
|
811 |
+ out_buf_size, |
|
812 |
+ vtenc_free_block, |
|
813 |
+ block, |
|
814 |
+ 0 |
|
815 |
+ ); |
|
816 |
+ |
|
817 |
+ if (!buf_ref) return AVERROR(ENOMEM); |
|
818 |
+ |
|
819 |
+ CFRetain(block); |
|
820 |
+ |
|
821 |
+ pkt->buf = buf_ref; |
|
822 |
+ pkt->data = buf_data; |
|
823 |
+ pkt->size = in_buf_size; |
|
824 |
+ |
|
825 |
+ status = replace_length_codes(length_code_size, pkt->data, pkt->size); |
|
826 |
+ if (status) { |
|
827 |
+ av_log(avctx, AV_LOG_ERROR, "Error replacing length codes: %d\n", status); |
|
828 |
+ return status; |
|
829 |
+ } |
|
830 |
+ } else { |
|
831 |
+ if (!pkt->data) { |
|
832 |
+ status = av_new_packet(pkt, out_buf_size); |
|
833 |
+ if(status) return status; |
|
834 |
+ } |
|
835 |
+ |
|
836 |
+ if (pkt->size < out_buf_size) { |
|
837 |
+ av_log(avctx, AV_LOG_ERROR, "Error: packet's buffer is too small.\n"); |
|
838 |
+ return AVERROR_BUFFER_TOO_SMALL; |
|
839 |
+ } |
|
840 |
+ |
|
841 |
+ if (add_header) { |
|
842 |
+ status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size); |
|
843 |
+ if(status) return status; |
|
844 |
+ } |
|
845 |
+ |
|
846 |
+ status = copy_replace_length_codes( |
|
847 |
+ length_code_size, |
|
848 |
+ buf_data, |
|
849 |
+ in_buf_size, |
|
850 |
+ pkt->data + header_size, |
|
851 |
+ pkt->size - header_size |
|
852 |
+ ); |
|
853 |
+ |
|
854 |
+ if (status) { |
|
855 |
+ av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d", status); |
|
856 |
+ return status; |
|
857 |
+ } |
|
858 |
+ } |
|
859 |
+ |
|
860 |
+ if (is_key_frame) { |
|
861 |
+ pkt->flags |= AV_PKT_FLAG_KEY; |
|
862 |
+ } |
|
863 |
+ |
|
864 |
+ pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer); |
|
865 |
+ dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer); |
|
866 |
+ |
|
867 |
+ dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0; |
|
868 |
+ time_base_num = avctx->time_base.num; |
|
869 |
+ pkt->pts = pts.value / time_base_num; |
|
870 |
+ pkt->dts = dts.value / time_base_num - dts_delta; |
|
871 |
+ |
|
872 |
+ return 0; |
|
873 |
+} |
|
874 |
+ |
|
875 |
+/* |
|
876 |
+ * contiguous_buf_size is 0 if not contiguous, and the size of the buffer |
|
877 |
+ * containing all planes if so. |
|
878 |
+ */ |
|
879 |
+static int get_cv_pixel_info( |
|
880 |
+ AVCodecContext *avctx, |
|
881 |
+ const AVFrame *frame, |
|
882 |
+ int *color, |
|
883 |
+ int *plane_count, |
|
884 |
+ size_t *widths, |
|
885 |
+ size_t *heights, |
|
886 |
+ size_t *strides, |
|
887 |
+ size_t *contiguous_buf_size) |
|
888 |
+{ |
|
889 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
890 |
+ int av_format = frame->format; |
|
891 |
+ int av_color_range = av_frame_get_color_range(frame); |
|
892 |
+ int i; |
|
893 |
+ |
|
894 |
+ switch (av_format) { |
|
895 |
+ case AV_PIX_FMT_NV12: |
|
896 |
+ switch (av_color_range) { |
|
897 |
+ case AVCOL_RANGE_MPEG: |
|
898 |
+ *color = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; |
|
899 |
+ break; |
|
900 |
+ |
|
901 |
+ case AVCOL_RANGE_JPEG: |
|
902 |
+ *color = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; |
|
903 |
+ break; |
|
904 |
+ |
|
905 |
+ default: |
|
906 |
+ if (!vtctx->warned_color_range) { |
|
907 |
+ vtctx->warned_color_range = true; |
|
908 |
+ av_log(avctx, AV_LOG_WARNING, "Color range not set for NV12. Using MPEG range.\n"); |
|
909 |
+ } |
|
910 |
+ *color = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; |
|
911 |
+ } |
|
912 |
+ |
|
913 |
+ *plane_count = 2; |
|
914 |
+ |
|
915 |
+ widths [0] = avctx->width; |
|
916 |
+ heights[0] = avctx->height; |
|
917 |
+ strides[0] = frame ? frame->linesize[0] : avctx->width; |
|
918 |
+ |
|
919 |
+ widths [1] = (avctx->width + 1) / 2; |
|
920 |
+ heights[1] = (avctx->height + 1) / 2; |
|
921 |
+ strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2; |
|
922 |
+ break; |
|
923 |
+ |
|
924 |
+ case AV_PIX_FMT_YUV420P: |
|
925 |
+ switch (av_color_range) { |
|
926 |
+ case AVCOL_RANGE_MPEG: |
|
927 |
+ *color = kCVPixelFormatType_420YpCbCr8Planar; |
|
928 |
+ break; |
|
929 |
+ |
|
930 |
+ case AVCOL_RANGE_JPEG: |
|
931 |
+ *color = kCVPixelFormatType_420YpCbCr8PlanarFullRange; |
|
932 |
+ break; |
|
933 |
+ |
|
934 |
+ default: |
|
935 |
+ if (!vtctx->warned_color_range) { |
|
936 |
+ vtctx->warned_color_range = true; |
|
937 |
+ av_log(avctx, AV_LOG_WARNING, "Color range not set for YUV 4:2:0. Using MPEG range.\n"); |
|
938 |
+ } |
|
939 |
+ *color = kCVPixelFormatType_420YpCbCr8Planar; |
|
940 |
+ } |
|
941 |
+ |
|
942 |
+ *plane_count = 3; |
|
943 |
+ |
|
944 |
+ widths [0] = avctx->width; |
|
945 |
+ heights[0] = avctx->height; |
|
946 |
+ strides[0] = frame ? frame->linesize[0] : avctx->width; |
|
947 |
+ |
|
948 |
+ widths [1] = (avctx->width + 1) / 2; |
|
949 |
+ heights[1] = (avctx->height + 1) / 2; |
|
950 |
+ strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2; |
|
951 |
+ |
|
952 |
+ widths [2] = (avctx->width + 1) / 2; |
|
953 |
+ heights[2] = (avctx->height + 1) / 2; |
|
954 |
+ strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2; |
|
955 |
+ break; |
|
956 |
+ |
|
957 |
+ default: return AVERROR(EINVAL); |
|
958 |
+ } |
|
959 |
+ |
|
960 |
+ *contiguous_buf_size = 0; |
|
961 |
+ for (i = 0; i < *plane_count; i++) { |
|
962 |
+ if (i < *plane_count - 1 && |
|
963 |
+ frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) { |
|
964 |
+ *contiguous_buf_size = 0; |
|
965 |
+ break; |
|
966 |
+ } |
|
967 |
+ |
|
968 |
+ *contiguous_buf_size += strides[i] * heights[i]; |
|
969 |
+ } |
|
970 |
+ |
|
971 |
+ return 0; |
|
972 |
+} |
|
973 |
+ |
|
974 |
+#if !TARGET_OS_IPHONE |
|
975 |
+//Not used on iOS - frame is always copied. |
|
976 |
+static void free_avframe( |
|
977 |
+ void *CV_NULLABLE release_ctx, |
|
978 |
+ const void *CV_NULLABLE data, |
|
979 |
+ size_t size, |
|
980 |
+ size_t plane_count, |
|
981 |
+ const void *CV_NULLABLE plane_addresses[]) |
|
982 |
+{ |
|
983 |
+ AVFrame *frame = release_ctx; |
|
984 |
+ av_frame_free(&frame); |
|
985 |
+} |
|
986 |
+#else |
|
987 |
+//Not used on OSX - frame is never copied. |
|
988 |
+static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx, |
|
989 |
+ const AVFrame *frame, |
|
990 |
+ CVPixelBufferRef cv_img, |
|
991 |
+ const size_t *plane_strides, |
|
992 |
+ const size_t *plane_rows) |
|
993 |
+{ |
|
994 |
+ int i, j; |
|
995 |
+ size_t plane_count; |
|
996 |
+ int status; |
|
997 |
+ int rows; |
|
998 |
+ int src_stride; |
|
999 |
+ int dst_stride; |
|
1000 |
+ uint8_t *src_addr; |
|
1001 |
+ uint8_t *dst_addr; |
|
1002 |
+ size_t copy_bytes; |
|
1003 |
+ |
|
1004 |
+ status = CVPixelBufferLockBaseAddress(cv_img, 0); |
|
1005 |
+ if (status) { |
|
1006 |
+ av_log( |
|
1007 |
+ avctx, |
|
1008 |
+ AV_LOG_ERROR, |
|
1009 |
+ "Error: Could not lock base address of CVPixelBuffer: %d.\n", |
|
1010 |
+ status |
|
1011 |
+ ); |
|
1012 |
+ } |
|
1013 |
+ |
|
1014 |
+ if (CVPixelBufferIsPlanar(cv_img)) { |
|
1015 |
+ plane_count = CVPixelBufferGetPlaneCount(cv_img); |
|
1016 |
+ for (i = 0; frame->data[i]; i++) { |
|
1017 |
+ if (i == plane_count) { |
|
1018 |
+ CVPixelBufferUnlockBaseAddress(cv_img, 0); |
|
1019 |
+ av_log(avctx, |
|
1020 |
+ AV_LOG_ERROR, |
|
1021 |
+ "Error: different number of planes in AVFrame and CVPixelBuffer.\n" |
|
1022 |
+ ); |
|
1023 |
+ |
|
1024 |
+ return AVERROR_EXTERNAL; |
|
1025 |
+ } |
|
1026 |
+ |
|
1027 |
+ dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i); |
|
1028 |
+ src_addr = (uint8_t*)frame->data[i]; |
|
1029 |
+ dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i); |
|
1030 |
+ src_stride = plane_strides[i]; |
|
1031 |
+ rows = plane_rows[i]; |
|
1032 |
+ |
|
1033 |
+ if (dst_stride == src_stride) { |
|
1034 |
+ memcpy(dst_addr, src_addr, src_stride * rows); |
|
1035 |
+ } else { |
|
1036 |
+ copy_bytes = dst_stride < src_stride ? dst_stride : src_stride; |
|
1037 |
+ |
|
1038 |
+ for (j = 0; j < rows; j++) { |
|
1039 |
+ memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes); |
|
1040 |
+ } |
|
1041 |
+ } |
|
1042 |
+ } |
|
1043 |
+ } else { |
|
1044 |
+ if (frame->data[1]) { |
|
1045 |
+ CVPixelBufferUnlockBaseAddress(cv_img, 0); |
|
1046 |
+ av_log(avctx, |
|
1047 |
+ AV_LOG_ERROR, |
|
1048 |
+ "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n" |
|
1049 |
+ ); |
|
1050 |
+ |
|
1051 |
+ return AVERROR_EXTERNAL; |
|
1052 |
+ } |
|
1053 |
+ |
|
1054 |
+ dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img); |
|
1055 |
+ src_addr = (uint8_t*)frame->data[0]; |
|
1056 |
+ dst_stride = CVPixelBufferGetBytesPerRow(cv_img); |
|
1057 |
+ src_stride = plane_strides[0]; |
|
1058 |
+ rows = plane_rows[0]; |
|
1059 |
+ |
|
1060 |
+ if (dst_stride == src_stride) { |
|
1061 |
+ memcpy(dst_addr, src_addr, src_stride * rows); |
|
1062 |
+ } else { |
|
1063 |
+ copy_bytes = dst_stride < src_stride ? dst_stride : src_stride; |
|
1064 |
+ |
|
1065 |
+ for (j = 0; j < rows; j++) { |
|
1066 |
+ memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes); |
|
1067 |
+ } |
|
1068 |
+ } |
|
1069 |
+ } |
|
1070 |
+ |
|
1071 |
+ status = CVPixelBufferUnlockBaseAddress(cv_img, 0); |
|
1072 |
+ if (status) { |
|
1073 |
+ av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status); |
|
1074 |
+ return AVERROR_EXTERNAL; |
|
1075 |
+ } |
|
1076 |
+ |
|
1077 |
+ return 0; |
|
1078 |
+} |
|
1079 |
+#endif //!TARGET_OS_IPHONE |
|
1080 |
+ |
|
1081 |
+static int create_cv_pixel_buffer(AVCodecContext *avctx, |
|
1082 |
+ const AVFrame *frame, |
|
1083 |
+ CVPixelBufferRef *cv_img) |
|
1084 |
+{ |
|
1085 |
+ int plane_count; |
|
1086 |
+ int color; |
|
1087 |
+ size_t widths [AV_NUM_DATA_POINTERS]; |
|
1088 |
+ size_t heights[AV_NUM_DATA_POINTERS]; |
|
1089 |
+ size_t strides[AV_NUM_DATA_POINTERS]; |
|
1090 |
+ int status; |
|
1091 |
+ size_t contiguous_buf_size; |
|
1092 |
+ |
|
1093 |
+ memset(widths, 0, sizeof(widths)); |
|
1094 |
+ memset(heights, 0, sizeof(heights)); |
|
1095 |
+ memset(strides, 0, sizeof(strides)); |
|
1096 |
+ |
|
1097 |
+ status = get_cv_pixel_info( |
|
1098 |
+ avctx, |
|
1099 |
+ frame, |
|
1100 |
+ &color, |
|
1101 |
+ &plane_count, |
|
1102 |
+ widths, |
|
1103 |
+ heights, |
|
1104 |
+ strides, |
|
1105 |
+ &contiguous_buf_size |
|
1106 |
+ ); |
|
1107 |
+ |
|
1108 |
+ if (status) { |
|
1109 |
+ av_log( |
|
1110 |
+ avctx, |
|
1111 |
+ AV_LOG_ERROR, |
|
1112 |
+ "Error: Cannot convert format %d color_range %d: %d\n", |
|
1113 |
+ frame->format, |
|
1114 |
+ av_frame_get_color_range(frame), |
|
1115 |
+ status |
|
1116 |
+ ); |
|
1117 |
+ |
|
1118 |
+ return AVERROR_EXTERNAL; |
|
1119 |
+ } |
|
1120 |
+ |
|
1121 |
+#if TARGET_OS_IPHONE |
|
1122 |
+ status = CVPixelBufferCreate( |
|
1123 |
+ kCFAllocatorDefault, |
|
1124 |
+ frame->width, |
|
1125 |
+ frame->height, |
|
1126 |
+ color, |
|
1127 |
+ NULL, |
|
1128 |
+ cv_img |
|
1129 |
+ ); |
|
1130 |
+ |
|
1131 |
+ if (status) { |
|
1132 |
+ return AVERROR_EXTERNAL; |
|
1133 |
+ } |
|
1134 |
+ |
|
1135 |
+ status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights); |
|
1136 |
+ if (status) { |
|
1137 |
+ CFRelease(*cv_img); |
|
1138 |
+ *cv_img = NULL; |
|
1139 |
+ return status; |
|
1140 |
+ } |
|
1141 |
+#else |
|
1142 |
+ AVFrame *enc_frame = av_frame_alloc(); |
|
1143 |
+ if (!enc_frame) return AVERROR(ENOMEM); |
|
1144 |
+ |
|
1145 |
+ status = av_frame_ref(enc_frame, frame); |
|
1146 |
+ if (status) { |
|
1147 |
+ av_frame_free(&enc_frame); |
|
1148 |
+ return status; |
|
1149 |
+ } |
|
1150 |
+ |
|
1151 |
+ status = CVPixelBufferCreateWithPlanarBytes( |
|
1152 |
+ kCFAllocatorDefault, |
|
1153 |
+ enc_frame->width, |
|
1154 |
+ enc_frame->height, |
|
1155 |
+ color, |
|
1156 |
+ NULL, |
|
1157 |
+ contiguous_buf_size, |
|
1158 |
+ plane_count, |
|
1159 |
+ (void **)enc_frame->data, |
|
1160 |
+ widths, |
|
1161 |
+ heights, |
|
1162 |
+ strides, |
|
1163 |
+ free_avframe, |
|
1164 |
+ enc_frame, |
|
1165 |
+ NULL, |
|
1166 |
+ cv_img |
|
1167 |
+ ); |
|
1168 |
+ |
|
1169 |
+ if (status) { |
|
1170 |
+ av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status); |
|
1171 |
+ return AVERROR_EXTERNAL; |
|
1172 |
+ } |
|
1173 |
+#endif |
|
1174 |
+ |
|
1175 |
+ return 0; |
|
1176 |
+} |
|
1177 |
+ |
|
1178 |
+static int vtenc_send_frame(AVCodecContext *avctx, |
|
1179 |
+ VTEncContext *vtctx, |
|
1180 |
+ const AVFrame *frame) |
|
1181 |
+{ |
|
1182 |
+ CMTime time; |
|
1183 |
+ CVPixelBufferRef cv_img = NULL; |
|
1184 |
+ int status = create_cv_pixel_buffer(avctx, frame, &cv_img); |
|
1185 |
+ |
|
1186 |
+ if (status) return status; |
|
1187 |
+ |
|
1188 |
+ time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den); |
|
1189 |
+ status = VTCompressionSessionEncodeFrame( |
|
1190 |
+ vtctx->session, |
|
1191 |
+ cv_img, |
|
1192 |
+ time, |
|
1193 |
+ kCMTimeInvalid, |
|
1194 |
+ NULL, |
|
1195 |
+ NULL, |
|
1196 |
+ NULL |
|
1197 |
+ ); |
|
1198 |
+ |
|
1199 |
+ CFRelease(cv_img); |
|
1200 |
+ |
|
1201 |
+ if (status) { |
|
1202 |
+ av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status); |
|
1203 |
+ return AVERROR_EXTERNAL; |
|
1204 |
+ } |
|
1205 |
+ |
|
1206 |
+ return 0; |
|
1207 |
+} |
|
1208 |
+ |
|
1209 |
+static av_cold int vtenc_frame( |
|
1210 |
+ AVCodecContext *avctx, |
|
1211 |
+ AVPacket *pkt, |
|
1212 |
+ const AVFrame *frame, |
|
1213 |
+ int *got_packet) |
|
1214 |
+{ |
|
1215 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
1216 |
+ bool get_frame; |
|
1217 |
+ int status; |
|
1218 |
+ CMSampleBufferRef buf = NULL; |
|
1219 |
+ |
|
1220 |
+ if (frame) { |
|
1221 |
+ status = vtenc_send_frame(avctx, vtctx, frame); |
|
1222 |
+ |
|
1223 |
+ if (status) { |
|
1224 |
+ status = AVERROR_EXTERNAL; |
|
1225 |
+ goto end_nopkt; |
|
1226 |
+ } |
|
1227 |
+ |
|
1228 |
+ if (vtctx->frame_ct_in == 0) { |
|
1229 |
+ vtctx->first_pts = frame->pts; |
|
1230 |
+ } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) { |
|
1231 |
+ vtctx->dts_delta = frame->pts - vtctx->first_pts; |
|
1232 |
+ } |
|
1233 |
+ |
|
1234 |
+ vtctx->frame_ct_in++; |
|
1235 |
+ } else if(!vtctx->flushing) { |
|
1236 |
+ vtctx->flushing = true; |
|
1237 |
+ |
|
1238 |
+ status = VTCompressionSessionCompleteFrames(vtctx->session, |
|
1239 |
+ kCMTimeIndefinite); |
|
1240 |
+ |
|
1241 |
+ if (status) { |
|
1242 |
+ av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status); |
|
1243 |
+ status = AVERROR_EXTERNAL; |
|
1244 |
+ goto end_nopkt; |
|
1245 |
+ } |
|
1246 |
+ } |
|
1247 |
+ |
|
1248 |
+ *got_packet = 0; |
|
1249 |
+ get_frame = vtctx->dts_delta >= 0 || !frame; |
|
1250 |
+ if (!get_frame) { |
|
1251 |
+ status = 0; |
|
1252 |
+ goto end_nopkt; |
|
1253 |
+ } |
|
1254 |
+ |
|
1255 |
+ status = vtenc_q_pop(vtctx, !frame, &buf); |
|
1256 |
+ if (status) goto end_nopkt; |
|
1257 |
+ if (!buf) goto end_nopkt; |
|
1258 |
+ |
|
1259 |
+ status = vtenc_cm_to_avpacket(avctx, buf, pkt); |
|
1260 |
+ CFRelease(buf); |
|
1261 |
+ if (status) goto end_nopkt; |
|
1262 |
+ |
|
1263 |
+ *got_packet = 1; |
|
1264 |
+ return 0; |
|
1265 |
+ |
|
1266 |
+end_nopkt: |
|
1267 |
+ av_packet_unref(pkt); |
|
1268 |
+ return status; |
|
1269 |
+} |
|
1270 |
+ |
|
1271 |
+static av_cold int vtenc_close(AVCodecContext *avctx) |
|
1272 |
+{ |
|
1273 |
+ VTEncContext *vtctx = avctx->priv_data; |
|
1274 |
+ |
|
1275 |
+ if(!vtctx->session) return 0; |
|
1276 |
+ |
|
1277 |
+ VTCompressionSessionInvalidate(vtctx->session); |
|
1278 |
+ pthread_cond_destroy(&vtctx->cv_sample_sent); |
|
1279 |
+ pthread_mutex_destroy(&vtctx->lock); |
|
1280 |
+ CFRelease(vtctx->session); |
|
1281 |
+ vtctx->session = NULL; |
|
1282 |
+ |
|
1283 |
+ return 0; |
|
1284 |
+} |
|
1285 |
+ |
|
1286 |
+static const enum AVPixelFormat pix_fmts[] = { |
|
1287 |
+ AV_PIX_FMT_NV12, |
|
1288 |
+#if !TARGET_OS_IPHONE |
|
1289 |
+ AV_PIX_FMT_YUV420P, |
|
1290 |
+#endif |
|
1291 |
+ AV_PIX_FMT_NONE |
|
1292 |
+}; |
|
1293 |
+ |
|
1294 |
+#define OFFSET(x) offsetof(VTEncContext, x) |
|
1295 |
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM |
|
1296 |
+static const AVOption options[] = { |
|
1297 |
+ { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" }, |
|
1298 |
+ { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" }, |
|
1299 |
+ { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" }, |
|
1300 |
+ { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" }, |
|
1301 |
+ |
|
1302 |
+ { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" }, |
|
1303 |
+ { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1304 |
+ { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1305 |
+ { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1306 |
+ { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1307 |
+ { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1308 |
+ { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1309 |
+ { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1310 |
+ { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1311 |
+ { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1312 |
+ { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" }, |
|
1313 |
+ |
|
1314 |
+ { NULL }, |
|
1315 |
+}; |
|
1316 |
+ |
|
1317 |
+static const AVClass h264_videotoolbox_class = { |
|
1318 |
+ .class_name = "h264_videotoolbox", |
|
1319 |
+ .item_name = av_default_item_name, |
|
1320 |
+ .option = options, |
|
1321 |
+ .version = LIBAVUTIL_VERSION_INT, |
|
1322 |
+}; |
|
1323 |
+ |
|
1324 |
+AVCodec ff_h264_videotoolbox_encoder = { |
|
1325 |
+ .name = "h264_videotoolbox", |
|
1326 |
+ .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"), |
|
1327 |
+ .type = AVMEDIA_TYPE_VIDEO, |
|
1328 |
+ .id = AV_CODEC_ID_H264, |
|
1329 |
+ .priv_data_size = sizeof(VTEncContext), |
|
1330 |
+ .pix_fmts = pix_fmts, |
|
1331 |
+ .init = vtenc_init, |
|
1332 |
+ .encode2 = vtenc_frame, |
|
1333 |
+ .close = vtenc_close, |
|
1334 |
+ .capabilities = AV_CODEC_CAP_DELAY, |
|
1335 |
+ .priv_class = &h264_videotoolbox_class, |
|
1336 |
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | |
|
1337 |
+ FF_CODEC_CAP_INIT_CLEANUP, |
|
1338 |
+}; |