Browse code

webmdashenc: Support for live stream manifests

This patch adds support for creating DASH manifests for WebM Live
Streams. It also updates the documentation and adds a fate test to
verify the behavior of the new muxer flag.

Signed-off-by: Vignesh Venkatasubramanian <vigneshv@google.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>

Vignesh Venkatasubramanian authored on 2015/04/01 11:40:01
Showing 4 changed files
... ...
@@ -1210,7 +1210,17 @@ is the @option{global_header} flag.
1210 1210
 
1211 1211
 WebM DASH Manifest muxer.
1212 1212
 
1213
-This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
1213
+This muxer implements the WebM DASH Manifest specification to generate the DASH
1214
+manifest XML. It also supports manifest generation for DASH live streams.
1215
+
1216
+For more information see:
1217
+
1218
+@itemize @bullet
1219
+@item
1220
+WebM DASH Specification: @url{https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification}
1221
+@item
1222
+ISO DASH Specification: @url{http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip}
1223
+@end itemize
1214 1224
 
1215 1225
 @subsection Options
1216 1226
 
... ...
@@ -1221,6 +1231,28 @@ This muxer supports the following options:
1221 1221
 This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
1222 1222
 unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
1223 1223
 audio and video streams. Any number of adaptation sets can be added using this option.
1224
+
1225
+@item live
1226
+Set this to 1 to create a live stream DASH Manifest. Default: 0.
1227
+
1228
+@item chunk_start_index
1229
+Start index of the first chunk. This will go in the @samp{startNumber} attribute
1230
+of the @samp{SegmentTemplate} element in the manifest. Default: 0.
1231
+
1232
+@item chunk_duration_ms
1233
+Duration of each chunk in milliseconds. This will go in the @samp{duration}
1234
+attribute of the @samp{SegmentTemplate} element in the manifest. Default: 1000.
1235
+
1236
+@item utc_timing_url
1237
+URL of the page that will return the UTC timestamp in ISO format. This will go
1238
+in the @samp{value} attribute of the @samp{UTCTiming} element in the manifest.
1239
+Default: None.
1240
+
1241
+@item time_shift_buffer_depth
1242
+Minimum duration (in seconds) of the time shifting buffer for which any Representation is
1243
+guaranteed to be available. This will go in the @samp{timeShiftBufferDepth}
1244
+attribute of the @samp{MPD} element. Default: 60.
1245
+
1224 1246
 @end table
1225 1247
 
1226 1248
 @subsection Example
... ...
@@ -22,8 +22,11 @@
22 22
 /*
23 23
  * WebM DASH Specification:
24 24
  * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
25
+ * ISO DASH Specification:
26
+ * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
25 27
  */
26 28
 
29
+#include <float.h>
27 30
 #include <stdint.h>
28 31
 #include <string.h>
29 32
 
... ...
@@ -34,6 +37,7 @@
34 34
 #include "libavutil/avstring.h"
35 35
 #include "libavutil/dict.h"
36 36
 #include "libavutil/opt.h"
37
+#include "libavutil/time_internal.h"
37 38
 
38 39
 typedef struct AdaptationSet {
39 40
     char id[10];
... ...
@@ -47,6 +51,12 @@ typedef struct WebMDashMuxContext {
47 47
     AdaptationSet *as;
48 48
     int nb_as;
49 49
     int representation_id;
50
+    int is_live;
51
+    int chunk_start_index;
52
+    int chunk_duration;
53
+    char *utc_timing_url;
54
+    double time_shift_buffer_depth;
55
+    int debug_mode;
50 56
 } WebMDashMuxContext;
51 57
 
52 58
 static const char *get_codec_name(int codec_id)
... ...
@@ -79,19 +89,42 @@ static double get_duration(AVFormatContext *s)
79 79
 
80 80
 static void write_header(AVFormatContext *s)
81 81
 {
82
+    WebMDashMuxContext *w = s->priv_data;
82 83
     double min_buffer_time = 1.0;
84
+    time_t local_time;
85
+    struct tm *gmt, gmt_buffer;
86
+    char *gmt_iso = av_malloc(21);
83 87
     avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
84 88
     avio_printf(s->pb, "<MPD\n");
85 89
     avio_printf(s->pb, "  xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
86 90
     avio_printf(s->pb, "  xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
87 91
     avio_printf(s->pb, "  xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
88
-    avio_printf(s->pb, "  type=\"static\"\n");
89
-    avio_printf(s->pb, "  mediaPresentationDuration=\"PT%gS\"\n",
90
-                get_duration(s));
91
-    avio_printf(s->pb, "  minBufferTime=\"PT%gS\"\n",
92
-                min_buffer_time);
93
-    avio_printf(s->pb, "  profiles=\"urn:webm:dash:profile:webm-on-demand:2012\"");
94
-    avio_printf(s->pb, ">\n");
92
+    avio_printf(s->pb, "  type=\"%s\"\n", w->is_live ? "dynamic" : "static");
93
+    if (!w->is_live) {
94
+        avio_printf(s->pb, "  mediaPresentationDuration=\"PT%gS\"\n",
95
+                    get_duration(s));
96
+    }
97
+    avio_printf(s->pb, "  minBufferTime=\"PT%gS\"\n", min_buffer_time);
98
+    avio_printf(s->pb, "  profiles=\"%s\"%s",
99
+                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
100
+                w->is_live ? "\n" : ">\n");
101
+    time(&local_time);
102
+    gmt = gmtime_r(&local_time, &gmt_buffer);
103
+    strftime(gmt_iso, 21, "%FT%TZ", gmt);
104
+    if (w->debug_mode) {
105
+        av_strlcpy(gmt_iso, "", 1);
106
+    }
107
+    if (w->is_live) {
108
+        avio_printf(s->pb, "  availabilityStartTime=\"%s\"\n", gmt_iso);
109
+        avio_printf(s->pb, "  timeShiftBufferDepth=\"PT%gS\"", w->time_shift_buffer_depth);
110
+        avio_printf(s->pb, ">\n");
111
+        avio_printf(s->pb, "<UTCTiming\n");
112
+        avio_printf(s->pb, "  schemeIdUri=\"%s\"\n",
113
+                    w->utc_timing_url ? "urn:mpeg:dash:utc:http-iso:2014" : "urn:mpeg:dash:utc:direct:2012");
114
+        avio_printf(s->pb, "  value=\"%s\"/>\n",
115
+                    w->utc_timing_url ? w->utc_timing_url : gmt_iso);
116
+    }
117
+    av_free(gmt_iso);
95 118
 }
96 119
 
97 120
 static void write_footer(AVFormatContext *s)
... ...
@@ -137,33 +170,47 @@ static int bitstream_switching(AVFormatContext *s, AdaptationSet *as) {
137 137
  * Writes a Representation within an Adaptation Set. Returns 0 on success and
138 138
  * < 0 on failure.
139 139
  */
140
-static int write_representation(AVFormatContext *s, AVStream *stream, int id,
140
+static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
141 141
                                 int output_width, int output_height,
142 142
                                 int output_sample_rate) {
143
+    WebMDashMuxContext *w = s->priv_data;
143 144
     AVDictionaryEntry *irange = av_dict_get(stream->metadata, INITIALIZATION_RANGE, NULL, 0);
144 145
     AVDictionaryEntry *cues_start = av_dict_get(stream->metadata, CUES_START, NULL, 0);
145 146
     AVDictionaryEntry *cues_end = av_dict_get(stream->metadata, CUES_END, NULL, 0);
146 147
     AVDictionaryEntry *filename = av_dict_get(stream->metadata, FILENAME, NULL, 0);
147 148
     AVDictionaryEntry *bandwidth = av_dict_get(stream->metadata, BANDWIDTH, NULL, 0);
148
-    if (!irange || cues_start == NULL || cues_end == NULL || filename == NULL ||
149
-        !bandwidth) {
149
+    if ((w->is_live && (!filename)) ||
150
+        (!w->is_live && (!irange || !cues_start || !cues_end || !filename || !bandwidth))) {
150 151
         return -1;
151 152
     }
152
-    avio_printf(s->pb, "<Representation id=\"%d\"", id);
153
-    avio_printf(s->pb, " bandwidth=\"%s\"", bandwidth->value);
153
+    avio_printf(s->pb, "<Representation id=\"%s\"", id);
154
+    // FIXME: For live, This should be obtained from the input file or as an AVOption.
155
+    avio_printf(s->pb, " bandwidth=\"%s\"",
156
+                w->is_live ? (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO ? "128000" : "1000000") : bandwidth->value);
154 157
     if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
155 158
         avio_printf(s->pb, " width=\"%d\"", stream->codec->width);
156 159
     if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
157 160
         avio_printf(s->pb, " height=\"%d\"", stream->codec->height);
158 161
     if (stream->codec->codec_type = AVMEDIA_TYPE_AUDIO && output_sample_rate)
159 162
         avio_printf(s->pb, " audioSamplingRate=\"%d\"", stream->codec->sample_rate);
160
-    avio_printf(s->pb, ">\n");
161
-    avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
162
-    avio_printf(s->pb, "<SegmentBase\n");
163
-    avio_printf(s->pb, "  indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
164
-    avio_printf(s->pb, "<Initialization\n");
165
-    avio_printf(s->pb, "  range=\"0-%s\" />\n", irange->value);
166
-    avio_printf(s->pb, "</SegmentBase>\n");
163
+    if (w->is_live) {
164
+        // For live streams, Codec and Mime Type always go in the Representation tag.
165
+        avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(stream->codec->codec_id));
166
+        avio_printf(s->pb, " mimeType=\"%s/webm\"",
167
+                    stream->codec->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
168
+        // For live streams, subsegments always start with key frames. So this
169
+        // is always 1.
170
+        avio_printf(s->pb, " startsWithSAP=\"1\"");
171
+        avio_printf(s->pb, ">");
172
+    } else {
173
+        avio_printf(s->pb, ">\n");
174
+        avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
175
+        avio_printf(s->pb, "<SegmentBase\n");
176
+        avio_printf(s->pb, "  indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
177
+        avio_printf(s->pb, "<Initialization\n");
178
+        avio_printf(s->pb, "  range=\"0-%s\" />\n", irange->value);
179
+        avio_printf(s->pb, "</SegmentBase>\n");
180
+    }
167 181
     avio_printf(s->pb, "</Representation>\n");
168 182
     return 0;
169 183
 }
... ...
@@ -208,6 +255,51 @@ static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as) {
208 208
 }
209 209
 
210 210
 /*
211
+ * Parses a live header filename and computes the representation id,
212
+ * initialization pattern and the media pattern. Pass NULL if you don't want to
213
+ * compute any of those 3. Returns 0 on success and non-zero on failure.
214
+ *
215
+ * Name of the header file should conform to the following pattern:
216
+ * <file_description>_<representation_id>.hdr where <file_description> can be
217
+ * anything. The chunks should be named according to the following pattern:
218
+ * <file_description>_<representation_id>_<chunk_number>.chk
219
+ */
220
+static int parse_filename(char *filename, char **representation_id,
221
+                          char **initialization_pattern, char **media_pattern) {
222
+    char *underscore_pos = NULL;
223
+    char *period_pos = NULL;
224
+    char *temp_pos = NULL;
225
+    char *filename_str = av_strdup(filename);
226
+    if (!filename_str) return AVERROR(ENOMEM);
227
+    temp_pos = av_stristr(filename_str, "_");
228
+    while (temp_pos) {
229
+        underscore_pos = temp_pos + 1;
230
+        temp_pos = av_stristr(temp_pos + 1, "_");
231
+    }
232
+    if (!underscore_pos) return -1;
233
+    period_pos = av_stristr(underscore_pos, ".");
234
+    if (!period_pos) return -1;
235
+    *(underscore_pos - 1) = 0;
236
+    if (representation_id) {
237
+        *representation_id = av_malloc(period_pos - underscore_pos + 1);
238
+        if (!(*representation_id)) return AVERROR(ENOMEM);
239
+        av_strlcpy(*representation_id, underscore_pos, period_pos - underscore_pos + 1);
240
+    }
241
+    if (initialization_pattern) {
242
+        *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
243
+                                              filename_str);
244
+        if (!(*initialization_pattern)) return AVERROR(ENOMEM);
245
+    }
246
+    if (media_pattern) {
247
+        *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
248
+                                     filename_str);
249
+        if (!(*media_pattern)) return AVERROR(ENOMEM);
250
+    }
251
+    av_free(filename_str);
252
+    return 0;
253
+}
254
+
255
+/*
211 256
  * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
212 257
  */
213 258
 static int write_adaptation_set(AVFormatContext *s, int as_index)
... ...
@@ -222,13 +314,14 @@ static int write_adaptation_set(AVFormatContext *s, int as_index)
222 222
 
223 223
     // Width, Height and Sample Rate will go in the AdaptationSet tag if they
224 224
     // are the same for all contained Representations. otherwise, they will go
225
-    // on their respective Representation tag.
225
+    // on their respective Representation tag. For live streams, they always go
226
+    // in the Representation tag.
226 227
     int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
227 228
     if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
228
-      width_in_as = check_matching_width(s, as);
229
-      height_in_as = check_matching_height(s, as);
229
+      width_in_as = !w->is_live && check_matching_width(s, as);
230
+      height_in_as = !w->is_live && check_matching_height(s, as);
230 231
     } else {
231
-      sample_rate_in_as = check_matching_sample_rate(s, as);
232
+      sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
232 233
     }
233 234
 
234 235
     avio_printf(s->pb, "<AdaptationSet id=\"%s\"", as->id);
... ...
@@ -249,19 +342,53 @@ static int write_adaptation_set(AVFormatContext *s, int as_index)
249 249
     avio_printf(s->pb, " bitstreamSwitching=\"%s\"",
250 250
                 boolean[bitstream_switching(s, as)]);
251 251
     avio_printf(s->pb, " subsegmentAlignment=\"%s\"",
252
-                boolean[subsegment_alignment(s, as)]);
252
+                boolean[w->is_live || subsegment_alignment(s, as)]);
253 253
 
254 254
     for (i = 0; i < as->nb_streams; i++) {
255 255
         AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
256 256
                                             CLUSTER_KEYFRAME, NULL, 0);
257
-        if (!kf || !strncmp(kf->value, "0", 1)) subsegmentStartsWithSAP = 0;
257
+        if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
258 258
     }
259 259
     avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
260 260
     avio_printf(s->pb, ">\n");
261 261
 
262
+    if (w->is_live) {
263
+        AVDictionaryEntry *filename =
264
+            av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0);
265
+        char *initialization_pattern = NULL;
266
+        char *media_pattern = NULL;
267
+        int ret = parse_filename(filename->value, NULL, &initialization_pattern,
268
+                                 &media_pattern);
269
+        if (ret) return ret;
270
+        avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
271
+                    codec->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
272
+        avio_printf(s->pb, "<SegmentTemplate");
273
+        avio_printf(s->pb, " timescale=\"1000\"");
274
+        avio_printf(s->pb, " duration=\"%d\"", w->chunk_duration);
275
+        avio_printf(s->pb, " media=\"%s\"", media_pattern);
276
+        avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
277
+        avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
278
+        avio_printf(s->pb, "/>\n");
279
+        av_free(initialization_pattern);
280
+        av_free(media_pattern);
281
+    }
282
+
262 283
     for (i = 0; i < as->nb_streams; i++) {
263
-        write_representation(s, s->streams[as->streams[i]], w->representation_id++,
284
+        char *representation_id = NULL;
285
+        if (w->is_live) {
286
+            AVDictionaryEntry *filename =
287
+                av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0);
288
+            if (!filename ||
289
+                parse_filename(filename->value, &representation_id, NULL, NULL)) {
290
+                return -1;
291
+            }
292
+        } else {
293
+            representation_id = av_asprintf("%d", w->representation_id++);
294
+            if (!representation_id) return -1;
295
+        }
296
+        write_representation(s, s->streams[as->streams[i]], representation_id,
264 297
                              !width_in_as, !height_in_as, !sample_rate_in_as);
298
+        av_free(representation_id);
265 299
     }
266 300
     avio_printf(s->pb, "</AdaptationSet>\n");
267 301
     return 0;
... ...
@@ -333,7 +460,9 @@ static int webm_dash_manifest_write_header(AVFormatContext *s)
333 333
     write_header(s);
334 334
     avio_printf(s->pb, "<Period id=\"0\"");
335 335
     avio_printf(s->pb, " start=\"PT%gS\"", start);
336
-    avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
336
+    if (!w->is_live) {
337
+        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
338
+    }
337 339
     avio_printf(s->pb, " >\n");
338 340
 
339 341
     for (i = 0; i < w->nb_as; i++) {
... ...
@@ -364,6 +493,12 @@ static int webm_dash_manifest_write_trailer(AVFormatContext *s)
364 364
 #define OFFSET(x) offsetof(WebMDashMuxContext, x)
365 365
 static const AVOption options[] = {
366 366
     { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
367
+    { "debug_mode", "[private option - users should never set this]. set this to 1 to create deterministic output", OFFSET(debug_mode), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
368
+    { "live", "set this to 1 to create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
369
+    { "chunk_start_index",  "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
370
+    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
371
+    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
372
+    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
367 373
     { NULL },
368 374
 };
369 375
 
... ...
@@ -43,6 +43,9 @@ fate-webm-dash-manifest-unaligned-audio-streams: CMD = run ffmpeg -f webm_dash_m
43 43
 FATE_VP8-$(call DEMDEC, WEBM_DASH_MANIFEST, VP8) += fate-webm-dash-manifest-representations
44 44
 fate-webm-dash-manifest-representations: CMD = run ffmpeg -f webm_dash_manifest -i $(TARGET_SAMPLES)/vp8/dash_video1.webm -f webm_dash_manifest -i $(TARGET_SAMPLES)/vp8/dash_video4.webm -c copy -map 0 -map 1 -f webm_dash_manifest -adaptation_sets "id=0,streams=0,1" -
45 45
 
46
+FATE_VP8-$(call DEMDEC, WEBM_DASH_MANIFEST, VP8) += fate-webm-dash-manifest-live
47
+fate-webm-dash-manifest-live: CMD = run ffmpeg -f webm_dash_manifest -live 1 -i $(TARGET_SAMPLES)/vp8/dash_live_video_360.hdr -f webm_dash_manifest -live 1 -i $(TARGET_SAMPLES)/vp8/dash_live_audio_171.hdr -c copy -map 0 -map 1 -f webm_dash_manifest -live 1 -adaptation_sets "id=0,streams=0 id=1,streams=1" -chunk_start_index 1 -chunk_duration_ms 5000 -time_shift_buffer_depth 7200 -debug_mode 1 -
48
+
46 49
 FATE_SAMPLES_AVCONV += $(FATE_VP6-yes)
47 50
 fate-vp6: $(FATE_VP6-yes)
48 51
 
49 52
new file mode 100644
... ...
@@ -0,0 +1,26 @@
0
+<?xml version="1.0" encoding="UTF-8"?>
1
+<MPD
2
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3
+  xmlns="urn:mpeg:DASH:schema:MPD:2011"
4
+  xsi:schemaLocation="urn:mpeg:DASH:schema:MPD:2011"
5
+  type="dynamic"
6
+  minBufferTime="PT1S"
7
+  profiles="urn:mpeg:dash:profile:isoff-live:2011"
8
+  availabilityStartTime=""
9
+  timeShiftBufferDepth="PT7200S">
10
+<UTCTiming
11
+  schemeIdUri="urn:mpeg:dash:utc:direct:2012"
12
+  value=""/>
13
+<Period id="0" start="PT0S" >
14
+<AdaptationSet id="0" mimeType="video/webm" codecs="vp9" bitstreamSwitching="true" subsegmentAlignment="true" subsegmentStartsWithSAP="1">
15
+<ContentComponent id="1" type="video"/>
16
+<SegmentTemplate timescale="1000" duration="5000" media="dash_live_video_$RepresentationID$_$Number$.chk" startNumber="1" initialization="dash_live_video_$RepresentationID$.hdr"/>
17
+<Representation id="360" bandwidth="1000000" width="640" height="360" codecs="vp9" mimeType="video/webm" startsWithSAP="1"></Representation>
18
+</AdaptationSet>
19
+<AdaptationSet id="1" mimeType="audio/webm" codecs="vorbis" bitstreamSwitching="true" subsegmentAlignment="true" subsegmentStartsWithSAP="1">
20
+<ContentComponent id="1" type="audio"/>
21
+<SegmentTemplate timescale="1000" duration="5000" media="dash_live_audio_$RepresentationID$_$Number$.chk" startNumber="1" initialization="dash_live_audio_$RepresentationID$.hdr"/>
22
+<Representation id="171" bandwidth="128000" audioSamplingRate="32000" codecs="vorbis" mimeType="audio/webm" startsWithSAP="1"></Representation>
23
+</AdaptationSet>
24
+</Period>
25
+</MPD>