Shaka Packager SDK
Loading...
Searching...
No Matches
webm_cluster_parser.cc
1// Copyright 2014 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <packager/media/formats/webm/webm_cluster_parser.h>
6
7#include <algorithm>
8#include <vector>
9
10#include <absl/base/internal/endian.h>
11#include <absl/log/check.h>
12#include <absl/log/log.h>
13
14#include <packager/macros/logging.h>
15#include <packager/media/base/timestamp.h>
16#include <packager/media/codecs/vp8_parser.h>
17#include <packager/media/codecs/vp9_parser.h>
18#include <packager/media/codecs/webvtt_util.h>
19#include <packager/media/formats/webm/webm_constants.h>
20#include <packager/media/formats/webm/webm_crypto_helpers.h>
21#include <packager/media/formats/webm/webm_webvtt_parser.h>
22
23namespace shaka {
24namespace media {
namespace {

// Conversion factor between milliseconds and microseconds; used to turn the
// WebM TimecodeScale into the per-tick timestamp multiplier.
const int64_t kMicrosecondsPerMillisecond = 1000;

}  // namespace
30
32 int64_t timecode_scale,
33 std::shared_ptr<AudioStreamInfo> audio_stream_info,
34 std::shared_ptr<VideoStreamInfo> video_stream_info,
35 const VPCodecConfigurationRecord& vp_config,
36 int64_t audio_default_duration,
37 int64_t video_default_duration,
38 const WebMTracksParser::TextTracks& text_tracks,
39 const std::set<int64_t>& ignored_tracks,
40 const std::string& audio_encryption_key_id,
41 const std::string& video_encryption_key_id,
42 const MediaParser::NewMediaSampleCB& new_sample_cb,
43 const MediaParser::InitCB& init_cb,
44 KeySource* decryption_key_source)
45 : timecode_multiplier_(timecode_scale /
46 static_cast<double>(kMicrosecondsPerMillisecond)),
47 audio_stream_info_(audio_stream_info),
48 video_stream_info_(video_stream_info),
49 vp_config_(vp_config),
50 ignored_tracks_(ignored_tracks),
51 audio_encryption_key_id_(audio_encryption_key_id),
52 video_encryption_key_id_(video_encryption_key_id),
53 parser_(kWebMIdCluster, this),
54 initialized_(false),
55 init_cb_(init_cb),
56 cluster_start_time_(kNoTimestamp),
57 audio_(audio_stream_info ? audio_stream_info->track_id() : -1,
58 false,
59 audio_default_duration,
60 new_sample_cb),
61 video_(video_stream_info ? video_stream_info->track_id() : -1,
62 true,
63 video_default_duration,
64 new_sample_cb) {
65 if (decryption_key_source) {
66 decryptor_source_.reset(new DecryptorSource(decryption_key_source));
67 if (audio_stream_info_)
68 audio_stream_info_->set_is_encrypted(false);
69 if (video_stream_info_)
70 video_stream_info_->set_is_encrypted(false);
71 }
72 for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
73 it != text_tracks.end(); ++it) {
74 text_track_map_.insert(std::make_pair(
75 it->first, Track(it->first, false, kNoTimestamp, new_sample_cb)));
76 }
77}
78
79WebMClusterParser::~WebMClusterParser() {}
80
82 last_block_timecode_ = -1;
83 cluster_timecode_ = -1;
84 cluster_start_time_ = kNoTimestamp;
85 cluster_ended_ = false;
86 parser_.Reset();
87 audio_.Reset();
88 video_.Reset();
89 ResetTextTracks();
90}
91
93 // Estimate the duration of the last frame if necessary.
94 bool audio_result = audio_.ApplyDurationEstimateIfNeeded();
95 bool video_result = video_.ApplyDurationEstimateIfNeeded();
96 Reset();
97 return audio_result && video_result;
98}
99
// Feeds |size| bytes at |buf| to the internal EBML list parser. Returns the
// number of bytes consumed, or a negative value on parse error.
int WebMClusterParser::Parse(const uint8_t* buf, int size) {
  int result = parser_.Parse(buf, size);

  if (result < 0) {
    // A parse error leaves the cluster in an unknown, incomplete state.
    cluster_ended_ = false;
    return result;
  }

  cluster_ended_ = parser_.IsParsingComplete();
  if (cluster_ended_) {
    // If there were no buffers in this cluster, set the cluster start time to
    // be the |cluster_timecode_|.
    if (cluster_start_time_ == kNoTimestamp) {
      // If the cluster did not even have a |cluster_timecode_|, signal parse
      // error.
      if (cluster_timecode_ < 0)
        return -1;

      cluster_start_time_ = cluster_timecode_ * timecode_multiplier_;
    }

    // Reset the parser if we're done parsing so that
    // it is ready to accept another cluster on the next
    // call.
    parser_.Reset();

    last_block_timecode_ = -1;
    cluster_timecode_ = -1;
  }

  return result;
}
132
133WebMParserClient* WebMClusterParser::OnListStart(int id) {
134 if (id == kWebMIdCluster) {
135 cluster_timecode_ = -1;
136 cluster_start_time_ = kNoTimestamp;
137 } else if (id == kWebMIdBlockGroup) {
138 block_data_.reset();
139 block_data_size_ = -1;
140 block_duration_ = -1;
141 discard_padding_ = -1;
142 discard_padding_set_ = false;
143 reference_block_set_ = false;
144 } else if (id == kWebMIdBlockAdditions) {
145 block_add_id_ = -1;
146 block_additional_data_.reset();
147 block_additional_data_size_ = 0;
148 }
149
150 return this;
151}
152
153bool WebMClusterParser::OnListEnd(int id) {
154 if (id != kWebMIdBlockGroup)
155 return true;
156
157 // Make sure the BlockGroup actually had a Block.
158 if (block_data_size_ == -1) {
159 LOG(ERROR) << "Block missing from BlockGroup.";
160 return false;
161 }
162
163 bool result = ParseBlock(
164 false, block_data_.get(), block_data_size_, block_additional_data_.get(),
165 block_additional_data_size_, block_duration_,
166 discard_padding_set_ ? discard_padding_ : 0, reference_block_set_);
167 block_data_.reset();
168 block_data_size_ = -1;
169 block_duration_ = -1;
170 block_add_id_ = -1;
171 block_additional_data_.reset();
172 block_additional_data_size_ = 0;
173 discard_padding_ = -1;
174 discard_padding_set_ = false;
175 reference_block_set_ = false;
176 return result;
177}
178
179bool WebMClusterParser::OnUInt(int id, int64_t val) {
180 int64_t* dst;
181 switch (id) {
182 case kWebMIdTimecode:
183 dst = &cluster_timecode_;
184 break;
185 case kWebMIdBlockDuration:
186 dst = &block_duration_;
187 break;
188 case kWebMIdBlockAddID:
189 dst = &block_add_id_;
190 break;
191 default:
192 return true;
193 }
194 if (*dst != -1)
195 return false;
196 *dst = val;
197 return true;
198}
199
// Decodes the common (Simple)Block header — track number, relative timecode,
// flags — rejecting lacing and multi-byte track numbers, then forwards the
// frame payload to OnBlock().
bool WebMClusterParser::ParseBlock(bool is_simple_block,
                                   const uint8_t* buf,
                                   int size,
                                   const uint8_t* additional,
                                   int additional_size,
                                   int duration,
                                   int64_t discard_padding,
                                   bool reference_block_set) {
  // Header needs track number (1 byte), timecode (2 bytes), flags (1 byte).
  if (size < 4)
    return false;

  // Return an error if the trackNum > 127. We just aren't
  // going to support large track numbers right now.
  if (!(buf[0] & 0x80)) {
    LOG(ERROR) << "TrackNumber over 127 not supported";
    return false;
  }

  int track_num = buf[0] & 0x7f;
  // 16-bit big-endian timecode, relative to the cluster timecode.
  int timecode = buf[1] << 8 | buf[2];
  int flags = buf[3] & 0xff;
  // Bits 1-2 of the flags select the lacing mode; 0 means no lacing.
  int lacing = (flags >> 1) & 0x3;

  if (lacing) {
    LOG(ERROR) << "Lacing " << lacing << " is not supported yet.";
    return false;
  }

  // Sign extend negative timecode offsets.
  if (timecode & 0x8000)
    timecode |= ~0xffff;

  // The first bit of the flags is set when a SimpleBlock contains only
  // keyframes. If this is a Block, then keyframe is inferred by the absence of
  // the ReferenceBlock Element.
  // http://www.matroska.org/technical/specs/index.html
  bool is_key_frame =
      is_simple_block ? (flags & 0x80) != 0 : !reference_block_set;

  const uint8_t* frame_data = buf + 4;
  int frame_size = size - (frame_data - buf);
  return OnBlock(is_simple_block, track_num, timecode, duration, frame_data,
                 frame_size, additional, additional_size, discard_padding,
                 is_key_frame);
}
245
// Handles binary child elements: SimpleBlock (parsed immediately), Block and
// BlockAdditional (buffered until the BlockGroup closes), DiscardPadding, and
// ReferenceBlock. Unknown ids are accepted and ignored.
bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
  switch (id) {
    case kWebMIdSimpleBlock:
      // SimpleBlocks are self-contained; parse them right away.
      return ParseBlock(true, data, size, NULL, 0, -1, 0, false);

    case kWebMIdBlock:
      if (block_data_) {
        LOG(ERROR) << "More than 1 Block in a BlockGroup is not "
                      "supported.";
        return false;
      }
      // Copy the Block; it is parsed when the enclosing BlockGroup ends so
      // that sibling elements (duration, padding, references) are available.
      block_data_.reset(new uint8_t[size]);
      memcpy(block_data_.get(), data, size);
      block_data_size_ = size;
      return true;

    case kWebMIdBlockAdditional: {
      // NOTE(review): if the BlockAddID element appears after this
      // BlockAdditional, |block_add_id_| is still -1 here — verify element
      // ordering assumptions against the Matroska spec.
      uint64_t block_add_id = absl::big_endian::FromHost64(block_add_id_);
      if (block_additional_data_) {
        // TODO: Technically, more than 1 BlockAdditional is allowed as per
        // matroska spec. But for now we don't have a use case to support
        // parsing of such files. Take a look at this again when such a case
        // arises.
        LOG(ERROR) << "More than 1 BlockAdditional in a "
                      "BlockGroup is not supported.";
        return false;
      }
      // First 8 bytes of side_data in DecoderBuffer is the BlockAddID
      // element's value in Big Endian format. This is done to mimic ffmpeg
      // demuxer's behavior.
      block_additional_data_size_ = size + sizeof(block_add_id);
      block_additional_data_.reset(new uint8_t[block_additional_data_size_]);
      memcpy(block_additional_data_.get(), &block_add_id, sizeof(block_add_id));
      memcpy(block_additional_data_.get() + 8, data, size);
      return true;
    }
    case kWebMIdDiscardPadding: {
      // At most one DiscardPadding, encoded in 1 to 8 bytes.
      if (discard_padding_set_ || size <= 0 || size > 8)
        return false;
      discard_padding_set_ = true;

      // Read in the big-endian integer. The first byte is cast to int8_t so
      // the sign is extended through the remaining shifts.
      discard_padding_ = static_cast<int8_t>(data[0]);
      for (int i = 1; i < size; ++i)
        discard_padding_ = (discard_padding_ << 8) | data[i];

      return true;
    }
    case kWebMIdReferenceBlock:
      // We use ReferenceBlock to determine whether the current Block contains a
      // keyframe or not. Other than that, we don't care about the value of the
      // ReferenceBlock element itself.
      reference_block_set_ = true;
      return true;
    default:
      return true;
  }
}
304
// Emits one frame to the track it belongs to. Builds a MediaSample from the
// block payload (decrypting it when a decryptor source is available) and, on
// the first suitable sample, fires |init_cb_| with the stream info.
bool WebMClusterParser::OnBlock(bool is_simple_block,
                                int track_num,
                                int timecode,
                                int block_duration,
                                const uint8_t* data,
                                int size,
                                const uint8_t* additional,
                                int additional_size,
                                int64_t /*discard_padding*/,
                                bool is_key_frame) {
  DCHECK_GE(size, 0);
  // A block is meaningless without the cluster timecode it is relative to.
  if (cluster_timecode_ == -1) {
    LOG(ERROR) << "Got a block before cluster timecode.";
    return false;
  }

  // TODO: Should relative negative timecode offsets be rejected? Or only when
  // the absolute timecode is negative? See http://crbug.com/271794
  if (timecode < 0) {
    LOG(ERROR) << "Got a block with negative timecode offset " << timecode;
    return false;
  }

  // Blocks within a cluster must have non-decreasing timecodes.
  if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
    LOG(ERROR) << "Got a block with a timecode before the previous block.";
    return false;
  }

  // Route the block to its audio, video, or text track.
  Track* track = NULL;
  StreamType stream_type = kStreamUnknown;
  std::string encryption_key_id;
  if (track_num == audio_.track_num()) {
    track = &audio_;
    encryption_key_id = audio_encryption_key_id_;
    stream_type = kStreamAudio;
  } else if (track_num == video_.track_num()) {
    track = &video_;
    encryption_key_id = video_encryption_key_id_;
    stream_type = kStreamVideo;
  } else if (ignored_tracks_.find(track_num) != ignored_tracks_.end()) {
    // Silently drop blocks from tracks the caller asked to ignore.
    return true;
  } else if (Track* const text_track = FindTextTrack(track_num)) {
    if (is_simple_block)  // BlockGroup is required for WebVTT cues
      return false;
    if (block_duration < 0)  // not specified
      return false;
    track = text_track;
    stream_type = kStreamText;
  } else {
    LOG(ERROR) << "Unexpected track number " << track_num;
    return false;
  }
  DCHECK_NE(stream_type, kStreamUnknown);

  last_block_timecode_ = timecode;

  // Convert the cluster-relative timecode to an absolute timestamp.
  int64_t timestamp = (cluster_timecode_ + timecode) * timecode_multiplier_;

  std::shared_ptr<MediaSample> buffer;
  if (stream_type != kStreamText) {
    // Every encrypted Block has a signal byte and IV prepended to it. Current
    // encrypted WebM request for comments specification is here
    // http://wiki.webmproject.org/encryption/webm-encryption-rfc
    std::unique_ptr<DecryptConfig> decrypt_config;
    int data_offset = 0;
    if (!encryption_key_id.empty() &&
        !WebMCreateDecryptConfig(
            data, size,
            reinterpret_cast<const uint8_t*>(encryption_key_id.data()),
            encryption_key_id.size(), &decrypt_config, &data_offset)) {
      return false;
    }

    const uint8_t* media_data = data + data_offset;
    const size_t media_data_size = size - data_offset;
    // Use a dummy data size of 0 to avoid copying overhead.
    // Actual media data is set later.
    const size_t kDummyDataSize = 0;
    buffer = MediaSample::CopyFrom(media_data, kDummyDataSize, additional,
                                   additional_size, is_key_frame);

    if (decrypt_config) {
      if (!decryptor_source_) {
        buffer->SetData(media_data, media_data_size);
        // If the demuxer does not have the decryptor_source_, store
        // decrypt_config so that the demuxed sample can be decrypted later.
        buffer->set_decrypt_config(std::move(decrypt_config));
        buffer->set_is_encrypted(true);
      } else {
        // Decrypt in place-of-copy: write the clear bytes into a fresh
        // buffer and hand its ownership to the sample.
        std::shared_ptr<uint8_t> decrypted_media_data(
            new uint8_t[media_data_size], std::default_delete<uint8_t[]>());
        if (!decryptor_source_->DecryptSampleBuffer(
                decrypt_config.get(), media_data, media_data_size,
                decrypted_media_data.get())) {
          LOG(ERROR) << "Cannot decrypt samples";
          return false;
        }
        buffer->TransferData(std::move(decrypted_media_data), media_data_size);
      }
    } else {
      buffer->SetData(media_data, media_data_size);
    }
  } else {
    // Text block: parse the WebVTT cue and pack its id/settings as side data.
    std::string id, settings, content;
    WebMWebVTTParser::Parse(data, size, &id, &settings, &content);

    std::vector<uint8_t> side_data;
    MakeSideData(id.begin(), id.end(), settings.begin(), settings.end(),
                 &side_data);

    buffer = MediaSample::CopyFrom(
        reinterpret_cast<const uint8_t*>(content.data()), content.length(),
        &side_data[0], side_data.size(), true);
  }

  buffer->set_dts(timestamp);
  buffer->set_pts(timestamp);
  if (cluster_start_time_ == kNoTimestamp)
    cluster_start_time_ = timestamp;
  // A non-positive block_duration means "unknown" (kNoTimestamp); the real
  // duration is later derived from the next buffer or estimated at Flush().
  buffer->set_duration(block_duration > 0
                           ? (block_duration * timecode_multiplier_)
                           : kNoTimestamp);

  if (init_cb_ && !initialized_) {
    std::vector<std::shared_ptr<StreamInfo>> streams;
    if (audio_stream_info_)
      streams.push_back(audio_stream_info_);
    if (video_stream_info_) {
      // When a video stream exists, initialization waits for the first video
      // block so the codec configuration can be extracted from it.
      if (stream_type == kStreamVideo) {
        // Setup codec string and codec config for VP8 and VP9.
        // Codec config for AV1 is already retrieved from WebM CodecPrivate
        // instead of extracted from the bit stream.
        if (video_stream_info_->codec() != kCodecAV1) {
          std::unique_ptr<VPxParser> vpx_parser;
          switch (video_stream_info_->codec()) {
            case kCodecVP8:
              vpx_parser.reset(new VP8Parser);
              break;
            case kCodecVP9:
              vpx_parser.reset(new VP9Parser);
              break;
            default:
              NOTIMPLEMENTED()
                  << "Unsupported codec " << video_stream_info_->codec();
              return false;
          }
          std::vector<VPxFrameInfo> vpx_frames;
          if (!vpx_parser->Parse(buffer->data(), buffer->data_size(),
                                 &vpx_frames)) {
            LOG(ERROR) << "Failed to parse vpx frame.";
            return false;
          }
          if (vpx_frames.size() != 1u || !vpx_frames[0].is_keyframe) {
            LOG(ERROR) << "The first frame should be a key frame.";
            return false;
          }

          vp_config_.MergeFrom(vpx_parser->codec_config());
          video_stream_info_->set_codec_string(
              vp_config_.GetCodecString(video_stream_info_->codec()));
          std::vector<uint8_t> config_serialized;
          vp_config_.WriteMP4(&config_serialized);
          video_stream_info_->set_codec_config(config_serialized);
        }

        streams.push_back(video_stream_info_);
        init_cb_(streams);
        initialized_ = true;
      }
    } else {
      // Audio-only: initialize on the first emitted block.
      init_cb_(streams);
      initialized_ = true;
    }
  }

  return track->EmitBuffer(buffer);
}
482
// Per-track sample emitter state.
// |track_num| is the WebM track number, or -1 when the stream is absent.
// |is_video| selects the hardcoded fallback used when estimating durations.
// |default_duration| is the track's default frame duration (same units as
// sample timestamps), or kNoTimestamp if unspecified.
WebMClusterParser::Track::Track(
    int track_num,
    bool is_video,
    int64_t default_duration,
    const MediaParser::NewMediaSampleCB& new_sample_cb)
    : track_num_(track_num),
      is_video_(is_video),
      default_duration_(default_duration),
      estimated_next_frame_duration_(kNoTimestamp),
      new_sample_cb_(new_sample_cb) {
  // A specified default duration must be strictly positive.
  DCHECK(default_duration_ == kNoTimestamp || default_duration_ > 0);
}
495
496WebMClusterParser::Track::~Track() {}
497
498bool WebMClusterParser::Track::EmitBuffer(
499 const std::shared_ptr<MediaSample>& buffer) {
500 DVLOG(2) << "EmitBuffer() : " << track_num_ << " ts " << buffer->pts()
501 << " dur " << buffer->duration() << " kf " << buffer->is_key_frame()
502 << " size " << buffer->data_size();
503
504 if (last_added_buffer_missing_duration_.get()) {
505 int64_t derived_duration =
506 buffer->pts() - last_added_buffer_missing_duration_->pts();
507 last_added_buffer_missing_duration_->set_duration(derived_duration);
508
509 DVLOG(2) << "EmitBuffer() : applied derived duration to held-back buffer : "
510 << " ts " << last_added_buffer_missing_duration_->pts() << " dur "
511 << last_added_buffer_missing_duration_->duration() << " kf "
512 << last_added_buffer_missing_duration_->is_key_frame() << " size "
513 << last_added_buffer_missing_duration_->data_size();
514 std::shared_ptr<MediaSample> updated_buffer =
515 last_added_buffer_missing_duration_;
516 last_added_buffer_missing_duration_ = NULL;
517 if (!EmitBufferHelp(updated_buffer))
518 return false;
519 }
520
521 if (buffer->duration() == kNoTimestamp) {
522 last_added_buffer_missing_duration_ = buffer;
523 DVLOG(2) << "EmitBuffer() : holding back buffer that is missing duration";
524 return true;
525 }
526
527 return EmitBufferHelp(buffer);
528}
529
// If a buffer is still held back without a duration (it was the last one in
// the cluster), assigns it an estimated duration and emits it directly.
// Returns false if the downstream callback rejects the buffer.
bool WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
  if (!last_added_buffer_missing_duration_.get())
    return true;

  int64_t estimated_duration = GetDurationEstimate();
  last_added_buffer_missing_duration_->set_duration(estimated_duration);

  VLOG(1) << "Track " << track_num_ << ": Estimating WebM block duration to be "
          << estimated_duration / 1000
          << "ms for the last (Simple)Block in the Cluster for this Track. Use "
             "BlockGroups with BlockDurations at the end of each Track in a "
             "Cluster to avoid estimation.";

  DVLOG(2) << " new dur : ts " << last_added_buffer_missing_duration_->pts()
           << " dur " << last_added_buffer_missing_duration_->duration()
           << " kf " << last_added_buffer_missing_duration_->is_key_frame()
           << " size " << last_added_buffer_missing_duration_->data_size();

  // Don't use the applied duration as a future estimation (don't use
  // EmitBufferHelp() here.)
  if (!new_sample_cb_(track_num_, last_added_buffer_missing_duration_))
    return false;
  last_added_buffer_missing_duration_ = NULL;
  return true;
}
555
556void WebMClusterParser::Track::Reset() {
557 last_added_buffer_missing_duration_ = NULL;
558}
559
560bool WebMClusterParser::Track::EmitBufferHelp(
561 const std::shared_ptr<MediaSample>& buffer) {
562 DCHECK(!last_added_buffer_missing_duration_.get());
563
564 int64_t duration = buffer->duration();
565 if (duration < 0 || duration == kNoTimestamp) {
566 LOG(ERROR) << "Invalid buffer duration: " << duration;
567 return false;
568 }
569
570 // The estimated frame duration is the maximum non-zero duration since the
571 // last initialization segment.
572 if (duration > 0) {
573 int64_t orig_duration_estimate = estimated_next_frame_duration_;
574 if (estimated_next_frame_duration_ == kNoTimestamp) {
575 estimated_next_frame_duration_ = duration;
576 } else {
577 estimated_next_frame_duration_ =
578 std::max(duration, estimated_next_frame_duration_);
579 }
580
581 if (orig_duration_estimate != estimated_next_frame_duration_) {
582 DVLOG(3) << "Updated duration estimate:" << orig_duration_estimate
583 << " -> " << estimated_next_frame_duration_
584 << " at timestamp: " << buffer->dts();
585 }
586 }
587
588 return new_sample_cb_(track_num_, buffer);
589}
590
591int64_t WebMClusterParser::Track::GetDurationEstimate() {
592 int64_t duration = kNoTimestamp;
593 if (default_duration_ != kNoTimestamp) {
594 duration = default_duration_;
595 DVLOG(3) << __FUNCTION__ << " : using track default duration " << duration;
596 } else if (estimated_next_frame_duration_ != kNoTimestamp) {
597 duration = estimated_next_frame_duration_;
598 DVLOG(3) << __FUNCTION__ << " : using estimated duration " << duration;
599 } else {
600 if (is_video_) {
601 duration = kDefaultVideoBufferDurationInMs * kMicrosecondsPerMillisecond;
602 } else {
603 duration = kDefaultAudioBufferDurationInMs * kMicrosecondsPerMillisecond;
604 }
605 DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration "
606 << duration;
607 }
608
609 DCHECK_GT(duration, 0);
610 DCHECK_NE(duration, kNoTimestamp);
611 return duration;
612}
613
614void WebMClusterParser::ResetTextTracks() {
615 for (TextTrackMap::iterator it = text_track_map_.begin();
616 it != text_track_map_.end(); ++it) {
617 it->second.Reset();
618 }
619}
620
621WebMClusterParser::Track* WebMClusterParser::FindTextTrack(int track_num) {
622 const TextTrackMap::iterator it = text_track_map_.find(track_num);
623
624 if (it == text_track_map_.end())
625 return NULL;
626
627 return &it->second;
628}
629
630} // namespace media
631} // namespace shaka
DecryptorSource wraps KeySource and is responsible for decryptor management.
KeySource is responsible for encryption key acquisition.
Definition key_source.h:53
std::function< bool(uint32_t track_id, std::shared_ptr< MediaSample > media_sample)> NewMediaSampleCB
std::function< void(const std::vector< std::shared_ptr< StreamInfo > > &stream_info)> InitCB
static std::shared_ptr< MediaSample > CopyFrom(const uint8_t *data, size_t size, bool is_key_frame)
Class for parsing or writing VP codec configuration record.
void WriteMP4(std::vector< uint8_t > *data) const
void MergeFrom(const VPCodecConfigurationRecord &other)
WebMClusterParser(int64_t timecode_scale, std::shared_ptr< AudioStreamInfo > audio_stream_info, std::shared_ptr< VideoStreamInfo > video_stream_info, const VPCodecConfigurationRecord &vp_config, int64_t audio_default_duration, int64_t video_default_duration, const WebMTracksParser::TextTracks &text_tracks, const std::set< int64_t > &ignored_tracks, const std::string &audio_encryption_key_id, const std::string &video_encryption_key_id, const MediaParser::NewMediaSampleCB &new_sample_cb, const MediaParser::InitCB &init_cb, KeySource *decryption_key_source)
int Parse(const uint8_t *buf, int size)
void Reset()
Resets the parser state so it can accept a new cluster.
void Reset()
Resets the state of the parser so it can start parsing a new list.
int Parse(const uint8_t *buf, int size)
static void Parse(const uint8_t *payload, int payload_size, std::string *id, std::string *settings, std::string *content)
Utility function to parse the WebVTT cue from a byte stream.
All methods that are declared virtual are virtual solely so they can be mocked in tests.