Shaka Packager SDK
webm_cluster_parser.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <packager/media/formats/webm/webm_cluster_parser.h>

#include <algorithm>
#include <vector>

#include <absl/base/internal/endian.h>
#include <absl/log/check.h>
#include <absl/log/log.h>

#include <packager/macros/logging.h>
#include <packager/media/base/timestamp.h>
#include <packager/media/codecs/vp8_parser.h>
#include <packager/media/codecs/vp9_parser.h>
#include <packager/media/codecs/webvtt_util.h>
#include <packager/media/formats/webm/webm_constants.h>
#include <packager/media/formats/webm/webm_crypto_helpers.h>
#include <packager/media/formats/webm/webm_webvtt_parser.h>

namespace shaka {
namespace media {
namespace {

const int64_t kMicrosecondsPerMillisecond = 1000;

}  // namespace

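// |timecode_scale| below is the WebM TimecodeScale element value: nanoseconds
// per timecode tick (1,000,000, i.e. 1 ms, by default). Dividing by
// kMicrosecondsPerMillisecond converts it to |timecode_multiplier_|, the
// number of microseconds per tick, matching the microsecond timestamps used
// elsewhere in this file (e.g. the ms-to-microsecond conversions in
// GetDurationEstimate()).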
WebMClusterParser::WebMClusterParser(
    int64_t timecode_scale,
    std::shared_ptr<AudioStreamInfo> audio_stream_info,
    std::shared_ptr<VideoStreamInfo> video_stream_info,
    const VPCodecConfigurationRecord& vp_config,
    int64_t audio_default_duration,
    int64_t video_default_duration,
    const WebMTracksParser::TextTracks& text_tracks,
    const std::set<int64_t>& ignored_tracks,
    const std::string& audio_encryption_key_id,
    const std::string& video_encryption_key_id,
    const MediaParser::NewMediaSampleCB& new_sample_cb,
    const MediaParser::InitCB& init_cb,
    KeySource* decryption_key_source)
    : timecode_multiplier_(timecode_scale /
                           static_cast<double>(kMicrosecondsPerMillisecond)),
      audio_stream_info_(audio_stream_info),
      video_stream_info_(video_stream_info),
      vp_config_(vp_config),
      ignored_tracks_(ignored_tracks),
      audio_encryption_key_id_(audio_encryption_key_id),
      video_encryption_key_id_(video_encryption_key_id),
      parser_(kWebMIdCluster, this),
      initialized_(false),
      init_cb_(init_cb),
      cluster_start_time_(kNoTimestamp),
      audio_(audio_stream_info ? audio_stream_info->track_id() : -1,
             false,
             audio_default_duration,
             new_sample_cb),
      video_(video_stream_info ? video_stream_info->track_id() : -1,
             true,
             video_default_duration,
             new_sample_cb) {
  if (decryption_key_source) {
    decryptor_source_.reset(new DecryptorSource(decryption_key_source));
    if (audio_stream_info_)
      audio_stream_info_->set_is_encrypted(false);
    if (video_stream_info_)
      video_stream_info_->set_is_encrypted(false);
  }
  for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
       it != text_tracks.end();
       ++it) {
    text_track_map_.insert(std::make_pair(
        it->first, Track(it->first, false, kNoTimestamp, new_sample_cb)));
  }
}

WebMClusterParser::~WebMClusterParser() {}

void WebMClusterParser::Reset() {
  last_block_timecode_ = -1;
  cluster_timecode_ = -1;
  cluster_start_time_ = kNoTimestamp;
  cluster_ended_ = false;
  parser_.Reset();
  audio_.Reset();
  video_.Reset();
  ResetTextTracks();
}

bool WebMClusterParser::Flush() {
  // Estimate the duration of the last frame if necessary.
  bool audio_result = audio_.ApplyDurationEstimateIfNeeded();
  bool video_result = video_.ApplyDurationEstimateIfNeeded();
  Reset();
  return audio_result && video_result;
}

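// Feeds cluster bytes to the internal WebM list parser and returns its result
// directly; a negative value signals a parse error. Once the complete Cluster
// element has been consumed, |cluster_ended_| is set and the per-cluster state
// is reset so the next Parse() call can start a fresh cluster.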
int WebMClusterParser::Parse(const uint8_t* buf, int size) {
  int result = parser_.Parse(buf, size);

  if (result < 0) {
    cluster_ended_ = false;
    return result;
  }

  cluster_ended_ = parser_.IsParsingComplete();
  if (cluster_ended_) {
    // If there were no buffers in this cluster, set the cluster start time to
    // be the |cluster_timecode_|.
    if (cluster_start_time_ == kNoTimestamp) {
      // If the cluster did not even have a |cluster_timecode_|, signal parse
      // error.
      if (cluster_timecode_ < 0)
        return -1;

      cluster_start_time_ = cluster_timecode_ * timecode_multiplier_;
    }

    // Reset the parser if we're done parsing so that
    // it is ready to accept another cluster on the next
    // call.
    parser_.Reset();

    last_block_timecode_ = -1;
    cluster_timecode_ = -1;
  }

  return result;
}

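// WebMParserClient callbacks. OnListStart()/OnListEnd() bracket child lists
// such as BlockGroup and BlockAdditions: per-list state is (re)initialized
// here and consumed when the corresponding list ends.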
WebMParserClient* WebMClusterParser::OnListStart(int id) {
  if (id == kWebMIdCluster) {
    cluster_timecode_ = -1;
    cluster_start_time_ = kNoTimestamp;
  } else if (id == kWebMIdBlockGroup) {
    block_data_.reset();
    block_data_size_ = -1;
    block_duration_ = -1;
    discard_padding_ = -1;
    discard_padding_set_ = false;
    reference_block_set_ = false;
  } else if (id == kWebMIdBlockAdditions) {
    block_add_id_ = -1;
    block_additional_data_.reset();
    block_additional_data_size_ = 0;
  }

  return this;
}

bool WebMClusterParser::OnListEnd(int id) {
  if (id != kWebMIdBlockGroup)
    return true;

  // Make sure the BlockGroup actually had a Block.
  if (block_data_size_ == -1) {
    LOG(ERROR) << "Block missing from BlockGroup.";
    return false;
  }

  bool result = ParseBlock(
      false, block_data_.get(), block_data_size_, block_additional_data_.get(),
      block_additional_data_size_, block_duration_,
      discard_padding_set_ ? discard_padding_ : 0, reference_block_set_);
  block_data_.reset();
  block_data_size_ = -1;
  block_duration_ = -1;
  block_add_id_ = -1;
  block_additional_data_.reset();
  block_additional_data_size_ = 0;
  discard_padding_ = -1;
  discard_padding_set_ = false;
  reference_block_set_ = false;
  return result;
}

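// Stores the unsigned-integer child elements this parser cares about
// (Timecode, BlockDuration, BlockAddID); a repeated element is rejected
// because its destination has already been set.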
bool WebMClusterParser::OnUInt(int id, int64_t val) {
  int64_t* dst;
  switch (id) {
    case kWebMIdTimecode:
      dst = &cluster_timecode_;
      break;
    case kWebMIdBlockDuration:
      dst = &block_duration_;
      break;
    case kWebMIdBlockAddID:
      dst = &block_add_id_;
      break;
    default:
      return true;
  }
  if (*dst != -1)
    return false;
  *dst = val;
  return true;
}

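// A (Simple)Block payload starts with the track number as an EBML varint
// (only the single-byte form, i.e. track numbers up to 127, is handled here),
// followed by a signed 16-bit big-endian timecode relative to the Cluster
// timecode and a flags byte; the frame data begins at offset 4.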
bool WebMClusterParser::ParseBlock(bool is_simple_block,
                                   const uint8_t* buf,
                                   int size,
                                   const uint8_t* additional,
                                   int additional_size,
                                   int duration,
                                   int64_t discard_padding,
                                   bool reference_block_set) {
  if (size < 4)
    return false;

  // Return an error if the trackNum > 127. We just aren't
  // going to support large track numbers right now.
  if (!(buf[0] & 0x80)) {
    LOG(ERROR) << "TrackNumber over 127 not supported";
    return false;
  }

  int track_num = buf[0] & 0x7f;
  int timecode = buf[1] << 8 | buf[2];
  int flags = buf[3] & 0xff;
  int lacing = (flags >> 1) & 0x3;

  if (lacing) {
    LOG(ERROR) << "Lacing " << lacing << " is not supported yet.";
    return false;
  }

  // Sign extend negative timecode offsets.
  if (timecode & 0x8000)
    timecode |= ~0xffff;

  // The first bit of the flags is set when a SimpleBlock contains only
  // keyframes. If this is a Block, then keyframe is inferred by the absence of
  // the ReferenceBlock Element.
  // http://www.matroska.org/technical/specs/index.html
  bool is_key_frame =
      is_simple_block ? (flags & 0x80) != 0 : !reference_block_set;

  const uint8_t* frame_data = buf + 4;
  int frame_size = size - (frame_data - buf);
  return OnBlock(is_simple_block, track_num, timecode, duration, frame_data,
                 frame_size, additional, additional_size, discard_padding,
                 is_key_frame);
}

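// Binary child elements of the Cluster. A SimpleBlock is parsed immediately;
// Block, BlockAdditional, DiscardPadding and ReferenceBlock are stashed in
// member state and combined into a single ParseBlock() call when the
// enclosing BlockGroup ends (see OnListEnd()).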
bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
  switch (id) {
    case kWebMIdSimpleBlock:
      return ParseBlock(true, data, size, NULL, 0, -1, 0, false);

    case kWebMIdBlock:
      if (block_data_) {
        LOG(ERROR) << "More than 1 Block in a BlockGroup is not "
                      "supported.";
        return false;
      }
      block_data_.reset(new uint8_t[size]);
      memcpy(block_data_.get(), data, size);
      block_data_size_ = size;
      return true;

    case kWebMIdBlockAdditional: {
      uint64_t block_add_id = absl::big_endian::FromHost64(block_add_id_);
      if (block_additional_data_) {
        // TODO: Technically, more than 1 BlockAdditional is allowed as per
        // matroska spec. But for now we don't have a use case to support
        // parsing of such files. Take a look at this again when such a case
        // arises.
        LOG(ERROR) << "More than 1 BlockAdditional in a "
                      "BlockGroup is not supported.";
        return false;
      }
      // First 8 bytes of side_data in DecoderBuffer is the BlockAddID
      // element's value in Big Endian format. This is done to mimic ffmpeg
      // demuxer's behavior.
      block_additional_data_size_ = size + sizeof(block_add_id);
      block_additional_data_.reset(new uint8_t[block_additional_data_size_]);
      memcpy(block_additional_data_.get(), &block_add_id,
             sizeof(block_add_id));
      memcpy(block_additional_data_.get() + 8, data, size);
      return true;
    }
    case kWebMIdDiscardPadding: {
      if (discard_padding_set_ || size <= 0 || size > 8)
        return false;
      discard_padding_set_ = true;

      // Read in the big-endian integer.
      discard_padding_ = static_cast<int8_t>(data[0]);
      for (int i = 1; i < size; ++i)
        discard_padding_ = (discard_padding_ << 8) | data[i];

      return true;
    }
    case kWebMIdReferenceBlock:
      // We use ReferenceBlock to determine whether the current Block contains
      // a keyframe or not. Other than that, we don't care about the value of
      // the ReferenceBlock element itself.
      reference_block_set_ = true;
      return true;
    default:
      return true;
  }
}

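// Turns a parsed (Simple)Block into a MediaSample for the matching track: the
// track number is mapped to the audio, video or text track, the Cluster-
// relative timecode is converted to an absolute timestamp in microseconds,
// encrypted payloads are handled, and the first emitted sample also triggers
// |init_cb_| with the finalized stream info.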
bool WebMClusterParser::OnBlock(bool is_simple_block,
                                int track_num,
                                int timecode,
                                int block_duration,
                                const uint8_t* data,
                                int size,
                                const uint8_t* additional,
                                int additional_size,
                                int64_t /*discard_padding*/,
                                bool is_key_frame) {
  DCHECK_GE(size, 0);
  if (cluster_timecode_ == -1) {
    LOG(ERROR) << "Got a block before cluster timecode.";
    return false;
  }

  // TODO: Should relative negative timecode offsets be rejected? Or only when
  // the absolute timecode is negative? See http://crbug.com/271794
  if (timecode < 0) {
    LOG(ERROR) << "Got a block with negative timecode offset " << timecode;
    return false;
  }

  if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
    LOG(ERROR) << "Got a block with a timecode before the previous block.";
    return false;
  }

  Track* track = NULL;
  StreamType stream_type = kStreamUnknown;
  std::string encryption_key_id;
  if (track_num == audio_.track_num()) {
    track = &audio_;
    encryption_key_id = audio_encryption_key_id_;
    stream_type = kStreamAudio;
  } else if (track_num == video_.track_num()) {
    track = &video_;
    encryption_key_id = video_encryption_key_id_;
    stream_type = kStreamVideo;
  } else if (ignored_tracks_.find(track_num) != ignored_tracks_.end()) {
    return true;
  } else if (Track* const text_track = FindTextTrack(track_num)) {
    if (is_simple_block)  // BlockGroup is required for WebVTT cues
      return false;
    if (block_duration < 0)  // not specified
      return false;
    track = text_track;
    stream_type = kStreamText;
  } else {
    LOG(ERROR) << "Unexpected track number " << track_num;
    return false;
  }
  DCHECK_NE(stream_type, kStreamUnknown);

  last_block_timecode_ = timecode;

  int64_t timestamp = (cluster_timecode_ + timecode) * timecode_multiplier_;

  std::shared_ptr<MediaSample> buffer;
  if (stream_type != kStreamText) {
    // Every encrypted Block has a signal byte and IV prepended to it. Current
    // encrypted WebM request for comments specification is here
    // http://wiki.webmproject.org/encryption/webm-encryption-rfc
    std::unique_ptr<DecryptConfig> decrypt_config;
    int data_offset = 0;
    if (!encryption_key_id.empty() &&
        !WebMCreateDecryptConfig(
            data, size,
            reinterpret_cast<const uint8_t*>(encryption_key_id.data()),
            encryption_key_id.size(),
            &decrypt_config, &data_offset)) {
      return false;
    }

    const uint8_t* media_data = data + data_offset;
    const size_t media_data_size = size - data_offset;
    // Use a dummy data size of 0 to avoid copying overhead.
    // Actual media data is set later.
    const size_t kDummyDataSize = 0;
    buffer = MediaSample::CopyFrom(media_data, kDummyDataSize, additional,
                                   additional_size, is_key_frame);

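    // Three cases follow: clear content (no DecryptConfig) is copied as-is;
    // encrypted content without a decryption key source is passed through
    // with its DecryptConfig attached so it can be decrypted downstream; and
    // encrypted content with a key source is decrypted here.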
    if (decrypt_config) {
      if (!decryptor_source_) {
        buffer->SetData(media_data, media_data_size);
        // If the demuxer does not have the decryptor_source_, store
        // decrypt_config so that the demuxed sample can be decrypted later.
        buffer->set_decrypt_config(std::move(decrypt_config));
        buffer->set_is_encrypted(true);
      } else {
        std::shared_ptr<uint8_t> decrypted_media_data(
            new uint8_t[media_data_size], std::default_delete<uint8_t[]>());
        if (!decryptor_source_->DecryptSampleBuffer(
                decrypt_config.get(), media_data, media_data_size,
                decrypted_media_data.get())) {
          LOG(ERROR) << "Cannot decrypt samples";
          return false;
        }
        buffer->TransferData(std::move(decrypted_media_data), media_data_size);
      }
    } else {
      buffer->SetData(media_data, media_data_size);
    }
  } else {
    std::string id, settings, content;
    WebMWebVTTParser::Parse(data, size, &id, &settings, &content);

    std::vector<uint8_t> side_data;
    MakeSideData(id.begin(), id.end(),
                 settings.begin(), settings.end(),
                 &side_data);

    buffer = MediaSample::CopyFrom(
        reinterpret_cast<const uint8_t*>(content.data()), content.length(),
        &side_data[0], side_data.size(), true);
  }

  buffer->set_dts(timestamp);
  buffer->set_pts(timestamp);
  if (cluster_start_time_ == kNoTimestamp)
    cluster_start_time_ = timestamp;
  buffer->set_duration(block_duration > 0
                           ? (block_duration * timecode_multiplier_)
                           : kNoTimestamp);

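  // The init callback is run only once, after the stream info is complete.
  // For VP8/VP9 this means deriving the codec string and VP codec
  // configuration record from the first video key frame, since WebM typically
  // carries no usable CodecPrivate for these codecs (AV1 configuration comes
  // from CodecPrivate instead, as noted below).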
  if (init_cb_ && !initialized_) {
    std::vector<std::shared_ptr<StreamInfo>> streams;
    if (audio_stream_info_)
      streams.push_back(audio_stream_info_);
    if (video_stream_info_) {
      if (stream_type == kStreamVideo) {
        // Setup codec string and codec config for VP8 and VP9.
        // Codec config for AV1 is already retrieved from WebM CodecPrivate
        // instead of extracted from the bit stream.
        if (video_stream_info_->codec() != kCodecAV1) {
          std::unique_ptr<VPxParser> vpx_parser;
          switch (video_stream_info_->codec()) {
            case kCodecVP8:
              vpx_parser.reset(new VP8Parser);
              break;
            case kCodecVP9:
              vpx_parser.reset(new VP9Parser);
              break;
            default:
              NOTIMPLEMENTED()
                  << "Unsupported codec " << video_stream_info_->codec();
              return false;
          }
          std::vector<VPxFrameInfo> vpx_frames;
          if (!vpx_parser->Parse(buffer->data(), buffer->data_size(),
                                 &vpx_frames)) {
            LOG(ERROR) << "Failed to parse vpx frame.";
            return false;
          }
          if (vpx_frames.size() != 1u || !vpx_frames[0].is_keyframe) {
            LOG(ERROR) << "The first frame should be a key frame.";
            return false;
          }

          vp_config_.MergeFrom(vpx_parser->codec_config());
          video_stream_info_->set_codec_string(
              vp_config_.GetCodecString(video_stream_info_->codec()));
          std::vector<uint8_t> config_serialized;
          vp_config_.WriteMP4(&config_serialized);
          video_stream_info_->set_codec_config(config_serialized);
        }

        streams.push_back(video_stream_info_);
        init_cb_(streams);
        initialized_ = true;
      }
    } else {
      init_cb_(streams);
      initialized_ = true;
    }
  }

  return track->EmitBuffer(buffer);
}

WebMClusterParser::Track::Track(
    int track_num,
    bool is_video,
    int64_t default_duration,
    const MediaParser::NewMediaSampleCB& new_sample_cb)
    : track_num_(track_num),
      is_video_(is_video),
      default_duration_(default_duration),
      estimated_next_frame_duration_(kNoTimestamp),
      new_sample_cb_(new_sample_cb) {
  DCHECK(default_duration_ == kNoTimestamp || default_duration_ > 0);
}

WebMClusterParser::Track::~Track() {}

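// Buffers that arrive without a duration (e.g. SimpleBlocks, which carry no
// BlockDuration) are held back until the next buffer for the track arrives,
// at which point the duration is derived from the difference of the two
// presentation timestamps. The final held-back buffer of a cluster is instead
// flushed via ApplyDurationEstimateIfNeeded() with an estimated duration.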
bool WebMClusterParser::Track::EmitBuffer(
    const std::shared_ptr<MediaSample>& buffer) {
  DVLOG(2) << "EmitBuffer() : " << track_num_
           << " ts " << buffer->pts()
           << " dur " << buffer->duration()
           << " kf " << buffer->is_key_frame()
           << " size " << buffer->data_size();

  if (last_added_buffer_missing_duration_.get()) {
    int64_t derived_duration =
        buffer->pts() - last_added_buffer_missing_duration_->pts();
    last_added_buffer_missing_duration_->set_duration(derived_duration);

    DVLOG(2) << "EmitBuffer() : applied derived duration to held-back buffer : "
             << " ts "
             << last_added_buffer_missing_duration_->pts()
             << " dur "
             << last_added_buffer_missing_duration_->duration()
             << " kf " << last_added_buffer_missing_duration_->is_key_frame()
             << " size " << last_added_buffer_missing_duration_->data_size();
    std::shared_ptr<MediaSample> updated_buffer =
        last_added_buffer_missing_duration_;
    last_added_buffer_missing_duration_ = NULL;
    if (!EmitBufferHelp(updated_buffer))
      return false;
  }

  if (buffer->duration() == kNoTimestamp) {
    last_added_buffer_missing_duration_ = buffer;
    DVLOG(2) << "EmitBuffer() : holding back buffer that is missing duration";
    return true;
  }

  return EmitBufferHelp(buffer);
}

bool WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
  if (!last_added_buffer_missing_duration_.get())
    return true;

  int64_t estimated_duration = GetDurationEstimate();
  last_added_buffer_missing_duration_->set_duration(estimated_duration);

  VLOG(1) << "Track " << track_num_
          << ": Estimating WebM block duration to be "
          << estimated_duration / 1000
          << "ms for the last (Simple)Block in the Cluster for this Track. Use "
             "BlockGroups with BlockDurations at the end of each Track in a "
             "Cluster to avoid estimation.";

  DVLOG(2) << " new dur : ts " << last_added_buffer_missing_duration_->pts()
           << " dur " << last_added_buffer_missing_duration_->duration()
           << " kf " << last_added_buffer_missing_duration_->is_key_frame()
           << " size " << last_added_buffer_missing_duration_->data_size();

  // Don't use the applied duration as a future estimation (don't use
  // EmitBufferHelp() here.)
  if (!new_sample_cb_(track_num_, last_added_buffer_missing_duration_))
    return false;
  last_added_buffer_missing_duration_ = NULL;
  return true;
}

void WebMClusterParser::Track::Reset() {
  last_added_buffer_missing_duration_ = NULL;
}

bool WebMClusterParser::Track::EmitBufferHelp(
    const std::shared_ptr<MediaSample>& buffer) {
  DCHECK(!last_added_buffer_missing_duration_.get());

  int64_t duration = buffer->duration();
  if (duration < 0 || duration == kNoTimestamp) {
    LOG(ERROR) << "Invalid buffer duration: " << duration;
    return false;
  }

  // The estimated frame duration is the maximum non-zero duration since the
  // last initialization segment.
  if (duration > 0) {
    int64_t orig_duration_estimate = estimated_next_frame_duration_;
    if (estimated_next_frame_duration_ == kNoTimestamp) {
      estimated_next_frame_duration_ = duration;
    } else {
      estimated_next_frame_duration_ =
          std::max(duration, estimated_next_frame_duration_);
    }

    if (orig_duration_estimate != estimated_next_frame_duration_) {
      DVLOG(3) << "Updated duration estimate:"
               << orig_duration_estimate
               << " -> "
               << estimated_next_frame_duration_
               << " at timestamp: "
               << buffer->dts();
    }
  }

  return new_sample_cb_(track_num_, buffer);
}

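// Fallback order for estimating the duration of a cluster's final buffer: the
// track default duration provided at construction, then the largest non-zero
// duration observed so far, then a hardcoded per-stream-type default.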
int64_t WebMClusterParser::Track::GetDurationEstimate() {
  int64_t duration = kNoTimestamp;
  if (default_duration_ != kNoTimestamp) {
    duration = default_duration_;
    DVLOG(3) << __FUNCTION__ << " : using track default duration " << duration;
  } else if (estimated_next_frame_duration_ != kNoTimestamp) {
    duration = estimated_next_frame_duration_;
    DVLOG(3) << __FUNCTION__ << " : using estimated duration " << duration;
  } else {
    if (is_video_) {
      duration = kDefaultVideoBufferDurationInMs * kMicrosecondsPerMillisecond;
    } else {
      duration = kDefaultAudioBufferDurationInMs * kMicrosecondsPerMillisecond;
    }
    DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration "
             << duration;
  }

  DCHECK_GT(duration, 0);
  DCHECK_NE(duration, kNoTimestamp);
  return duration;
}

void WebMClusterParser::ResetTextTracks() {
  for (TextTrackMap::iterator it = text_track_map_.begin();
       it != text_track_map_.end();
       ++it) {
    it->second.Reset();
  }
}

WebMClusterParser::Track*
WebMClusterParser::FindTextTrack(int track_num) {
  const TextTrackMap::iterator it = text_track_map_.find(track_num);

  if (it == text_track_map_.end())
    return NULL;

  return &it->second;
}

}  // namespace media
}  // namespace shaka