This source file includes the following definitions.
- kDefaultDuration
- WriteInt64
- MATCHER_P
- MATCHER
- OnReadDone
- OnReadDone_AbortExpected
- OnReadDone_EOSExpected
- OnSeekDone_OKExpected
- LogFunc
- kDefaultFirstCluster
- kDefaultSecondCluster
- CreateNewDemuxer
- CreateInitSegment
- AddId
- AddId
- AppendData
- AppendCluster
- AppendCluster
- AppendCluster
- AppendSingleStreamCluster
- AppendSingleStreamCluster
- AppendData
- AppendDataInPieces
- AppendDataInPieces
- AppendInitSegment
- AppendInitSegmentWithSourceId
- AppendInitSegmentWithEncryptedInfo
- AppendGarbage
- InitDoneCalled
- AppendEmptyCluster
- CreateInitDoneCB
- CreateInitDoneCB
- InitDemuxer
- InitDemuxerWithEncryptionInfo
- InitDemuxerAudioAndVideoSourcesText
- InitDemuxerAudioAndVideoSources
- InitDemuxerWithConfigChangeData
- ShutdownDemuxer
- AddSimpleBlock
- GenerateCluster
- AddVideoBlockGroup
- GenerateCluster
- GenerateSingleStreamCluster
- Read
- ReadAudio
- ReadVideo
- GenerateExpectedReads
- GenerateExpectedReads
- GenerateSingleStreamExpectedReads
- GenerateAudioStreamExpectedReads
- GenerateVideoStreamExpectedReads
- GenerateEmptyCluster
- CheckExpectedRanges
- CheckExpectedRanges
- StoreStatusAndBuffer
- ReadUntilNotOkOrEndOfStream
- ExpectEndOfStream
- ExpectRead
- ExpectConfigChanged
- CheckExpectedBuffers
- ParseWebMFile
- ParseWebMFile
- DemuxerNeedKey
- Seek
- MarkEndOfStream
- SetTimestampOffset
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- video_read_done_
- RequestReads
- CheckIfReadDonesWereCalled
- OnEndOfStreamReadDone
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
- TEST_P
#include <algorithm>
#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
#include "media/filters/chunk_demuxer.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_crypto_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::AnyNumber;
using ::testing::Exactly;
using ::testing::InSequence;
using ::testing::NotNull;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArgumentPointee;
using ::testing::Values;
using ::testing::_;
namespace media {
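// Header of a WebM TRACKS element: the 4-byte element ID (0x1654AE6B) followed
// by an 8-byte size field that CreateInitSegment() fills in via WriteInt64().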
const uint8 kTracksHeader[] = {
0x16, 0x54, 0xAE, 0x6B,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
const uint8 kVP8Keyframe[] = {
0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
};
const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
const int kTracksHeaderSize = sizeof(kTracksHeader);
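// Offsets and widths (in bytes) of the size fields inside kTracksHeader and
// the audio/video TrackEntry test data that CreateInitSegment() rewrites while
// assembling the init segment.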
const int kTracksSizeOffset = 4;
const int kAudioTrackSizeOffset = 1;
const int kAudioTrackSizeWidth = 8;
const int kAudioTrackEntryHeaderSize =
kAudioTrackSizeOffset + kAudioTrackSizeWidth;
const int kVideoTrackSizeOffset = 1;
const int kVideoTrackSizeWidth = 8;
const int kVideoTrackEntryHeaderSize =
kVideoTrackSizeOffset + kVideoTrackSizeWidth;
const int kVideoTrackNum = 1;
const int kAudioTrackNum = 2;
const int kTextTrackNum = 3;
const int kAudioBlockDuration = 23;
const int kVideoBlockDuration = 33;
const int kTextBlockDuration = 100;
const int kBlockSize = 10;
const char kSourceId[] = "SourceId";
const char kDefaultFirstClusterRange[] = "{ [0,46) }";
const int kDefaultFirstClusterEndTimestamp = 66;
const int kDefaultSecondClusterEndTimestamp = 132;
base::TimeDelta kDefaultDuration() {
return base::TimeDelta::FromMilliseconds(201224);
}
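// Writes |number| as an 8-byte EBML-coded size: a 0x01 length marker followed
// by the value in big-endian order across the remaining seven bytes (hence the
// 2^56 - 1 upper bound in the DCHECK).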
static void WriteInt64(uint8* buffer, int64 number) {
DCHECK(number >= 0 && number < GG_LONGLONG(0x00FFFFFFFFFFFFFF));
buffer[0] = 0x01;
int64 tmp = number;
for (int i = 7; i > 0; i--) {
buffer[i] = tmp & 0xff;
tmp >>= 8;
}
}
MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
return arg.get() && !arg->end_of_stream() &&
arg->timestamp().InMilliseconds() == timestamp_in_ms;
}
MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
static void OnReadDone(const base::TimeDelta& expected_time,
bool* called,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kOk);
EXPECT_EQ(expected_time, buffer->timestamp());
*called = true;
}
static void OnReadDone_AbortExpected(
bool* called, DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kAborted);
EXPECT_EQ(NULL, buffer.get());
*called = true;
}
static void OnReadDone_EOSExpected(bool* called,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kOk);
EXPECT_TRUE(buffer->end_of_stream());
*called = true;
}
static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
EXPECT_EQ(status, PIPELINE_OK);
*called = true;
}
static void LogFunc(const std::string& str) { DVLOG(1) << str; }
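// The fixture is parameterized on the bool passed to ChunkDemuxer::AddId() as
// |use_legacy_frame_processor_|; the TEST_P cases are presumably instantiated
// for both values further down in the file.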
class ChunkDemuxerTest : public ::testing::TestWithParam<bool> {
protected:
enum CodecsIndex {
AUDIO,
VIDEO,
MAX_CODECS_INDEX
};
scoped_ptr<Cluster> kDefaultFirstCluster() {
return GenerateCluster(0, 4);
}
scoped_ptr<Cluster> kDefaultSecondCluster() {
return GenerateCluster(46, 66, 5);
}
ChunkDemuxerTest()
: append_window_end_for_next_append_(kInfiniteDuration()) {
use_legacy_frame_processor_ = GetParam();
CreateNewDemuxer();
}
void CreateNewDemuxer() {
base::Closure open_cb =
base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
Demuxer::NeedKeyCB need_key_cb =
base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
demuxer_.reset(
new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), false));
}
virtual ~ChunkDemuxerTest() {
ShutdownDemuxer();
}
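// Assembles a WebM initialization segment in |buffer| by concatenating the
// EBML header, Info, and Tracks elements read from test data files, appending
// ContentEncodings data for encrypted streams and patching the affected size
// fields.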
void CreateInitSegment(int stream_flags,
bool is_audio_encrypted, bool is_video_encrypted,
scoped_ptr<uint8[]>* buffer,
int* size) {
bool has_audio = (stream_flags & HAS_AUDIO) != 0;
bool has_video = (stream_flags & HAS_VIDEO) != 0;
bool has_text = (stream_flags & HAS_TEXT) != 0;
scoped_refptr<DecoderBuffer> ebml_header;
scoped_refptr<DecoderBuffer> info;
scoped_refptr<DecoderBuffer> audio_track_entry;
scoped_refptr<DecoderBuffer> video_track_entry;
scoped_refptr<DecoderBuffer> audio_content_encodings;
scoped_refptr<DecoderBuffer> video_content_encodings;
scoped_refptr<DecoderBuffer> text_track_entry;
ebml_header = ReadTestDataFile("webm_ebml_element");
info = ReadTestDataFile("webm_info_element");
int tracks_element_size = 0;
if (has_audio) {
audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
tracks_element_size += audio_track_entry->data_size();
if (is_audio_encrypted) {
audio_content_encodings = ReadTestDataFile("webm_content_encodings");
tracks_element_size += audio_content_encodings->data_size();
}
}
if (has_video) {
video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
tracks_element_size += video_track_entry->data_size();
if (is_video_encrypted) {
video_content_encodings = ReadTestDataFile("webm_content_encodings");
tracks_element_size += video_content_encodings->data_size();
}
}
if (has_text) {
const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
"\x83\x81\x11\x86\x92"
"D_WEBVTT/SUBTITLES";
const int len = strlen(str);
DCHECK_EQ(len, 32);
const uint8* const buf = reinterpret_cast<const uint8*>(str);
text_track_entry = DecoderBuffer::CopyFrom(buf, len);
tracks_element_size += text_track_entry->data_size();
}
*size = ebml_header->data_size() + info->data_size() +
kTracksHeaderSize + tracks_element_size;
buffer->reset(new uint8[*size]);
uint8* buf = buffer->get();
memcpy(buf, ebml_header->data(), ebml_header->data_size());
buf += ebml_header->data_size();
memcpy(buf, info->data(), info->data_size());
buf += info->data_size();
memcpy(buf, kTracksHeader, kTracksHeaderSize);
WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
buf += kTracksHeaderSize;
if (has_audio) {
memcpy(buf, audio_track_entry->data(),
audio_track_entry->data_size());
if (is_audio_encrypted) {
memcpy(buf + audio_track_entry->data_size(),
audio_content_encodings->data(),
audio_content_encodings->data_size());
WriteInt64(buf + kAudioTrackSizeOffset,
audio_track_entry->data_size() +
audio_content_encodings->data_size() -
kAudioTrackEntryHeaderSize);
buf += audio_content_encodings->data_size();
}
buf += audio_track_entry->data_size();
}
if (has_video) {
memcpy(buf, video_track_entry->data(),
video_track_entry->data_size());
if (is_video_encrypted) {
memcpy(buf + video_track_entry->data_size(),
video_content_encodings->data(),
video_content_encodings->data_size());
WriteInt64(buf + kVideoTrackSizeOffset,
video_track_entry->data_size() +
video_content_encodings->data_size() -
kVideoTrackEntryHeaderSize);
buf += video_content_encodings->data_size();
}
buf += video_track_entry->data_size();
}
if (has_text) {
memcpy(buf, text_track_entry->data(),
text_track_entry->data_size());
buf += text_track_entry->data_size();
}
}
ChunkDemuxer::Status AddId() {
return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
}
ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
bool has_audio = (stream_flags & HAS_AUDIO) != 0;
bool has_video = (stream_flags & HAS_VIDEO) != 0;
std::vector<std::string> codecs;
std::string type;
if (has_audio) {
codecs.push_back("vorbis");
type = "audio/webm";
}
if (has_video) {
codecs.push_back("vp8");
type = "video/webm";
}
if (!has_audio && !has_video) {
return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
}
return demuxer_->AddId(source_id, type, codecs,
use_legacy_frame_processor_);
}
void AppendData(const uint8* data, size_t length) {
AppendData(kSourceId, data, length);
}
void AppendCluster(const std::string& source_id,
scoped_ptr<Cluster> cluster) {
AppendData(source_id, cluster->data(), cluster->size());
}
void AppendCluster(scoped_ptr<Cluster> cluster) {
AppendCluster(kSourceId, cluster.Pass());
}
void AppendCluster(int timecode, int block_count) {
AppendCluster(GenerateCluster(timecode, block_count));
}
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
int timecode, int block_count) {
int block_duration = 0;
switch (track_number) {
case kVideoTrackNum:
block_duration = kVideoBlockDuration;
break;
case kAudioTrackNum:
block_duration = kAudioBlockDuration;
break;
case kTextTrackNum:
block_duration = kTextBlockDuration;
break;
}
ASSERT_NE(block_duration, 0);
int end_timecode = timecode + block_count * block_duration;
AppendCluster(source_id,
GenerateSingleStreamCluster(
timecode, end_timecode, track_number, block_duration));
}
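// Appends a cluster described by a space-delimited list of block timestamps in
// milliseconds, e.g. "0K 23 46", where a trailing 'K' marks a keyframe. Text
// blocks are written as BlockGroups so they carry kTextBlockDuration.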
void AppendSingleStreamCluster(const std::string& source_id, int track_number,
const std::string& cluster_description) {
std::vector<std::string> timestamps;
base::SplitString(cluster_description, ' ', &timestamps);
ClusterBuilder cb;
std::vector<uint8> data(10);
for (size_t i = 0; i < timestamps.size(); ++i) {
std::string timestamp_str = timestamps[i];
int block_flags = 0;
if (EndsWith(timestamp_str, "K", true)) {
block_flags = kWebMFlagKeyframe;
timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
}
int timestamp_in_ms;
CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
if (i == 0)
cb.SetClusterTimecode(timestamp_in_ms);
if (track_number == kTextTrackNum) {
cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
block_flags, &data[0], data.size());
} else {
cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
&data[0], data.size());
}
}
AppendCluster(source_id, cb.Finish());
}
void AppendData(const std::string& source_id,
const uint8* data, size_t length) {
EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
demuxer_->AppendData(source_id, data, length,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[source_id]);
}
void AppendDataInPieces(const uint8* data, size_t length) {
AppendDataInPieces(data, length, 7);
}
void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
const uint8* start = data;
const uint8* end = data + length;
while (start < end) {
size_t append_size = std::min(piece_size,
static_cast<size_t>(end - start));
AppendData(start, append_size);
start += append_size;
}
}
void AppendInitSegment(int stream_flags) {
AppendInitSegmentWithSourceId(kSourceId, stream_flags);
}
void AppendInitSegmentWithSourceId(const std::string& source_id,
int stream_flags) {
AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
}
void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
int stream_flags,
bool is_audio_encrypted,
bool is_video_encrypted) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(stream_flags,
is_audio_encrypted, is_video_encrypted,
&info_tracks, &info_tracks_size);
AppendData(source_id, info_tracks.get(), info_tracks_size);
}
void AppendGarbage() {
int garbage_cluster_size = 10;
scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
for (int i = 0; i < garbage_cluster_size; ++i)
garbage_cluster[i] = i;
AppendData(garbage_cluster.get(), garbage_cluster_size);
}
void InitDoneCalled(PipelineStatus expected_status,
PipelineStatus status) {
EXPECT_EQ(status, expected_status);
}
void AppendEmptyCluster(int timecode) {
AppendCluster(GenerateEmptyCluster(timecode));
}
PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
PipelineStatus expected_status) {
if (expected_duration != kNoTimestamp())
EXPECT_CALL(host_, SetDuration(expected_duration));
return CreateInitDoneCB(expected_status);
}
PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
base::Unretained(this),
expected_status);
}
enum StreamFlags {
HAS_AUDIO = 1 << 0,
HAS_VIDEO = 1 << 1,
HAS_TEXT = 1 << 2
};
bool InitDemuxer(int stream_flags) {
return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
}
bool InitDemuxerWithEncryptionInfo(
int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
PipelineStatus expected_status =
(stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
base::TimeDelta expected_duration = kNoTimestamp();
if (expected_status == PIPELINE_OK)
expected_duration = kDefaultDuration();
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(expected_duration, expected_status), true);
if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
return false;
AppendInitSegmentWithEncryptedInfo(
kSourceId, stream_flags,
is_audio_encrypted, is_video_encrypted);
return true;
}
bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
const std::string& video_id,
bool has_text) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
return false;
if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
return false;
int audio_flags = HAS_AUDIO;
int video_flags = HAS_VIDEO;
if (has_text) {
audio_flags |= HAS_TEXT;
video_flags |= HAS_TEXT;
}
AppendInitSegmentWithSourceId(audio_id, audio_flags);
AppendInitSegmentWithSourceId(video_id, video_flags);
return true;
}
bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
const std::string& video_id) {
return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
}
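// Initializes the demuxer with interleaved portions of bear-320x240.webm and
// bear-640x360.webm so that later reads encounter video config changes.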
bool InitDemuxerWithConfigChangeData() {
scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
PIPELINE_OK), true);
if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
return false;
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2768)));
AppendData(bear1->data(), bear1->data_size());
CheckExpectedRanges(kSourceId, "{ [0,2736) }");
AppendData(bear2->data(), 4340);
AppendData(bear2->data() + 55290, 18785);
CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
AppendData(bear1->data(), 4370);
AppendData(bear1->data() + 72737, 28183);
CheckExpectedRanges(kSourceId, "{ [0,2736) }");
MarkEndOfStream(PIPELINE_OK);
return true;
}
void ShutdownDemuxer() {
if (demuxer_) {
demuxer_->Shutdown();
message_loop_.RunUntilIdle();
}
}
void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
uint8 data[] = { 0x00 };
cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
}
scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
return GenerateCluster(timecode, timecode, block_count);
}
void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
int duration, int flags) {
const uint8* data =
(flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
sizeof(kVP8Interframe);
cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
}
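// Generates a cluster that interleaves audio and video SimpleBlocks in
// timestamp order and ends each stream with a BlockGroup carrying an explicit
// duration; a single-block cluster contains just one audio BlockGroup.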
scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
int first_video_timecode,
int block_count) {
CHECK_GT(block_count, 0);
int size = 10;
scoped_ptr<uint8[]> data(new uint8[size]);
ClusterBuilder cb;
cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
if (block_count == 1) {
cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
kAudioBlockDuration, kWebMFlagKeyframe,
data.get(), size);
return cb.Finish();
}
int audio_timecode = first_audio_timecode;
int video_timecode = first_video_timecode;
uint8 video_flag = kWebMFlagKeyframe;
for (int i = 0; i < block_count - 2; i++) {
if (audio_timecode <= video_timecode) {
cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
data.get(), size);
audio_timecode += kAudioBlockDuration;
continue;
}
cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
size);
video_timecode += kVideoBlockDuration;
video_flag = 0;
}
if (audio_timecode <= video_timecode) {
cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
kWebMFlagKeyframe, data.get(), size);
AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
kVideoBlockDuration, video_flag);
} else {
AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
kVideoBlockDuration, video_flag);
cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
kWebMFlagKeyframe, data.get(), size);
}
return cb.Finish();
}
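// Generates a cluster of keyframe blocks for one track covering
// [timecode, end_timecode), with the final block written as a BlockGroup that
// carries |block_duration|.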
scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
int end_timecode,
int track_number,
int block_duration) {
CHECK_GT(end_timecode, timecode);
std::vector<uint8> data(kBlockSize);
ClusterBuilder cb;
cb.SetClusterTimecode(timecode);
while (timecode < (end_timecode - block_duration)) {
cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
&data[0], data.size());
timecode += block_duration;
}
if (track_number == kVideoTrackNum) {
AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
kWebMFlagKeyframe);
} else {
cb.AddBlockGroup(track_number, timecode, block_duration,
kWebMFlagKeyframe, &data[0], data.size());
}
return cb.Finish();
}
void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
demuxer_->GetStream(type)->Read(read_cb);
message_loop_.RunUntilIdle();
}
void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
Read(DemuxerStream::AUDIO, read_cb);
}
void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
Read(DemuxerStream::VIDEO, read_cb);
}
void GenerateExpectedReads(int timecode, int block_count) {
GenerateExpectedReads(timecode, timecode, block_count);
}
void GenerateExpectedReads(int start_audio_timecode,
int start_video_timecode,
int block_count) {
CHECK_GT(block_count, 0);
if (block_count == 1) {
ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
return;
}
int audio_timecode = start_audio_timecode;
int video_timecode = start_video_timecode;
for (int i = 0; i < block_count; i++) {
if (audio_timecode <= video_timecode) {
ExpectRead(DemuxerStream::AUDIO, audio_timecode);
audio_timecode += kAudioBlockDuration;
continue;
}
ExpectRead(DemuxerStream::VIDEO, video_timecode);
video_timecode += kVideoBlockDuration;
}
}
void GenerateSingleStreamExpectedReads(int timecode,
int block_count,
DemuxerStream::Type type,
int block_duration) {
CHECK_GT(block_count, 0);
int stream_timecode = timecode;
for (int i = 0; i < block_count; i++) {
ExpectRead(type, stream_timecode);
stream_timecode += block_duration;
}
}
void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
GenerateSingleStreamExpectedReads(
timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
}
void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
GenerateSingleStreamExpectedReads(
timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
}
scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
ClusterBuilder cb;
cb.SetClusterTimecode(timecode);
return cb.Finish();
}
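// Verifies the demuxer's buffered ranges against |expected|, formatted as
// "{ [start1,end1) [start2,end2) ... }" with values in milliseconds.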
void CheckExpectedRanges(const std::string& expected) {
CheckExpectedRanges(kSourceId, expected);
}
void CheckExpectedRanges(const std::string& id,
const std::string& expected) {
Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
std::stringstream ss;
ss << "{ ";
for (size_t i = 0; i < r.size(); ++i) {
ss << "[" << r.start(i).InMilliseconds() << ","
<< r.end(i).InMilliseconds() << ") ";
}
ss << "}";
EXPECT_EQ(expected, ss.str());
}
MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>&));
void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
scoped_refptr<DecoderBuffer>* buffer_out,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
*status_out = status;
*buffer_out = buffer;
}
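// Reads from the stream of the given type until a non-kOk status or an
// end-of-stream buffer is returned, storing the timestamp of the last regular
// buffer in |last_timestamp|.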
void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
DemuxerStream::Status* status,
base::TimeDelta* last_timestamp) {
DemuxerStream* stream = demuxer_->GetStream(type);
scoped_refptr<DecoderBuffer> buffer;
*last_timestamp = kNoTimestamp();
do {
stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
base::Unretained(this), status, &buffer));
base::MessageLoop::current()->RunUntilIdle();
if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
*last_timestamp = buffer->timestamp();
} while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
}
void ExpectEndOfStream(DemuxerStream::Type type) {
EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
demuxer_->GetStream(type)->Read(base::Bind(
&ChunkDemuxerTest::ReadDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
HasTimestamp(timestamp_in_ms)));
demuxer_->GetStream(type)->Read(base::Bind(
&ChunkDemuxerTest::ReadDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
void ExpectConfigChanged(DemuxerStream::Type type) {
EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
demuxer_->GetStream(type)->Read(base::Bind(
&ChunkDemuxerTest::ReadDone, base::Unretained(this)));
message_loop_.RunUntilIdle();
}
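// Reads buffers from |stream| and checks that their timestamps, in
// milliseconds, match the space-delimited list in |expected|.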
void CheckExpectedBuffers(DemuxerStream* stream,
const std::string& expected) {
std::vector<std::string> timestamps;
base::SplitString(expected, ' ', &timestamps);
std::stringstream ss;
for (size_t i = 0; i < timestamps.size(); ++i) {
DemuxerStream::Status status;
scoped_refptr<DecoderBuffer> buffer;
stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
base::Unretained(this), &status, &buffer));
base::MessageLoop::current()->RunUntilIdle();
if (status != DemuxerStream::kOk || buffer->end_of_stream())
break;
if (i > 0)
ss << " ";
ss << buffer->timestamp().InMilliseconds();
}
EXPECT_EQ(expected, ss.str());
}
MOCK_METHOD1(Checkpoint, void(int id));
struct BufferTimestamps {
int video_time_ms;
int audio_time_ms;
};
static const int kSkip = -1;
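// Parses |filename| in 512-byte pieces and verifies that reads return the
// audio/video timestamps listed in |timestamps|; kSkip entries skip a read and
// a {kSkip, kSkip} entry terminates the list.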
bool ParseWebMFile(const std::string& filename,
const BufferTimestamps* timestamps,
const base::TimeDelta& duration) {
return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
}
bool ParseWebMFile(const std::string& filename,
const BufferTimestamps* timestamps,
const base::TimeDelta& duration,
int stream_flags) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
return false;
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
for (size_t i = 0;
(timestamps[i].audio_time_ms != kSkip ||
timestamps[i].video_time_ms != kSkip);
i++) {
bool audio_read_done = false;
bool video_read_done = false;
if (timestamps[i].audio_time_ms != kSkip) {
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(
timestamps[i].audio_time_ms),
&audio_read_done));
EXPECT_TRUE(audio_read_done);
}
if (timestamps[i].video_time_ms != kSkip) {
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(
timestamps[i].video_time_ms),
&video_read_done));
EXPECT_TRUE(video_read_done);
}
}
return true;
}
MOCK_METHOD0(DemuxerOpened, void());
MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
const uint8* init_data, int init_data_size));
void DemuxerNeedKey(const std::string& type,
const std::vector<uint8>& init_data) {
const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
NeedKeyMock(type, init_data_ptr, init_data.size());
}
void Seek(base::TimeDelta seek_time) {
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
message_loop_.RunUntilIdle();
}
void MarkEndOfStream(PipelineStatus status) {
demuxer_->MarkEndOfStream(status);
message_loop_.RunUntilIdle();
}
bool SetTimestampOffset(const std::string& id,
base::TimeDelta timestamp_offset) {
if (demuxer_->IsParsingMediaSegment(id))
return false;
timestamp_offset_map_[id] = timestamp_offset;
return true;
}
base::MessageLoop message_loop_;
MockDemuxerHost host_;
scoped_ptr<ChunkDemuxer> demuxer_;
bool use_legacy_frame_processor_;
base::TimeDelta append_window_start_for_next_append_;
base::TimeDelta append_window_end_for_next_append_;
std::map<std::string, base::TimeDelta> timestamp_offset_map_;
private:
DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
};
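// Exercises initialization across every combination of audio/video presence
// and encryption, verifying the resulting decoder configs and the expected
// NeedKey calls for encrypted streams.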
TEST_P(ChunkDemuxerTest, Init) {
for (int i = 0; i < 16; i++) {
bool has_audio = (i & 0x1) != 0;
bool has_video = (i & 0x2) != 0;
bool is_audio_encrypted = (i & 0x4) != 0;
bool is_video_encrypted = (i & 0x8) != 0;
if ((!has_audio && is_audio_encrypted) ||
(!has_video && is_video_encrypted)) {
continue;
}
CreateNewDemuxer();
if (is_audio_encrypted || is_video_encrypted) {
int need_key_count = (is_audio_encrypted ? 1 : 0) +
(is_video_encrypted ? 1 : 0);
EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
DecryptConfig::kDecryptionKeySize))
.Times(Exactly(need_key_count));
}
int stream_flags = 0;
if (has_audio)
stream_flags |= HAS_AUDIO;
if (has_video)
stream_flags |= HAS_VIDEO;
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
stream_flags, is_audio_encrypted, is_video_encrypted));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
ASSERT_TRUE(audio_stream);
const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
EXPECT_EQ(kCodecVorbis, config.codec());
EXPECT_EQ(32, config.bits_per_channel());
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
EXPECT_EQ(44100, config.samples_per_second());
EXPECT_TRUE(config.extra_data());
EXPECT_GT(config.extra_data_size(), 0u);
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
} else {
EXPECT_FALSE(audio_stream);
}
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
if (has_video) {
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
} else {
EXPECT_FALSE(video_stream);
}
ShutdownDemuxer();
demuxer_.reset();
}
}
TEST_P(ChunkDemuxerTest, InitText) {
bool has_video = true;
bool is_audio_encrypted = false;
bool is_video_encrypted = false;
for (int i = 0; i < 2; i++) {
bool has_audio = (i & 0x1) != 0;
CreateNewDemuxer();
DemuxerStream* text_stream = NULL;
TextTrackConfig text_config;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(DoAll(SaveArg<0>(&text_stream),
SaveArg<1>(&text_config)));
int stream_flags = HAS_TEXT;
if (has_audio)
stream_flags |= HAS_AUDIO;
if (has_video)
stream_flags |= HAS_VIDEO;
ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
stream_flags, is_audio_encrypted, is_video_encrypted));
ASSERT_TRUE(text_stream);
EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
EXPECT_EQ(kTextSubtitles, text_config.kind());
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
if (has_audio) {
ASSERT_TRUE(audio_stream);
const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
EXPECT_EQ(kCodecVorbis, config.codec());
EXPECT_EQ(32, config.bits_per_channel());
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
EXPECT_EQ(44100, config.samples_per_second());
EXPECT_TRUE(config.extra_data());
EXPECT_GT(config.extra_data_size(), 0u);
EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
EXPECT_EQ(is_audio_encrypted,
audio_stream->audio_decoder_config().is_encrypted());
} else {
EXPECT_FALSE(audio_stream);
}
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
if (has_video) {
EXPECT_TRUE(video_stream);
EXPECT_EQ(is_video_encrypted,
video_stream->video_decoder_config().is_encrypted());
} else {
EXPECT_FALSE(video_stream);
}
ShutdownDemuxer();
demuxer_.reset();
}
}
TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
ShutdownDemuxer();
}
TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(1));
AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
ShutdownDemuxer();
}
TEST_P(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
bool audio_read_done = false;
bool video_read_done = false;
bool text_read_done = false;
audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
message_loop_.RunUntilIdle();
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
EXPECT_FALSE(text_read_done);
ShutdownDemuxer();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
EXPECT_TRUE(text_read_done);
}
TEST_P(ChunkDemuxerTest, AppendDataAfterSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
InSequence s;
EXPECT_CALL(*this, Checkpoint(1));
Seek(base::TimeDelta::FromMilliseconds(46));
EXPECT_CALL(*this, Checkpoint(2));
Checkpoint(1);
AppendCluster(kDefaultSecondCluster());
message_loop_.RunUntilIdle();
Checkpoint(2);
}
TEST_P(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendGarbage();
}
TEST_P(ChunkDemuxerTest, SeekWhileParsingCluster) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
InSequence s;
scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
int first_append_size = cluster_a->size() - 11;
int second_append_size = cluster_a->size() - first_append_size;
AppendData(cluster_a->data(), first_append_size);
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectRead(DemuxerStream::VIDEO, 0);
ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
ExpectRead(DemuxerStream::AUDIO, 2 * kAudioBlockDuration);
Seek(base::TimeDelta::FromSeconds(5));
AppendData(cluster_a->data() + first_append_size, second_append_size);
AppendCluster(GenerateCluster(5000, 6));
GenerateExpectedReads(5000, 6);
}
TEST_P(ChunkDemuxerTest, AppendDataBeforeInit) {
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId]);
}
TEST_P(ChunkDemuxerTest, Read) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done));
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}
TEST_P(ChunkDemuxerTest, OutOfOrderClusters) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
AppendCluster(GenerateCluster(10, 4));
AppendCluster(GenerateCluster(5, 4));
scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId]);
}
TEST_P(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
cb.SetClusterTimecode(5);
AddSimpleBlock(&cb, kAudioTrackNum, 5);
AddSimpleBlock(&cb, kVideoTrackNum, 10);
AddSimpleBlock(&cb, kAudioTrackNum, 7);
AddSimpleBlock(&cb, kVideoTrackNum, 15);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendCluster(cb.Finish());
scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId]);
}
TEST_P(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
cb.SetClusterTimecode(5);
AddSimpleBlock(&cb, kAudioTrackNum, 5);
AddSimpleBlock(&cb, kVideoTrackNum, 5);
AddSimpleBlock(&cb, kAudioTrackNum, 3);
AddSimpleBlock(&cb, kVideoTrackNum, 3);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendCluster(cb.Finish());
scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId]);
}
TEST_P(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
ClusterBuilder cb;
cb.SetClusterTimecode(5);
AddSimpleBlock(&cb, kAudioTrackNum, 5);
AddSimpleBlock(&cb, kVideoTrackNum, 5);
AddSimpleBlock(&cb, kAudioTrackNum, 4);
AddSimpleBlock(&cb, kVideoTrackNum, 7);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendCluster(cb.Finish());
}
TEST_P(ChunkDemuxerTest, ClusterBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
AppendCluster(GenerateCluster(0, 1));
}
TEST_P(ChunkDemuxerTest, EOSDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
MarkEndOfStream(PIPELINE_OK);
}
TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
CheckExpectedRanges("{ }");
MarkEndOfStream(PIPELINE_OK);
ShutdownDemuxer();
CheckExpectedRanges("{ }");
demuxer_->RemoveId(kSourceId);
demuxer_.reset();
}
TEST_P(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
CheckExpectedRanges("{ }");
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges("{ }");
}
TEST_P(ChunkDemuxerTest, DecodeErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
MarkEndOfStream(PIPELINE_ERROR_DECODE);
CheckExpectedRanges(kDefaultFirstClusterRange);
}
TEST_P(ChunkDemuxerTest, NetworkErrorEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
CheckExpectedRanges(kDefaultFirstClusterRange);
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
MarkEndOfStream(PIPELINE_ERROR_NETWORK);
}
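// Helper that issues audio and video reads expected to complete with
// end-of-stream buffers and checks whether those reads have finished.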
class EndOfStreamHelper {
public:
explicit EndOfStreamHelper(Demuxer* demuxer)
: demuxer_(demuxer),
audio_read_done_(false),
video_read_done_(false) {
}
void RequestReads() {
EXPECT_FALSE(audio_read_done_);
EXPECT_FALSE(video_read_done_);
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
base::MessageLoop::current()->RunUntilIdle();
}
void CheckIfReadDonesWereCalled(bool expected) {
base::MessageLoop::current()->RunUntilIdle();
EXPECT_EQ(expected, audio_read_done_);
EXPECT_EQ(expected, video_read_done_);
}
private:
static void OnEndOfStreamReadDone(
bool* called,
DemuxerStream::Status status,
const scoped_refptr<DecoderBuffer>& buffer) {
EXPECT_EQ(status, DemuxerStream::kOk);
EXPECT_TRUE(buffer->end_of_stream());
*called = true;
}
Demuxer* demuxer_;
bool audio_read_done_;
bool video_read_done_;
DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
};
TEST_P(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 2));
bool audio_read_done_1 = false;
bool video_read_done_1 = false;
EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done_1));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done_1));
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done_1);
EXPECT_TRUE(video_read_done_1);
end_of_stream_helper_1.RequestReads();
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
MarkEndOfStream(PIPELINE_OK);
end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
end_of_stream_helper_2.RequestReads();
end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
}
TEST_P(ChunkDemuxerTest, ReadsAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 2));
bool audio_read_done_1 = false;
bool video_read_done_1 = false;
EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done_1));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done_1));
end_of_stream_helper_1.RequestReads();
EXPECT_TRUE(audio_read_done_1);
EXPECT_TRUE(video_read_done_1);
end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
MarkEndOfStream(PIPELINE_OK);
end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
end_of_stream_helper_2.RequestReads();
end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
end_of_stream_helper_3.RequestReads();
end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
}
TEST_P(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(0, 10);
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
MarkEndOfStream(PIPELINE_OK);
Seek(base::TimeDelta::FromMilliseconds(20));
base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
demuxer_->CancelPendingSeek(seek_time2);
Seek(seek_time2);
DemuxerStream::Status status;
base::TimeDelta last_timestamp;
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
}
TEST_P(ChunkDemuxerTest, EndOfStreamRangeChanges) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
CheckExpectedRanges(kSourceId, "{ [0,46) }");
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges(kSourceId, "{ [0,66) }");
demuxer_->UnmarkEndOfStream();
CheckExpectedRanges(kSourceId, "{ [0,46) }");
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
CheckExpectedRanges(kSourceId, "{ [0,46) }");
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges(kSourceId, "{ [0,200) }");
}
TEST_P(ChunkDemuxerTest, AppendingInPieces) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
scoped_ptr<uint8[]> info_tracks;
int info_tracks_size = 0;
CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
false, false, &info_tracks, &info_tracks_size);
scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
uint8* dst = buffer.get();
memcpy(dst, info_tracks.get(), info_tracks_size);
dst += info_tracks_size;
memcpy(dst, cluster_a->data(), cluster_a->size());
dst += cluster_a->size();
memcpy(dst, cluster_b->data(), cluster_b->size());
dst += cluster_b->size();
AppendDataInPieces(buffer.get(), buffer_size);
GenerateExpectedReads(0, 9);
}
TEST_P(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
{67, 6},
{100, 9},
{133, 12},
{kSkip, kSkip},
};
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2768)));
ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744)));
}
TEST_P(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
{67, 6},
{100, 9},
{133, 12},
{kSkip, kSkip},
};
ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
kInfiniteDuration()));
}
TEST_P(ChunkDemuxerTest, WebMFile_AudioOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{kSkip, 0},
{kSkip, 3},
{kSkip, 6},
{kSkip, 9},
{kSkip, 12},
{kSkip, kSkip},
};
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2768)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2744),
HAS_AUDIO));
}
TEST_P(ChunkDemuxerTest, WebMFile_VideoOnly) {
struct BufferTimestamps buffer_timestamps[] = {
{0, kSkip},
{33, kSkip},
{67, kSkip},
{100, kSkip},
{133, kSkip},
{kSkip, kSkip},
};
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2703),
HAS_VIDEO));
}
TEST_P(ChunkDemuxerTest, WebMFile_AltRefFrames) {
struct BufferTimestamps buffer_timestamps[] = {
{0, 0},
{33, 3},
{33, 6},
{67, 9},
{100, 12},
{kSkip, kSkip},
};
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2768)));
ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
base::TimeDelta::FromMilliseconds(2767)));
}
TEST_P(ChunkDemuxerTest, IncrementalClusterParsing) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendEmptyCluster(0);
scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
int i = 0;
for (; i < cluster->size() && !audio_read_done; ++i) {
AppendData(cluster->data() + i, 1);
message_loop_.RunUntilIdle();
}
EXPECT_TRUE(audio_read_done);
EXPECT_FALSE(video_read_done);
EXPECT_GT(i, 0);
EXPECT_LT(i, cluster->size());
for (; i < cluster->size() && !video_read_done; ++i) {
AppendData(cluster->data() + i, 1);
message_loop_.RunUntilIdle();
}
EXPECT_TRUE(video_read_done);
EXPECT_LT(i, cluster->size());
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(23),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(33),
&video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
ASSERT_LT(i, cluster->size());
AppendData(cluster->data() + i, cluster->size() - i);
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}
TEST_P(ChunkDemuxerTest, ParseErrorDuringInit) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(
kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
uint8 tmp = 0;
demuxer_->AppendData(kSourceId, &tmp, 1,
append_window_start_for_next_append_,
append_window_end_for_next_append_,
&timestamp_offset_map_[kSourceId]);
}
TEST_P(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
DEMUXER_ERROR_COULD_NOT_OPEN), true);
std::vector<std::string> codecs(1);
codecs[0] = "vorbis";
ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs,
use_legacy_frame_processor_),
ChunkDemuxer::kOk);
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
}
TEST_P(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(),
DEMUXER_ERROR_COULD_NOT_OPEN), true);
std::vector<std::string> codecs(1);
codecs[0] = "vp8";
ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs,
use_legacy_frame_processor_),
ChunkDemuxer::kOk);
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
}
TEST_P(ChunkDemuxerTest, MultipleHeaders) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
AppendCluster(kDefaultSecondCluster());
GenerateExpectedReads(0, 9);
}
TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(0, 4);
}
TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
std::string audio_id = "audio1";
std::string video_id = "video1";
EXPECT_CALL(host_, AddTextStream(_, _))
.Times(Exactly(2));
ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(0, 4);
}
TEST_P(ChunkDemuxerTest, AddIdFailures) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
}
TEST_P(ChunkDemuxerTest, RemoveId) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
AppendCluster(audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
demuxer_->RemoveId(audio_id);
bool audio_read_done = false;
ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
GenerateVideoStreamExpectedReads(0, 4);
}
TEST_P(ChunkDemuxerTest, RemoveAndAddId) {
std::string audio_id_1 = "audio1";
ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
demuxer_->RemoveId(audio_id_1);
std::string audio_id_2 = "audio2";
ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
}
TEST_P(ChunkDemuxerTest, SeekCanceled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 4));
Seek(base::TimeDelta::FromSeconds(50));
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
demuxer_->CancelPendingSeek(seek_time);
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
Seek(seek_time);
GenerateExpectedReads(0, 4);
}
TEST_P(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 4));
base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
demuxer_->StartWaitingForSeek(seek_time1);
demuxer_->CancelPendingSeek(seek_time2);
demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
Seek(seek_time2);
GenerateExpectedReads(0, 4);
}
TEST_P(ChunkDemuxerTest, SeekAudioAndVideoSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
AppendCluster(
audio_id,
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(
video_id,
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(0),
&video_read_done));
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
Seek(base::TimeDelta::FromSeconds(3));
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromSeconds(3),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromSeconds(3),
&video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
AppendCluster(audio_id,
GenerateSingleStreamCluster(
3000, 3092, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id,
GenerateSingleStreamCluster(
3000, 3132, kVideoTrackNum, kVideoBlockDuration));
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}
TEST_P(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
bool seek_cb_was_called = false;
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time,
base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
message_loop_.RunUntilIdle();
EXPECT_FALSE(seek_cb_was_called);
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(120)));
MarkEndOfStream(PIPELINE_OK);
message_loop_.RunUntilIdle();
EXPECT_TRUE(seek_cb_was_called);
ShutdownDemuxer();
}
TEST_P(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
bool seek_cb_was_called = false;
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time,
base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
message_loop_.RunUntilIdle();
EXPECT_FALSE(seek_cb_was_called);
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
MarkEndOfStream(PIPELINE_OK);
message_loop_.RunUntilIdle();
EXPECT_FALSE(seek_cb_was_called);
demuxer_->UnmarkEndOfStream();
AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
message_loop_.RunUntilIdle();
EXPECT_TRUE(seek_cb_was_called);
ShutdownDemuxer();
}
TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
AppendInitSegment(HAS_AUDIO);
AppendCluster(
GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
CheckExpectedRanges("{ [0,92) }");
AppendCluster(GenerateSingleStreamCluster(
150, 219, kAudioTrackNum, kAudioBlockDuration));
CheckExpectedRanges("{ [0,92) [150,219) }");
}
TEST_P(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
AppendInitSegment(HAS_VIDEO);
AppendCluster(
GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
CheckExpectedRanges("{ [0,132) }");
AppendCluster(GenerateSingleStreamCluster(
200, 299, kVideoTrackNum, kVideoBlockDuration));
CheckExpectedRanges("{ [0,132) [200,299) }");
}
TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateSingleStreamCluster(
0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(GenerateSingleStreamCluster(
0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
CheckExpectedRanges("{ [0,23) }");
AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
CheckExpectedRanges("{ [0,23) [320,400) }");
AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
}
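// Verifies that text-track buffers do not extend the reported audio/video
// buffered ranges, and that Remove() clears the buffered range.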
TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
EXPECT_CALL(host_, AddTextStream(_, _));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
CheckExpectedRanges("{ [0,46) }");
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
CheckExpectedRanges("{ [0,46) }");
demuxer_->Remove(kSourceId, base::TimeDelta(),
base::TimeDelta::FromMilliseconds(46));
CheckExpectedRanges("{ }");
}
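// Verifies how MarkEndOfStream() and UnmarkEndOfStream() affect the reported
// buffered ranges: end of stream extends each range to the furthest buffered
// timestamp across streams.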
TEST_P(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
CheckExpectedRanges("{ [0,46) }");
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges("{ [0,66) }");
demuxer_->UnmarkEndOfStream();
CheckExpectedRanges("{ [0,46) }");
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(366)));
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"200K 233 266 299 300K 333");
CheckExpectedRanges("{ [0,46) [200,246) }");
demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
base::TimeDelta::FromMilliseconds(300));
CheckExpectedRanges("{ [0,46) }");
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
CheckExpectedRanges("{ [0,46) [200,246) }");
MarkEndOfStream(PIPELINE_OK);
CheckExpectedRanges("{ [0,46) [200,266) [300,366) }");
}
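// Verifies that reads succeed when audio and video blocks use different
// timecodes within the same cluster.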
TEST_P(ChunkDemuxerTest, DifferentStreamTimecodes) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(GenerateCluster(0, 25, 8));
Seek(base::TimeDelta::FromSeconds(0));
GenerateExpectedReads(0, 25, 8);
Seek(base::TimeDelta::FromSeconds(5));
AppendCluster(GenerateCluster(5025, 5000, 8));
GenerateExpectedReads(5025, 5000, 8);
}
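// Same as above, but with audio and video appended through separate source
// IDs.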
TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
AppendCluster(audio_id, GenerateSingleStreamCluster(
25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id, GenerateSingleStreamCluster(
30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
Seek(base::TimeDelta::FromMilliseconds(25));
GenerateAudioStreamExpectedReads(25, 4);
GenerateVideoStreamExpectedReads(30, 4);
}
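// Verifies read behavior when audio and video data are buffered in completely
// disjoint time ranges.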
TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
AppendCluster(audio_id, GenerateSingleStreamCluster(0,
4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id, GenerateSingleStreamCluster(10000,
4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time,
NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectEndOfStream(DemuxerStream::VIDEO);
}
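// Verifies that a cluster containing no blocks is accepted and does not break
// subsequent appends and reads.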
TEST_P(ChunkDemuxerTest, ClusterWithNoBuffers) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendEmptyCluster(0);
AppendCluster(GenerateCluster(0, 2));
ExpectRead(DemuxerStream::AUDIO, 0);
ExpectRead(DemuxerStream::VIDEO, 0);
}
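// Verifies that codec IDs carrying a profile/level suffix (e.g.
// "avc1.4D4041") are matched by prefix, subject to proprietary codec support.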
TEST_P(ChunkDemuxerTest, CodecPrefixMatching) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
#if defined(USE_PROPRIETARY_CODECS)
expected = ChunkDemuxer::kOk;
#endif
std::vector<std::string> codecs;
codecs.push_back("avc1.4D4041");
EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs,
use_legacy_frame_processor_),
expected);
}
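// Verifies that codec IDs that are not strictly RFC 6381 compliant (e.g.
// "mp4a.40.02") are still accepted when proprietary codecs are enabled.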
TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
#if defined(USE_PROPRIETARY_CODECS)
expected = ChunkDemuxer::kOk;
#endif
const char* codec_ids[] = {
"mp4a.40.02",
"mp4a.40.05"
};
for (size_t i = 0; i < arraysize(codec_ids); ++i) {
std::vector<std::string> codecs;
codecs.push_back(codec_ids[i]);
ChunkDemuxer::Status result =
demuxer_->AddId("source_id", "audio/mp4", codecs,
use_legacy_frame_processor_);
EXPECT_EQ(result, expected)
<< "Failed to add codec_id '" << codec_ids[i] << "'";
if (result == ChunkDemuxer::kOk)
demuxer_->RemoveId("source_id");
}
}
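// Verifies that the end-of-stream state is preserved across a Seek(): reads
// after the seek reach the same final timestamps before reporting end of
// stream.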
TEST_P(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, SetDuration(_))
.Times(AnyNumber());
base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
AppendCluster(kDefaultFirstCluster());
AppendCluster(kDefaultSecondCluster());
MarkEndOfStream(PIPELINE_OK);
DemuxerStream::Status status;
base::TimeDelta last_timestamp;
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
EXPECT_EQ(DemuxerStream::kOk, status);
EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
EXPECT_EQ(DemuxerStream::kOk, status);
EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
Seek(base::TimeDelta::FromMilliseconds(0));
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
EXPECT_EQ(DemuxerStream::kOk, status);
EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
EXPECT_EQ(DemuxerStream::kOk, status);
EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
}
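// Verifies that buffered ranges can be queried before any init segment has
// been appended.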
TEST_P(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
CheckExpectedRanges("audio", "{ }");
CheckExpectedRanges("video", "{ }");
}
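// Verifies that appending data and marking end of stream while a seek is
// pending still allows the seek and the subsequent reads, including
// end-of-stream reads, to complete.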
TEST_P(ChunkDemuxerTest, EndOfStreamDuringSeek) {
InSequence s;
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
demuxer_->StartWaitingForSeek(seek_time);
AppendCluster(kDefaultSecondCluster());
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
MarkEndOfStream(PIPELINE_OK);
demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
GenerateExpectedReads(0, 4);
GenerateExpectedReads(46, 66, 5);
EndOfStreamHelper end_of_stream_helper(demuxer_.get());
end_of_stream_helper.RequestReads();
end_of_stream_helper.CheckIfReadDonesWereCalled(true);
}
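// Verifies that a mid-stream video config change is reported as
// kConfigChanged at the correct timestamps, with the expected decoder configs
// before and after each boundary.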
TEST_P(ChunkDemuxerTest, ConfigChange_Video) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
DemuxerStream::Status status;
base::TimeDelta last_timestamp;
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
ASSERT_TRUE(video_config_1.IsValidConfig());
EXPECT_EQ(video_config_1.natural_size().width(), 320);
EXPECT_EQ(video_config_1.natural_size().height(), 240);
ExpectRead(DemuxerStream::VIDEO, 0);
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
ASSERT_TRUE(video_config_2.IsValidConfig());
EXPECT_EQ(video_config_2.natural_size().width(), 640);
EXPECT_EQ(video_config_2.natural_size().height(), 360);
ExpectRead(DemuxerStream::VIDEO, 527);
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
ExpectRead(DemuxerStream::VIDEO, 801);
ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
}
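// Same as above, for a mid-stream audio config change.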
TEST_P(ChunkDemuxerTest, ConfigChange_Audio) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
DemuxerStream::Status status;
base::TimeDelta last_timestamp;
DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
ASSERT_TRUE(audio_config_1.IsValidConfig());
EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
ExpectRead(DemuxerStream::AUDIO, 0);
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
ASSERT_TRUE(audio_config_2.IsValidConfig());
EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
ExpectRead(DemuxerStream::AUDIO, 527);
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kConfigChanged);
EXPECT_EQ(last_timestamp.InMilliseconds(), 759);
ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
ExpectRead(DemuxerStream::AUDIO, 779);
ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
ASSERT_EQ(status, DemuxerStream::kOk);
}
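// Verifies that seeking across config change boundaries reports
// kConfigChanged and switches between the two video configs correctly.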
TEST_P(ChunkDemuxerTest, ConfigChange_Seek) {
InSequence s;
ASSERT_TRUE(InitDemuxerWithConfigChangeData());
DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
ASSERT_TRUE(video_config_1.IsValidConfig());
EXPECT_EQ(video_config_1.natural_size().width(), 320);
EXPECT_EQ(video_config_1.natural_size().height(), 240);
ExpectRead(DemuxerStream::VIDEO, 0);
Seek(base::TimeDelta::FromMilliseconds(527));
ExpectConfigChanged(DemuxerStream::VIDEO);
const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
ASSERT_TRUE(video_config_2.IsValidConfig());
EXPECT_EQ(video_config_2.natural_size().width(), 640);
EXPECT_EQ(video_config_2.natural_size().height(), 360);
ExpectRead(DemuxerStream::VIDEO, 527);
Seek(base::TimeDelta::FromMilliseconds(0));
ExpectConfigChanged(DemuxerStream::VIDEO);
ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
ExpectRead(DemuxerStream::VIDEO, 0);
Seek(base::TimeDelta::FromMilliseconds(527));
Seek(base::TimeDelta::FromMilliseconds(801));
ExpectRead(DemuxerStream::VIDEO, 801);
ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
}
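// Verifies that a positive timestamp offset shifts appended buffers forward
// in time.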
TEST_P(ChunkDemuxerTest, TimestampPositiveOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
AppendCluster(GenerateCluster(0, 2));
Seek(base::TimeDelta::FromMilliseconds(30000));
GenerateExpectedReads(30000, 2);
}
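// Verifies that a negative timestamp offset shifts appended buffers back in
// time.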
TEST_P(ChunkDemuxerTest, TimestampNegativeOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
AppendCluster(GenerateCluster(1000, 2));
GenerateExpectedReads(0, 2);
}
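// Verifies timestamp offsets applied independently to separate audio and
// video source IDs.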
TEST_P(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
std::string audio_id = "audio1";
std::string video_id = "video1";
ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
ASSERT_TRUE(SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(-2500)));
ASSERT_TRUE(SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(-2500)));
AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
GenerateAudioStreamExpectedReads(0, 4);
GenerateVideoStreamExpectedReads(0, 4);
Seek(base::TimeDelta::FromMilliseconds(27300));
ASSERT_TRUE(SetTimestampOffset(
audio_id, base::TimeDelta::FromMilliseconds(27300)));
ASSERT_TRUE(SetTimestampOffset(
video_id, base::TimeDelta::FromMilliseconds(27300)));
AppendCluster(audio_id, GenerateSingleStreamCluster(
0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
AppendCluster(video_id, GenerateSingleStreamCluster(
0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
GenerateVideoStreamExpectedReads(27300, 4);
GenerateAudioStreamExpectedReads(27300, 4);
}
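// Verifies that Abort() resets the in-progress media segment parsing state.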
TEST_P(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
AppendData(cluster->data(), cluster->size() - 13);
ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
demuxer_->Abort(kSourceId);
ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
}
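// Verifies IsParsingMediaSegment() as a WebM cluster is appended one byte at
// a time.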
TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
const uint8 kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0x83,  // CLUSTER (size = 3)
0xE7, 0x81, 0x01,  // Cluster TIMECODE (value = 1)
};
// Expected IsParsingMediaSegment() return value after each byte of kBuffer
// above has been appended.
const bool kExpectedReturnValues[] = {
false, false, false, false, true,
true, true, false,
};
COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
test_arrays_out_of_sync);
COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
for (size_t i = 0; i < sizeof(kBuffer); i++) {
DVLOG(3) << "Appending and testing index " << i;
AppendData(kBuffer + i, 1);
bool expected_return_value = kExpectedReturnValues[i];
EXPECT_EQ(expected_return_value,
demuxer_->IsParsingMediaSegment(kSourceId));
}
}
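// Verifies that appending data past the current duration triggers
// SetDuration() notifications with the extended durations.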
TEST_P(ChunkDemuxerTest, DurationChange) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
const int kStreamDuration = kDefaultDuration().InMilliseconds();
AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
kStreamDuration - kVideoBlockDuration,
2));
CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
const int kNewStreamDurationAudio = kStreamDuration + kAudioBlockDuration;
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kNewStreamDurationAudio)));
const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
kStreamDuration + kVideoBlockDuration,
3));
CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
}
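// Verifies that duration changes take the configured timestamp offset into
// account.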
TEST_P(ChunkDemuxerTest, DurationChangeTimestampOffset) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
EXPECT_CALL(host_, SetDuration(
kDefaultDuration() + base::TimeDelta::FromMilliseconds(
kAudioBlockDuration * 2)));
EXPECT_CALL(host_, SetDuration(
kDefaultDuration() + base::TimeDelta::FromMilliseconds(
kVideoBlockDuration * 2)));
AppendCluster(GenerateCluster(0, 4));
}
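// Verifies that MarkEndOfStream() truncates the duration down to the end of
// the buffered data.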
TEST_P(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
EXPECT_CALL(host_, SetDuration(
base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
MarkEndOfStream(PIPELINE_OK);
}
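// Verifies that a zero-length append is accepted as a no-op.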
TEST_P(ChunkDemuxerTest, ZeroLengthAppend) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendData(NULL, 0);
}
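// Verifies that appending is allowed again after UnmarkEndOfStream().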
TEST_P(ChunkDemuxerTest, AppendAfterEndOfStream) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, SetDuration(_))
.Times(AnyNumber());
AppendCluster(kDefaultFirstCluster());
MarkEndOfStream(PIPELINE_OK);
demuxer_->UnmarkEndOfStream();
AppendCluster(kDefaultSecondCluster());
MarkEndOfStream(PIPELINE_OK);
}
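// Verifies that Shutdown() before Initialize() causes initialization to
// complete with DEMUXER_ERROR_COULD_NOT_OPEN instead of crashing.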
TEST_P(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
demuxer_->Shutdown();
demuxer_->Initialize(
&host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
message_loop_.RunUntilIdle();
}
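// Verifies that a read issued on a previously obtained audio stream returns
// end of stream once the audio renderer has been disabled.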
TEST_P(ChunkDemuxerTest, ReadAfterAudioDisabled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(kDefaultFirstCluster());
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
ASSERT_TRUE(stream);
demuxer_->OnAudioRendererDisabled();
ASSERT_FALSE(demuxer_->GetStream(DemuxerStream::AUDIO));
bool audio_read_done = false;
stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
message_loop_.RunUntilIdle();
EXPECT_TRUE(audio_read_done);
}
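// Verifies that end of stream does not satisfy reads that are waiting for a
// gap in the buffered ranges to be filled; filling the gap does, and a later
// end of stream completes the final EOS reads.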
TEST_P(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
AppendCluster(0, 10);
AppendCluster(300, 10);
CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
GenerateExpectedReads(0, 10);
bool audio_read_done = false;
bool video_read_done = false;
ReadAudio(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(138),
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(138),
&video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
MarkEndOfStream(PIPELINE_OK);
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
demuxer_->UnmarkEndOfStream();
AppendCluster(138, 22);
message_loop_.RunUntilIdle();
CheckExpectedRanges(kSourceId, "{ [0,435) }");
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
GenerateExpectedReads(161, 171, 20);
audio_read_done = false;
video_read_done = false;
ReadAudio(base::Bind(&OnReadDone_EOSExpected,
&audio_read_done));
ReadVideo(base::Bind(&OnReadDone_EOSExpected,
&video_read_done));
EXPECT_FALSE(audio_read_done);
EXPECT_FALSE(video_read_done);
EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
MarkEndOfStream(PIPELINE_OK);
EXPECT_TRUE(audio_read_done);
EXPECT_TRUE(video_read_done);
}
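// Verifies that canceling a pending seek during the initial preroll does not
// prevent a subsequent seek and append from succeeding.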
TEST_P(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
demuxer_->CancelPendingSeek(seek_time);
Seek(seek_time);
AppendCluster(seek_time.InMilliseconds(), 10);
}
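// Verifies garbage collection of buffered data while a seek is pending, under
// a small memory limit.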
TEST_P(ChunkDemuxerTest, GCDuringSeek) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
Seek(seek_time1);
AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
seek_time1.InMilliseconds(), 5);
CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
demuxer_->StartWaitingForSeek(seek_time2);
AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
seek_time2.InMilliseconds(), 5);
CheckExpectedRanges(kSourceId, "{ [500,615) }");
demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
}
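// Verifies that Remove() can be called before any init segment has been
// appended.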
TEST_P(ChunkDemuxerTest, RemoveBeforeInitSegment) {
EXPECT_CALL(*this, DemuxerOpened());
demuxer_->Initialize(
&host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);
EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO | HAS_VIDEO));
demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(0),
base::TimeDelta::FromMilliseconds(1));
}
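// Verifies that video buffers outside the configured append window are
// filtered out of appends.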
TEST_P(ChunkDemuxerTest, AppendWindow_Video) {
ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"0K 30 60 90 120K 150 180 210 240K 270 300 330K");
CheckExpectedRanges(kSourceId, "{ [120,270) }");
CheckExpectedBuffers(stream, "120 150 180 210 240");
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"360 390 420K 450 480 510 540K 570 600 630K");
CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}
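// Verifies that audio buffers outside the configured append window are
// filtered out of appends.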
TEST_P(ChunkDemuxerTest, AppendWindow_Audio) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
AppendSingleStreamCluster(
kSourceId, kAudioTrackNum,
"0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
CheckExpectedRanges(kSourceId, "{ [30,270) }");
CheckExpectedBuffers(stream, "30 60 90 120 150 180 210 240");
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
AppendSingleStreamCluster(
kSourceId, kAudioTrackNum,
"360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
CheckExpectedRanges(kSourceId, "{ [30,270) [360,630) }");
}
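// Verifies that the append window is also applied to text-track buffers.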
TEST_P(ChunkDemuxerTest, AppendWindow_Text) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"0K 30 60 90 120K 150 180 210 240K 270 300 330K");
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
CheckExpectedRanges(kSourceId, "{ [120,270) }");
CheckExpectedBuffers(video_stream, "120 150 180 210 240");
CheckExpectedBuffers(text_stream, "100");
append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"360 390 420K 450 480 510 540K 570 600 630K");
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
Seek(base::TimeDelta::FromMilliseconds(420));
CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
CheckExpectedBuffers(text_stream, "400 500");
}
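// Verifies that StartWaitingForSeek() can be called after a parse error has
// been reported.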
TEST_P(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
AppendGarbage();
base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
demuxer_->StartWaitingForSeek(seek_time);
}
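// Verifies that Remove() clears audio, video, and text data so that newly
// appended data over the removed range is what subsequent reads return.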
TEST_P(ChunkDemuxerTest, Remove_AudioVideoText) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
"0K 20K 40K 60K 80K 100K 120K 140K");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"0K 30 60 90 120K 150 180");
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
CheckExpectedBuffers(text_stream, "0 100 200");
demuxer_->Remove(kSourceId, base::TimeDelta(),
base::TimeDelta::FromMilliseconds(300));
CheckExpectedRanges(kSourceId, "{ }");
AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
"1K 21K 41K 61K 81K 101K 121K 141K");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"1K 31 61 91 121K 151 181");
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
Seek(base::TimeDelta());
CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
CheckExpectedBuffers(text_stream, "1 101 201");
}
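// Verifies that a seek completes even when no text cues are buffered at the
// seek point, and that the pending text read completes once cues arrive.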
TEST_P(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
DemuxerStream* text_stream = NULL;
EXPECT_CALL(host_, AddTextStream(_, _))
.WillOnce(SaveArg<0>(&text_stream));
ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
bool seek_cb_was_called = false;
demuxer_->StartWaitingForSeek(seek_time);
demuxer_->Seek(seek_time,
base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
message_loop_.RunUntilIdle();
EXPECT_FALSE(seek_cb_was_called);
bool text_read_done = false;
text_stream->Read(base::Bind(&OnReadDone,
base::TimeDelta::FromMilliseconds(125),
&text_read_done));
AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
"0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
"0K 30 60 90 120K 150 180 210");
message_loop_.RunUntilIdle();
EXPECT_TRUE(seek_cb_was_called);
EXPECT_FALSE(text_read_done);
CheckExpectedBuffers(audio_stream, "120 140");
CheckExpectedBuffers(video_stream, "120 150");
EXPECT_FALSE(text_read_done);
AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
message_loop_.RunUntilIdle();
EXPECT_TRUE(text_read_done);
CheckExpectedBuffers(text_stream, "175 225");
CheckExpectedBuffers(audio_stream, "160 180");
CheckExpectedBuffers(video_stream, "180 210");
}
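// Run every test above with the legacy frame processor enabled (the test
// parameter).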
INSTANTIATE_TEST_CASE_P(LegacyFrameProcessor, ChunkDemuxerTest, Values(true));
}  // namespace media