This source file includes the following definitions.
- av_sample_format_
- Open
- Close
- GetDuration
- GetNumberOfFrames
#include "media/filters/audio_file_reader.h"
#include <cmath>
#include "base/logging.h"
#include "base/time/time.h"
#include "media/base/audio_bus.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
namespace media {
// Creates a reader that will pull media data through |protocol|.  No FFmpeg
// state is created here; nothing is read until Open() is called.
AudioFileReader::AudioFileReader(FFmpegURLProtocol* protocol)
    : codec_context_(NULL),
      stream_index_(0),
      protocol_(protocol),
      channels_(0),
      sample_rate_(0),
      av_sample_format_(0) {}
// Releases all FFmpeg state via Close() so callers need not call Close()
// explicitly before destruction.
AudioFileReader::~AudioFileReader() {
  Close();
}
// Opens the media through FFmpeg, locates the first audio stream, and opens a
// decoder for it.  Caches the stream's channel count, sample rate, and sample
// format for later verification in Read().  Returns true on success.  On
// failure the reader holds partially-initialized FFmpeg state that is released
// by Close() (invoked from the destructor).
bool AudioFileReader::Open() {
  glue_.reset(new FFmpegGlue(protocol_));
  AVFormatContext* format_context = glue_->format_context();

  // Open the FFmpeg demuxer over |protocol_|.
  if (!glue_->OpenContext()) {
    DLOG(WARNING) << "AudioFileReader::Open() : error in avformat_open_input()";
    return false;
  }

  // Find the first audio stream; its codec context is owned by the format
  // context, so |codec_context_| is a non-owning pointer.
  codec_context_ = NULL;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVCodecContext* c = format_context->streams[i]->codec;
    if (c->codec_type == AVMEDIA_TYPE_AUDIO) {
      codec_context_ = c;
      stream_index_ = i;
      break;
    }
  }
  if (!codec_context_)
    return false;

  int result = avformat_find_stream_info(format_context, NULL);
  if (result < 0) {
    DLOG(WARNING)
        << "AudioFileReader::Open() : error in avformat_find_stream_info()";
    return false;
  }

  AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
  if (!codec) {
    // NOTE: previously this log printed |result|, which at this point holds
    // the non-negative return of avformat_find_stream_info() and is unrelated
    // to the codec lookup; the misleading value has been dropped.
    DLOG(WARNING) << "AudioFileReader::Open() : could not find codec.";
    return false;
  }

  // If the decoder would produce planar S16 (AV_SAMPLE_FMT_S16P), request
  // interleaved S16 instead: Read() only deinterleaves planar *float* data
  // (AV_SAMPLE_FMT_FLTP) and otherwise assumes interleaved samples.
  if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P)
    codec_context_->request_sample_fmt = AV_SAMPLE_FMT_S16;

  if ((result = avcodec_open2(codec_context_, codec, NULL)) < 0) {
    DLOG(WARNING) << "AudioFileReader::Open() : could not open codec -"
                  << " result: " << result;
    return false;
  }

  // Verify the decoder honored the interleaved-format request above; the
  // request is advisory and may be ignored, in which case Read() could not
  // handle the output.
  if (codec_context_->sample_fmt == AV_SAMPLE_FMT_S16P) {
    DLOG(ERROR) << "AudioFileReader::Open() : unable to configure a"
                << " supported sample format - "
                << codec_context_->sample_fmt;
    return false;
  }

  // Reject channel layouts Chrome cannot represent.
  if (ChannelLayoutToChromeChannelLayout(
          codec_context_->channel_layout, codec_context_->channels) ==
      CHANNEL_LAYOUT_UNSUPPORTED) {
    return false;
  }

  // Cache the stream configuration; Read() bails if it changes midstream.
  channels_ = codec_context_->channels;
  sample_rate_ = codec_context_->sample_rate;
  av_sample_format_ = codec_context_->sample_fmt;
  return true;
}
// Tears down all FFmpeg state.  Safe to call when already closed; Open() may
// be called again afterwards.
void AudioFileReader::Close() {
  // |codec_context_| points into memory owned by |glue_|'s format context, so
  // it is only cleared here — the FFmpegGlue reset releases the real storage.
  codec_context_ = NULL;
  glue_.reset();
}
// Decodes up to audio_bus->frames() sample-frames from the stream selected in
// Open() into |audio_bus|, converting to the bus's float channel planes.
// Returns the number of frames actually written; the remainder of the bus is
// zero-filled.  Decoding stops early on decode errors or on a midstream
// configuration (rate/channels/format) change.
int AudioFileReader::Read(AudioBus* audio_bus) {
  DCHECK(glue_.get() && codec_context_) <<
      "AudioFileReader::Read() : reader is not opened!";

  // In release builds a channel-count mismatch is tolerated by returning 0
  // frames rather than crashing.
  DCHECK_EQ(audio_bus->channels(), channels());
  if (audio_bus->channels() != channels())
    return 0;

  size_t bytes_per_sample = av_get_bytes_per_sample(codec_context_->sample_fmt);

  // Holds each decoded frame; freed automatically by the scoped_ptr deleter.
  scoped_ptr<AVFrame, ScopedPtrAVFreeFrame> av_frame(av_frame_alloc());

  AVPacket packet;
  int current_frame = 0;
  bool continue_decoding = true;

  // Read demuxed packets until the bus is full, a fatal condition is hit, or
  // av_read_frame() reports EOF/error.  av_dup_packet() makes the packet own
  // its data buffer before we slide pointers into it below.
  while (current_frame < audio_bus->frames() && continue_decoding &&
         av_read_frame(glue_->format_context(), &packet) >= 0 &&
         av_dup_packet(&packet) >= 0) {
    // Ignore packets belonging to other (non-audio) streams.
    if (packet.stream_index != stream_index_) {
      av_free_packet(&packet);
      continue;
    }

    // Work on a shallow copy so data/size can be advanced as the decoder
    // consumes bytes, while the original |packet| is still freed correctly.
    AVPacket packet_temp = packet;
    do {
      avcodec_get_frame_defaults(av_frame.get());
      int frame_decoded = 0;
      int result = avcodec_decode_audio4(
          codec_context_, av_frame.get(), &frame_decoded, &packet_temp);
      if (result < 0) {
        DLOG(WARNING)
            << "AudioFileReader::Read() : error in avcodec_decode_audio4() -"
            << result;
        break;  // Skip the rest of this packet; keep reading new packets.
      }

      // |result| is the number of input bytes consumed; a single packet may
      // require multiple decode calls, hence the do/while below.
      packet_temp.size -= result;
      packet_temp.data += result;

      // The decoder may buffer input without emitting a frame yet.
      if (!frame_decoded)
        continue;

      int frames_read = av_frame->nb_samples;
      if (frames_read < 0) {
        continue_decoding = false;
        break;
      }

#ifdef CHROMIUM_NO_AVFRAME_CHANNELS
      // Older FFmpeg builds lack AVFrame::channels; derive from the layout.
      int channels = av_get_channel_layout_nb_channels(
          av_frame->channel_layout);
#else
      int channels = av_frame->channels;
#endif
      // Bail out on midstream configuration changes; the AudioBus was sized
      // for the configuration captured in Open().
      if (av_frame->sample_rate != sample_rate_ ||
          channels != channels_ ||
          av_frame->format != av_sample_format_) {
        DLOG(ERROR) << "Unsupported midstream configuration change!"
                    << " Sample Rate: " << av_frame->sample_rate << " vs "
                    << sample_rate_
                    << ", Channels: " << channels << " vs "
                    << channels_
                    << ", Sample Format: " << av_frame->format << " vs "
                    << av_sample_format_;
        continue_decoding = false;
        break;
      }

      // Clamp to the destination size rather than overrunning the bus.
      if (current_frame + frames_read > audio_bus->frames()) {
        DLOG(ERROR) << "Truncating decoded data due to output size.";
        frames_read = audio_bus->frames() - current_frame;
      }

      if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLT) {
        // Interleaved float: manually deinterleave into the bus's planes.
        float* decoded_audio_data = reinterpret_cast<float*>(av_frame->data[0]);
        int channels = audio_bus->channels();
        for (int ch = 0; ch < channels; ++ch) {
          float* bus_data = audio_bus->channel(ch) + current_frame;
          for (int i = 0, offset = ch; i < frames_read;
               ++i, offset += channels) {
            bus_data[i] = decoded_audio_data[offset];
          }
        }
      } else if (codec_context_->sample_fmt == AV_SAMPLE_FMT_FLTP) {
        // Planar float: each channel is already contiguous; copy directly.
        for (int ch = 0; ch < audio_bus->channels(); ++ch) {
          memcpy(audio_bus->channel(ch) + current_frame,
                 av_frame->extended_data[ch], sizeof(float) * frames_read);
        }
      } else {
        // Interleaved integer samples; AudioBus performs the int->float
        // conversion based on |bytes_per_sample|.
        audio_bus->FromInterleavedPartial(
            av_frame->data[0], current_frame, frames_read, bytes_per_sample);
      }

      current_frame += frames_read;
    } while (packet_temp.size > 0);
    av_free_packet(&packet);
  }

  // Zero out any frames we didn't manage to decode.
  audio_bus->ZeroFramesPartial(
      current_frame, audio_bus->frames() - current_frame);

  return current_frame;
}
// Reports the container's estimated duration.  The value from FFmpeg is in
// AV_TIME_BASE units (microseconds).
base::TimeDelta AudioFileReader::GetDuration() const {
  const AVRational kMicrosecondBase = {1, AV_TIME_BASE};
  // NOTE(review): one extra tick is added to the raw duration — presumably to
  // counteract downward truncation in the conversion; confirm against the
  // original change history before relying on exact values.
  return ConvertFromTimeBase(kMicrosecondBase,
                             glue_->format_context()->duration + 1);
}
// Converts the estimated duration into a sample-frame count at the stream's
// sample rate, rounding up so a partial trailing frame is still counted.
int AudioFileReader::GetNumberOfFrames() const {
  const double exact_frames = GetDuration().InSecondsF() * sample_rate();
  return static_cast<int>(std::ceil(exact_frames));
}
}