This source file includes the following definitions.
- read_fd
- write_fd
- write_fd_
- IsValid
- ShareEncodedToProcess
- ConvertSampleToFloat
- sample_rate_
- ReadUnsignedInteger
- ReadPCMSample
- ReadChunkHeader
- ReadFMTChunk
- CopyDataChunkToBus
- DecodeWAVEFile
- CopyPcmDataToBus
- BufferAndCopyPcmDataToBus
- TryWAVEFileDecoder
- DecodeAudioFileData
#include "content/renderer/media/android/audio_decoder_android.h"
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <algorithm>
#include <iterator>
#include <limits>
#include <string>
#include <vector>
#include "base/file_descriptor_posix.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/posix/eintr_wrapper.h"
#include "content/common/view_messages.h"
#include "media/base/android/webaudio_media_codec_info.h"
#include "media/base/audio_bus.h"
#include "media/base/limits.h"
#include "third_party/WebKit/public/platform/WebAudioBus.h"
namespace content {
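// Carries the encoded audio data to the browser-side decoder via shared
// memory and receives the decoded PCM back over a pipe: the write end is
// handed to the browser process, and the renderer reads from the read end.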
class AudioDecoderIO {
public:
AudioDecoderIO(const char* data, size_t data_size);
~AudioDecoderIO();
bool ShareEncodedToProcess(base::SharedMemoryHandle* handle);
bool IsValid() const;
int read_fd() const { return read_fd_; }
int write_fd() const { return write_fd_; }
private:
base::SharedMemory encoded_shared_memory_;
int read_fd_;
int write_fd_;
DISALLOW_COPY_AND_ASSIGN(AudioDecoderIO);
};
AudioDecoderIO::AudioDecoderIO(const char* data, size_t data_size)
: read_fd_(-1),
write_fd_(-1) {
if (!data || !data_size || data_size > 0x80000000)
return;
encoded_shared_memory_.CreateAndMapAnonymous(data_size);
if (!encoded_shared_memory_.memory())
return;
memcpy(encoded_shared_memory_.memory(), data, data_size);
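// Create the pipe used to return the decoded PCM data from the
// browser-side decoder.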
int pipefd[2];
if (pipe(pipefd))
return;
read_fd_ = pipefd[0];
write_fd_ = pipefd[1];
}
AudioDecoderIO::~AudioDecoderIO() {
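// Only the read end is owned here. The write end is passed to the IPC
// message as an auto-close base::FileDescriptor (see DecodeAudioFileData),
// so it is not closed here.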
if (read_fd_ >= 0 && close(read_fd_)) {
DVLOG(1) << "Cannot close read fd " << read_fd_
<< ": " << strerror(errno);
}
}
bool AudioDecoderIO::IsValid() const {
return read_fd_ >= 0 && write_fd_ >= 0 &&
encoded_shared_memory_.memory();
}
bool AudioDecoderIO::ShareEncodedToProcess(base::SharedMemoryHandle* handle) {
return encoded_shared_memory_.ShareToProcess(
base::Process::Current().handle(),
handle);
}
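// Scale a 16-bit PCM sample to a float in [-1, 1]. Separate scale factors
// are used because the int16_t range is asymmetric.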
static float ConvertSampleToFloat(int16_t sample) {
const float kMaxScale = 1.0f / std::numeric_limits<int16_t>::max();
const float kMinScale = -1.0f / std::numeric_limits<int16_t>::min();
return sample * (sample < 0 ? kMinScale : kMaxScale);
}
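// Minimal RIFF/WAVE parser. Only uncompressed PCM with 8, 16, or 24 bits
// per sample is handled: the "RIFF"/"WAVE" header is verified, chunks are
// walked in order, the "fmt " chunk supplies the format, and the first
// "data" chunk is copied into the destination bus. All other chunks are
// skipped.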
class WAVEDecoder {
public:
WAVEDecoder(const uint8_t* data, size_t data_size);
~WAVEDecoder();
bool DecodeWAVEFile(blink::WebAudioBus* destination_bus);
private:
static const unsigned kMinimumWAVLength = 44;
static const unsigned kChunkIDLength = 4;
static const unsigned kChunkSizeLength = 4;
static const unsigned kFormatFieldLength = 4;
static const unsigned kFMTChunkLength = 16;
static const int16_t kAudioFormatPCM = 1;
static const unsigned kMaximumBytesPerSample = 3;
uint32_t ReadUnsignedInteger(const uint8_t* buffer, size_t length);
int16_t ReadPCMSample(const uint8_t* pcm_data);
bool ReadChunkHeader();
bool ReadFMTChunk();
bool CopyDataChunkToBus(blink::WebAudioBus* destination_bus);
uint8_t chunk_id_[kChunkIDLength];
size_t chunk_size_;
const uint8_t* buffer_;
const uint8_t* buffer_end_;
size_t bytes_per_sample_;
uint16_t number_of_channels_;
uint32_t sample_rate_;
DISALLOW_COPY_AND_ASSIGN(WAVEDecoder);
};
WAVEDecoder::WAVEDecoder(const uint8_t* encoded_data, size_t data_size)
: buffer_(encoded_data),
buffer_end_(encoded_data + 1),
bytes_per_sample_(0),
number_of_channels_(0),
sample_rate_(0) {
if (buffer_ + data_size > buffer_)
buffer_end_ = buffer_ + data_size;
}
WAVEDecoder::~WAVEDecoder() {}
uint32_t WAVEDecoder::ReadUnsignedInteger(const uint8_t* buffer,
size_t length) {
unsigned value = 0;
if (length == 0 || length > sizeof(value)) {
DCHECK(false) << "ReadUnsignedInteger: Invalid length: " << length;
return 0;
}
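// All integer fields in a WAVE file are little-endian.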
for (size_t k = length; k > 0; --k)
value = (value << 8) + buffer[k - 1];
return value;
}
int16_t WAVEDecoder::ReadPCMSample(const uint8_t* pcm_data) {
uint32_t unsigned_sample = ReadUnsignedInteger(pcm_data, bytes_per_sample_);
int16_t sample;
switch (bytes_per_sample_) {
case 1:
sample = (unsigned_sample - 128) << 8;
break;
case 2:
sample = static_cast<int16_t>(unsigned_sample);
break;
case 3:
sample = static_cast<int16_t>(unsigned_sample >> 8);
break;
default:
sample = 0;
break;
}
return sample;
}
bool WAVEDecoder::ReadChunkHeader() {
if (buffer_ + kChunkIDLength + kChunkSizeLength >= buffer_end_)
return false;
memcpy(chunk_id_, buffer_, kChunkIDLength);
chunk_size_ = ReadUnsignedInteger(buffer_ + kChunkIDLength, kChunkSizeLength);
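// RIFF chunks are padded to an even size; include the pad byte when
// skipping to the next chunk.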
if (chunk_size_ % 2)
++chunk_size_;
return true;
}
bool WAVEDecoder::ReadFMTChunk() {
if (chunk_size_ < kFMTChunkLength) {
DVLOG(1) << "FMT chunk too short: " << chunk_size_;
return false;
}
uint16_t audio_format = ReadUnsignedInteger(buffer_, 2);
if (audio_format != kAudioFormatPCM) {
DVLOG(1) << "Audio format not supported: " << audio_format;
return false;
}
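// Field offsets within the fmt chunk: channel count at byte 2, sample
// rate at byte 4, bits per sample at byte 14.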
number_of_channels_ = ReadUnsignedInteger(buffer_ + 2, 2);
sample_rate_ = ReadUnsignedInteger(buffer_ + 4, 4);
unsigned bits_per_sample = ReadUnsignedInteger(buffer_ + 14, 2);
if (!number_of_channels_ ||
number_of_channels_ > media::limits::kMaxChannels) {
DVLOG(1) << "Unsupported number of channels: " << number_of_channels_;
return false;
}
if (sample_rate_ < media::limits::kMinSampleRate ||
sample_rate_ > media::limits::kMaxSampleRate) {
DVLOG(1) << "Unsupported sample rate: " << sample_rate_;
return false;
}
if (bits_per_sample == 8 || bits_per_sample == 16 || bits_per_sample == 24) {
bytes_per_sample_ = bits_per_sample / 8;
return true;
}
DVLOG(1) << "Unsupported bits per sample: " << bits_per_sample;
return false;
}
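// De-interleave the PCM samples of the data chunk into the destination
// bus, converting each sample to float.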
bool WAVEDecoder::CopyDataChunkToBus(blink::WebAudioBus* destination_bus) {
if (!bytes_per_sample_ || bytes_per_sample_ > kMaximumBytesPerSample) {
DVLOG(1) << "WARNING: data chunk without preceeding fmt chunk,"
<< " or invalid bytes per sample.";
return false;
}
VLOG(0) << "Decoding WAVE file: " << number_of_channels_ << " channels, "
<< sample_rate_ << " Hz, "
<< chunk_size_ / bytes_per_sample_ / number_of_channels_
<< " frames, " << 8 * bytes_per_sample_ << " bits/sample";
size_t number_of_frames =
chunk_size_ / bytes_per_sample_ / number_of_channels_;
destination_bus->initialize(
number_of_channels_, number_of_frames, sample_rate_);
for (size_t m = 0; m < number_of_frames; ++m) {
for (uint16_t k = 0; k < number_of_channels_; ++k) {
int16_t sample = ReadPCMSample(buffer_);
buffer_ += bytes_per_sample_;
destination_bus->channelData(k)[m] = ConvertSampleToFloat(sample);
}
}
return true;
}
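// Verify the RIFF/WAVE header, then walk the chunks until a data chunk is
// found or the buffer is exhausted.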
bool WAVEDecoder::DecodeWAVEFile(blink::WebAudioBus* destination_bus) {
if (buffer_ + kMinimumWAVLength > buffer_end_) {
DVLOG(1) << "Buffer too small to contain full WAVE header: ";
return false;
}
ReadChunkHeader();
if (memcmp(chunk_id_, "RIFF", kChunkIDLength) != 0) {
DVLOG(1) << "RIFF missing";
return false;
}
buffer_ += kChunkIDLength + kChunkSizeLength;
memcpy(chunk_id_, buffer_, kFormatFieldLength);
if (memcmp(chunk_id_, "WAVE", kFormatFieldLength) != 0) {
DVLOG(1) << "Invalid WAVE file: missing WAVE header";
return false;
}
buffer_ += kFormatFieldLength;
while (buffer_ < buffer_end_) {
if (!ReadChunkHeader()) {
DVLOG(1) << "Couldn't read chunk header";
return false;
}
buffer_ += kChunkIDLength + kChunkSizeLength;
if (buffer_ + chunk_size_ > buffer_end_) {
DVLOG(1) << "Insufficient bytes to read chunk of size " << chunk_size_;
return false;
}
if (memcmp(chunk_id_, "fmt ", kChunkIDLength) == 0) {
if (!ReadFMTChunk())
return false;
} else if (memcmp(chunk_id_, "data", kChunkIDLength) == 0) {
return CopyDataChunkToBus(destination_bus);
} else {
DVLOG(0) << "Ignoring WAVE chunk `" << chunk_id_ << "' size "
<< chunk_size_;
}
buffer_ += chunk_size_;
}
return false;
}
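// Read 16-bit interleaved PCM from |input_fd| and de-interleave it into
// |destination_bus|. Used when the number of frames is known up front, so
// the bus can be sized before reading.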
static void CopyPcmDataToBus(int input_fd,
blink::WebAudioBus* destination_bus,
size_t number_of_frames,
unsigned number_of_channels,
double file_sample_rate) {
destination_bus->initialize(number_of_channels,
number_of_frames,
file_sample_rate);
int16_t pipe_data[PIPE_BUF / sizeof(int16_t)];
size_t decoded_frames = 0;
size_t current_sample_in_frame = 0;
ssize_t nread;
while ((nread = HANDLE_EINTR(read(input_fd, pipe_data, sizeof(pipe_data)))) >
0) {
size_t samples_in_pipe = nread / sizeof(int16_t);
for (size_t m = 0; m < samples_in_pipe; ++m) {
if (decoded_frames >= number_of_frames)
break;
destination_bus->channelData(current_sample_in_frame)[decoded_frames] =
ConvertSampleToFloat(pipe_data[m]);
++current_sample_in_frame;
if (current_sample_in_frame >= number_of_channels) {
current_sample_in_frame = 0;
++decoded_frames;
}
}
}
if (decoded_frames < number_of_frames)
destination_bus->resizeSmaller(decoded_frames);
}
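// Like CopyPcmDataToBus(), but for the case where the decoder did not
// report a frame count: all of the PCM data is buffered first so the bus
// can be sized correctly.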
static void BufferAndCopyPcmDataToBus(int input_fd,
blink::WebAudioBus* destination_bus,
unsigned number_of_channels,
double file_sample_rate) {
int16_t pipe_data[PIPE_BUF / sizeof(int16_t)];
std::vector<int16_t> decoded_samples;
ssize_t nread;
while ((nread = HANDLE_EINTR(read(input_fd, pipe_data, sizeof(pipe_data)))) >
0) {
size_t samples_in_pipe = nread / sizeof(int16_t);
if (decoded_samples.size() + samples_in_pipe > decoded_samples.capacity()) {
decoded_samples.reserve(std::max(samples_in_pipe,
2 * decoded_samples.capacity()));
}
std::copy(pipe_data,
pipe_data + samples_in_pipe,
back_inserter(decoded_samples));
}
DVLOG(1) << "Total samples read = " << decoded_samples.size();
size_t number_of_samples = decoded_samples.size();
size_t number_of_frames = decoded_samples.size() / number_of_channels;
size_t decoded_frames = 0;
destination_bus->initialize(number_of_channels,
number_of_frames,
file_sample_rate);
// Copy only complete frames; any trailing partial frame is dropped.
for (size_t m = 0; decoded_frames < number_of_frames; m += number_of_channels) {
for (size_t k = 0; k < number_of_channels; ++k) {
int16_t sample = decoded_samples[m + k];
destination_bus->channelData(k)[decoded_frames] =
ConvertSampleToFloat(sample);
}
++decoded_frames;
}
if (decoded_frames < number_of_frames)
destination_bus->resizeSmaller(decoded_frames);
}
static bool TryWAVEFileDecoder(blink::WebAudioBus* destination_bus,
const uint8_t* encoded_data,
size_t data_size) {
WAVEDecoder decoder(encoded_data, data_size);
return decoder.DecodeWAVEFile(destination_bus);
}
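// Decode in-process with the WAVE decoder when possible. Otherwise copy
// the encoded data into shared memory and ask the browser process, via
// ViewHostMsg_RunWebAudioMediaCodec, to decode it with MediaCodec,
// reading the decoded PCM back over a pipe.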
bool DecodeAudioFileData(blink::WebAudioBus* destination_bus, const char* data,
size_t data_size,
scoped_refptr<ThreadSafeSender> sender) {
if (TryWAVEFileDecoder(
destination_bus, reinterpret_cast<const uint8_t*>(data), data_size)) {
return true;
}
AudioDecoderIO audio_decoder(data, data_size);
if (!audio_decoder.IsValid())
return false;
base::SharedMemoryHandle encoded_data_handle;
audio_decoder.ShareEncodedToProcess(&encoded_data_handle);
base::FileDescriptor fd(audio_decoder.write_fd(), true);
DVLOG(1) << "DecodeAudioFileData: Starting MediaCodec";
sender->Send(new ViewHostMsg_RunWebAudioMediaCodec(
encoded_data_handle, fd, data_size));
int input_fd = audio_decoder.read_fd();
struct media::WebAudioMediaCodecInfo info;
DVLOG(1) << "Reading audio file info from fd " << input_fd;
ssize_t nread = HANDLE_EINTR(read(input_fd, &info, sizeof(info)));
// Validate the read before logging so uninitialized fields are never used.
if (nread != static_cast<ssize_t>(sizeof(info)))
return false;
DVLOG(1) << "read: " << nread << " bytes:\n"
<< " 0: number of channels = " << info.channel_count << "\n"
<< " 1: sample rate = " << info.sample_rate << "\n"
<< " 2: number of frames = " << info.number_of_frames << "\n";
unsigned number_of_channels = info.channel_count;
double file_sample_rate = static_cast<double>(info.sample_rate);
size_t number_of_frames = info.number_of_frames;
if (!number_of_channels ||
number_of_channels > media::limits::kMaxChannels ||
file_sample_rate < media::limits::kMinSampleRate ||
file_sample_rate > media::limits::kMaxSampleRate) {
return false;
}
if (number_of_frames > 0) {
CopyPcmDataToBus(input_fd,
destination_bus,
number_of_frames,
number_of_channels,
file_sample_rate);
} else {
BufferAndCopyPcmDataToBus(input_fd,
destination_bus,
number_of_channels,
file_sample_rate);
}
return true;
}
}  // namespace content