This source file includes the following definitions:
- GetShareMode
- HardwareSampleRate
- WASAPIAudioOutputStream
- Open
- Start
- Stop
- Close
- SetVolume
- GetVolume
- Run
- RenderAudioFromSource
- ExclusiveModeInitialization
- StopThread
#include "media/audio/win/audio_low_latency_output_win.h"
#include <Functiondiscoverykeys_devpkey.h>
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_propvariant.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
using base::win::ScopedCoMem;
namespace media {
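// static
// Returns the share mode used for all output streams: exclusive mode when
// the --enable-exclusive-audio command-line flag is set, otherwise shared
// mode (the default).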
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
return AUDCLNT_SHAREMODE_EXCLUSIVE;
return AUDCLNT_SHAREMODE_SHARED;
}
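// static
// Retrieves the sample rate of the audio engine's shared-mode mix format for
// the given output device (the default render device when |device_id| is
// empty). Returns 0 on failure.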
int WASAPIAudioOutputStream::HardwareSampleRate(const std::string& device_id) {
WAVEFORMATPCMEX format;
ScopedComPtr<IAudioClient> client;
if (device_id.empty()) {
client = CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id));
if (!device)
return 0;
client = CoreAudioUtil::CreateClient(device);
}
if (!client || FAILED(CoreAudioUtil::GetSharedModeMixFormat(client, &format)))
return 0;
return static_cast<int>(format.Format.nSamplesPerSec);
}
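// Stores the requested PCM format as a WAVEFORMATEXTENSIBLE structure and
// creates the two events used by the stream: one signaled by the audio
// engine when it needs more data, and one set in Stop() to break the render
// loop.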
WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
const std::string& device_id,
const AudioParameters& params,
ERole device_role)
: creating_thread_id_(base::PlatformThread::CurrentId()),
manager_(manager),
format_(),
opened_(false),
volume_(1.0),
packet_size_frames_(0),
packet_size_bytes_(0),
endpoint_buffer_size_frames_(0),
device_id_(device_id),
device_role_(device_role),
share_mode_(GetShareMode()),
num_written_frames_(0),
source_(NULL),
audio_bus_(AudioBus::Create(params)) {
DCHECK(manager_);
VLOG(1) << "WASAPIAudioOutputStream::WASAPIAudioOutputStream()";
VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
bool avrt_init = avrt::Initialize();
DCHECK(avrt_init) << "Failed to load the avrt.dll";
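// Set up the desired render format specified by the client as an extensible
// waveformat structure with a PCM subformat.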
WAVEFORMATEX* format = &format_.Format;
format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
format->nChannels = params.channels();
format->nSamplesPerSec = params.sample_rate();
format->wBitsPerSample = params.bits_per_sample();
format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
format_.Samples.wValidBitsPerSample = params.bits_per_sample();
format_.dwChannelMask = CoreAudioUtil::GetChannelConfig(device_id, eRender);
format_.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
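// Store the size (in different units) of the audio packets which we expect
// to process on each render event.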
packet_size_frames_ = params.frames_per_buffer();
packet_size_bytes_ = params.GetBytesPerBuffer();
VLOG(1) << "Number of bytes per audio frame : " << format->nBlockAlign;
VLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
VLOG(1) << "Number of bytes per packet : " << packet_size_bytes_;
VLOG(1) << "Number of milliseconds per packet: "
<< params.GetBufferDuration().InMillisecondsF();
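// Create the event which the audio engine will signal each time a buffer
// becomes ready to be processed by the client.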
audio_samples_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
DCHECK(audio_samples_render_event_.IsValid());
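// Create the event which will be set in Stop() when rendering shall stop.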
stop_render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
DCHECK(stop_render_event_.IsValid());
}
WASAPIAudioOutputStream::~WASAPIAudioOutputStream() {}
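// Opens the stream: creates an IAudioClient for the requested endpoint
// device, verifies that the requested format is supported, initializes the
// client in shared or exclusive mode, and acquires the IAudioRenderClient
// and IAudioClock services.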
bool WASAPIAudioOutputStream::Open() {
VLOG(1) << "WASAPIAudioOutputStream::Open()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
if (opened_)
return true;
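// Create an IAudioClient instance for the default or the specified endpoint
// device.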
ScopedComPtr<IAudioClient> audio_client;
if (device_id_.empty() ||
CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
DLOG_IF(ERROR, !device) << "Failed to open device: " << device_id_;
if (device)
audio_client = CoreAudioUtil::CreateClient(device);
}
if (!audio_client)
return false;
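// Verify that the requested format is supported by the endpoint device in
// the selected share mode.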
if (!CoreAudioUtil::IsFormatSupported(audio_client,
share_mode_,
&format_)) {
LOG(ERROR) << "Audio parameters are not supported.";
return false;
}
HRESULT hr = S_FALSE;
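// Initialize the audio stream between the client and the device in shared
// or exclusive mode, hooking up the render event so the engine can signal
// when it needs more data.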
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
hr = CoreAudioUtil::SharedModeInitialize(
audio_client, &format_, audio_samples_render_event_.Get(),
&endpoint_buffer_size_frames_);
if (FAILED(hr))
return false;
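// The best callback sequence is achieved when the packet size (given by
// the native device period) is an even divisor of the endpoint buffer size.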
if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
LOG(ERROR)
<< "Bailing out due to non-perfect timing. Packet size of "
<< packet_size_frames_ << " frames is not an even divisor of the "
<< "endpoint buffer size of " << endpoint_buffer_size_frames_ << " frames.";
return false;
}
} else {
hr = ExclusiveModeInitialization(audio_client,
audio_samples_render_event_.Get(),
&endpoint_buffer_size_frames_);
if (FAILED(hr))
return false;
if (endpoint_buffer_size_frames_ != packet_size_frames_) {
LOG(ERROR) << "Bailing out due to non-perfect timing.";
return false;
}
}
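// Create an IAudioRenderClient for the initialized IAudioClient. This
// interface enables us to write output data to the rendering endpoint
// buffer.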
ScopedComPtr<IAudioRenderClient> audio_render_client =
CoreAudioUtil::CreateRenderClient(audio_client);
if (!audio_render_client)
return false;
audio_client_ = audio_client;
audio_render_client_ = audio_render_client;
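// The IAudioClock service is used to derive the number of rendered frames
// when estimating the audio delay reported to the source.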
hr = audio_client_->GetService(__uuidof(IAudioClock),
audio_clock_.ReceiveVoid());
if (FAILED(hr)) {
LOG(ERROR) << "Failed to get IAudioClock service.";
return false;
}
opened_ = true;
return true;
}
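// Starts rendering: pre-fills the endpoint buffer with silence in shared
// mode, spins up the render thread, and starts the audio client. Calling
// Start() again with the same callback while already started is a no-op.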
void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
VLOG(1) << "WASAPIAudioOutputStream::Start()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
CHECK(callback);
CHECK(opened_);
if (render_thread_) {
CHECK_EQ(callback, source_);
return;
}
source_ = callback;
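// Ensure that the endpoint buffer is prepared with silence before streaming
// starts in shared mode.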
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
audio_client_, audio_render_client_)) {
LOG(ERROR) << "Failed to prepare endpoint buffers with silence.";
callback->OnError(this);
return;
}
}
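// The endpoint buffer starts out filled (with silence), so the number of
// written frames initially equals the endpoint buffer size. This counter is
// used for the delay estimate in RenderAudioFromSource().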
num_written_frames_ = endpoint_buffer_size_frames_;
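// Create and start the thread that drives the rendering by waiting for
// render events.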
render_thread_.reset(
new base::DelegateSimpleThread(this, "wasapi_render_thread"));
render_thread_->Start();
if (!render_thread_->HasBeenStarted()) {
LOG(ERROR) << "Failed to start WASAPI render thread.";
StopThread();
callback->OnError(this);
return;
}
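// Start streaming data between the endpoint buffer and the audio engine.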
HRESULT hr = audio_client_->Start();
if (FAILED(hr)) {
LOG_GETLASTERROR(ERROR)
<< "Failed to start output streaming: " << std::hex << hr;
StopThread();
callback->OnError(this);
}
}
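// Stops rendering: stops the audio client, joins the render thread, and
// flushes the endpoint buffer so that a later Start() begins from a known
// state.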
void WASAPIAudioOutputStream::Stop() {
VLOG(1) << "WASAPIAudioOutputStream::Stop()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
if (!render_thread_)
return;
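// Stop output audio streaming first so that no further render events are
// signaled while the render thread is being torn down.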
HRESULT hr = audio_client_->Stop();
if (FAILED(hr)) {
LOG_GETLASTERROR(ERROR)
<< "Failed to stop output streaming: " << std::hex << hr;
source_->OnError(this);
}
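// Make a local copy of |source_| since StopThread() will clear it.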
AudioSourceCallback* callback = source_;
StopThread();
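// Flush all pending data and reset the audio clock stream position to 0.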
hr = audio_client_->Reset();
if (FAILED(hr)) {
LOG_GETLASTERROR(ERROR)
<< "Failed to reset streaming: " << std::hex << hr;
callback->OnError(this);
}
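// Extra safety check to ensure that the buffers were cleared; if they were
// not, a subsequent Start() could fail when acquiring the render buffer.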
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
UINT32 num_queued_frames = 0;
audio_client_->GetCurrentPadding(&num_queued_frames);
DCHECK_EQ(0u, num_queued_frames);
}
}
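// Stops and closes the stream, then asks the audio manager to release (and
// delete) it. It is valid to call Close() even if Start() was never called.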
void WASAPIAudioOutputStream::Close() {
VLOG(1) << "WASAPIAudioOutputStream::Close()";
DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
Stop();
manager_->ReleaseOutputStream(this);
}
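// Caches the volume, which is applied to each rendered buffer in
// RenderAudioFromSource(). Values outside [0.0, 1.0] are ignored.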
void WASAPIAudioOutputStream::SetVolume(double volume) {
VLOG(1) << "SetVolume(volume=" << volume << ")";
float volume_float = static_cast<float>(volume);
if (volume_float < 0.0f || volume_float > 1.0f) {
return;
}
volume_ = volume_float;
}
void WASAPIAudioOutputStream::GetVolume(double* volume) {
VLOG(1) << "GetVolume()";
*volume = static_cast<double>(volume_);
}
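// Render thread entry point (base::DelegateSimpleThread::Delegate). Raises
// the thread priority, opts in to MMCSS, and then alternates between
// waiting for render events and feeding audio to the endpoint buffer until
// |stop_render_event_| is signaled or an error occurs.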
void WASAPIAudioOutputStream::Run() {
ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
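// Increase the priority of the render thread.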
render_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
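// Enable MMCSS to ensure that this thread receives prioritized access to
// CPU resources.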
DWORD task_index = 0;
HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
&task_index);
bool mmcss_is_ok =
(mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
if (!mmcss_is_ok) {
DWORD err = GetLastError();
LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
}
HRESULT hr = S_FALSE;
bool playing = true;
bool error = false;
HANDLE wait_array[] = { stop_render_event_,
audio_samples_render_event_ };
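// The device frequency is the frequency generated by the hardware clock in
// the audio device; GetFrequency() reports a constant value. It is used to
// convert clock positions into frames in RenderAudioFromSource().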
UINT64 device_frequency = 0;
hr = audio_clock_->GetFrequency(&device_frequency);
error = FAILED(hr);
PLOG_IF(ERROR, error) << "Failed to get the device frequency: "
<< std::hex << hr;
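// Keep rendering audio until the stop event is signaled or an error occurs.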
while (playing && !error) {
DWORD wait_result = WaitForMultipleObjects(arraysize(wait_array),
wait_array,
FALSE,
INFINITE);
switch (wait_result) {
case WAIT_OBJECT_0 + 0:
playing = false;
break;
case WAIT_OBJECT_0 + 1:
error = !RenderAudioFromSource(device_frequency);
break;
default:
error = true;
break;
}
}
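// Stop audio rendering since something went wrong in the render loop. Note
// that we are still in a "started" state, hence a Stop() call is required
// to join the thread properly.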
if (playing && error) {
audio_client_->Stop();
PLOG(ERROR) << "WASAPI rendering failed.";
}
if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
PLOG(WARNING) << "Failed to disable MMCSS";
}
}
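// Called on the render thread for each render event. Fills the available
// space in the endpoint buffer with data pulled from |source_|. Returns
// false on a device error.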
bool WASAPIAudioOutputStream::RenderAudioFromSource(UINT64 device_frequency) {
TRACE_EVENT0("audio", "RenderAudioFromSource");
HRESULT hr = S_FALSE;
UINT32 num_queued_frames = 0;
uint8* audio_data = NULL;
size_t num_available_frames = 0;
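// In shared mode, the available space is the endpoint buffer size minus the
// amount of queued (not yet read) data; in exclusive mode the whole
// endpoint buffer is ours after each render event.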
if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
hr = audio_client_->GetCurrentPadding(&num_queued_frames);
num_available_frames =
endpoint_buffer_size_frames_ - num_queued_frames;
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to retrieve amount of available space: "
<< std::hex << hr;
return false;
}
} else {
num_available_frames = endpoint_buffer_size_frames_;
}
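// Do nothing if there is not enough free space for a complete packet; wait
// for the next render event instead.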
if (num_available_frames < packet_size_frames_)
return true;
DLOG_IF(ERROR, num_available_frames % packet_size_frames_ != 0)
<< "Non-perfect timing detected (num_available_frames="
<< num_available_frames << ", packet_size_frames="
<< packet_size_frames_ << ")";
size_t num_packets = (num_available_frames / packet_size_frames_);
for (size_t n = 0; n < num_packets; ++n) {
hr = audio_render_client_->GetBuffer(packet_size_frames_,
&audio_data);
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to use rendering audio buffer: "
<< std::hex << hr;
return false;
}
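// Derive the audio delay between a render event and the time when the first
// sample in this packet is played out on the speaker. The delta between the
// number of frames written so far (including this packet) and the frame
// position currently being played, converted to bytes, is reported to the
// source, e.g. for A/V synchronization.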
UINT64 position = 0;
int audio_delay_bytes = 0;
hr = audio_clock_->GetPosition(&position, NULL);
if (SUCCEEDED(hr)) {
double pos_sample_playing_frames = format_.Format.nSamplesPerSec *
(static_cast<double>(position) / device_frequency);
size_t pos_last_sample_written_frames =
num_written_frames_ + packet_size_frames_;
audio_delay_bytes = (pos_last_sample_written_frames -
pos_sample_playing_frames) * format_.Format.nBlockAlign;
}
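// Read a packet of PCM samples from the registered source into
// |audio_bus_|.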
int frames_filled = source_->OnMoreData(
audio_bus_.get(), AudioBuffersState(0, audio_delay_bytes));
uint32 num_filled_bytes = frames_filled * format_.Format.nBlockAlign;
DCHECK_LE(num_filled_bytes, packet_size_bytes_);
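// Apply the cached volume and convert the planar AudioBus data into the
// interleaved integer format expected by the endpoint buffer.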
const int bytes_per_sample = format_.Format.wBitsPerSample >> 3;
audio_bus_->Scale(volume_);
audio_bus_->ToInterleaved(
frames_filled, bytes_per_sample, audio_data);
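// Release the buffer space acquired in the GetBuffer() call; render silence
// if the source did not fill the packet completely.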
DWORD flags = (num_filled_bytes < packet_size_bytes_) ?
AUDCLNT_BUFFERFLAGS_SILENT : 0;
audio_render_client_->ReleaseBuffer(packet_size_frames_, flags);
num_written_frames_ += packet_size_frames_;
}
return true;
}
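// Initializes |client| for exclusive-mode, event-driven streaming with a
// buffer duration derived from the packet size. On
// AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED the aligned size suggested by the
// device is only logged; the error code is returned to the caller.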
HRESULT WASAPIAudioOutputStream::ExclusiveModeInitialization(
IAudioClient* client, HANDLE event_handle, uint32* endpoint_buffer_size) {
DCHECK_EQ(share_mode_, AUDCLNT_SHAREMODE_EXCLUSIVE);
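// Convert the packet size from frames to a buffer duration expressed in
// 100-nanosecond REFERENCE_TIME units, rounding to the nearest unit.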
float f = (1000.0 * packet_size_frames_) / format_.Format.nSamplesPerSec;
REFERENCE_TIME requested_buffer_duration =
static_cast<REFERENCE_TIME>(f * 10000.0 + 0.5);
DWORD stream_flags = AUDCLNT_STREAMFLAGS_NOPERSIST;
bool use_event = (event_handle != NULL &&
event_handle != INVALID_HANDLE_VALUE);
if (use_event)
stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
VLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;
HRESULT hr = S_FALSE;
hr = client->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE,
stream_flags,
requested_buffer_duration,
requested_buffer_duration,
reinterpret_cast<WAVEFORMATEX*>(&format_),
NULL);
if (FAILED(hr)) {
if (hr == AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
LOG(ERROR) << "AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED";
UINT32 aligned_buffer_size = 0;
client->GetBufferSize(&aligned_buffer_size);
VLOG(1) << "Use aligned buffer size instead: " << aligned_buffer_size;
REFERENCE_TIME aligned_buffer_duration = static_cast<REFERENCE_TIME>(
(10000000.0 * aligned_buffer_size / format_.Format.nSamplesPerSec)
+ 0.5);
VLOG(1) << "aligned_buffer_duration: "
<< static_cast<double>(aligned_buffer_duration / 10000.0)
<< " [ms]";
} else if (hr == AUDCLNT_E_INVALID_DEVICE_PERIOD) {
LOG(ERROR) << "AUDCLNT_E_INVALID_DEVICE_PERIOD";
}
return hr;
}
if (use_event) {
hr = client->SetEventHandle(event_handle);
if (FAILED(hr)) {
VLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
return hr;
}
}
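// Retrieve the length of the endpoint buffer shared between the client and
// the audio engine.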
UINT32 buffer_size_in_frames = 0;
hr = client->GetBufferSize(&buffer_size_in_frames);
if (FAILED(hr)) {
VLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
return hr;
}
*endpoint_buffer_size = buffer_size_in_frames;
VLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;
return hr;
}
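// Joins the render thread if it was started (signaling |stop_render_event_|
// first), resets the event for a future Start(), and clears |source_|.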
void WASAPIAudioOutputStream::StopThread() {
if (render_thread_) {
if (render_thread_->HasBeenStarted()) {
SetEvent(stop_render_event_.Get());
render_thread_->Join();
}
render_thread_.reset();
ResetEvent(stop_render_event_.Get());
}
source_ = NULL;
}
}  // namespace media