root/content/renderer/media/webrtc_audio_device_impl.cc

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. aec_dump_file_
  2. AddRef
  3. Release
  4. OnData
  5. OnSetFormat
  6. RenderData
  7. RemoveAudioRenderer
  8. RegisterAudioCallback
  9. Init
  10. Terminate
  11. Initialized
  12. PlayoutIsAvailable
  13. PlayoutIsInitialized
  14. RecordingIsAvailable
  15. RecordingIsInitialized
  16. StartPlayout
  17. StopPlayout
  18. Playing
  19. StartRecording
  20. StopRecording
  21. Recording
  22. SetMicrophoneVolume
  23. MicrophoneVolume
  24. MaxMicrophoneVolume
  25. MinMicrophoneVolume
  26. StereoPlayoutIsAvailable
  27. StereoRecordingIsAvailable
  28. PlayoutDelay
  29. RecordingDelay
  30. RecordingSampleRate
  31. PlayoutSampleRate
  32. SetAudioRenderer
  33. AddAudioCapturer
  34. RemoveAudioCapturer
  35. GetDefaultCapturer
  36. AddPlayoutSink
  37. RemovePlayoutSink
  38. GetAuthorizedDeviceInfoForAudioRenderer
  39. EnableAecDump
  40. DisableAecDump
  41. MaybeStartAecDump

// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_audio_device_impl.h"

#include "base/bind.h"
#include "base/metrics/histogram.h"
#include "base/platform_file.h"
#include "base/strings/string_util.h"
#include "base/win/windows_version.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_renderer.h"
#include "content/renderer/render_thread_impl.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/sample_rates.h"

using media::AudioParameters;
using media::ChannelLayout;

namespace content {

// Constructs the device in an idle, unreferenced state. All real setup is
// deferred: Init() marks the device usable, SetAudioRenderer() attaches
// playout and AddAudioCapturer() attaches recording. The AEC dump file
// handle starts out invalid (no dump pending).
WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
    : ref_count_(0),
      audio_transport_callback_(NULL),
      input_delay_ms_(0),
      output_delay_ms_(0),
      initialized_(false),
      playing_(false),
      recording_(false),
      microphone_volume_(0),
      aec_dump_file_(base::kInvalidPlatformFileValue) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()";
}

// Runs on the construction thread (enforced by |thread_checker_|) once the
// reference count maintained via AddRef()/Release() drops to zero.
// Terminate() is idempotent, so tearing down here is safe even if the
// client already called it.
WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl()";
  DCHECK(thread_checker_.CalledOnValidThread());
  Terminate();
}

// Increments the reference count with a full-barrier atomic increment and
// returns the new count, per the webrtc ref-counting interface.
// NOTE(review): the thread-checker DCHECK implies AddRef()/Release() are
// expected only on the construction thread, making the atomic a belt-and-
// braces measure -- confirm against callers.
int32_t WebRtcAudioDeviceImpl::AddRef() {
  DCHECK(thread_checker_.CalledOnValidThread());
  return base::subtle::Barrier_AtomicIncrement(&ref_count_, 1);
}

// Decrements the reference count (full-barrier, mirroring AddRef()) and
// self-deletes when the count reaches zero. Returns the remaining count.
int32_t WebRtcAudioDeviceImpl::Release() {
  DCHECK(thread_checker_.CalledOnValidThread());
  const int remaining_refs =
      base::subtle::Barrier_AtomicIncrement(&ref_count_, -1);
  if (remaining_refs == 0)
    delete this;
  return remaining_refs;
}
// Capture callback, invoked on the audio capture worker thread with a block
// of interleaved 16-bit PCM. Forwards the data to the registered
// webrtc::AudioTransport in 10 ms slices and returns the last non-zero
// microphone level suggested by WebRTC (0 when no level change was
// requested).
int WebRtcAudioDeviceImpl::OnData(const int16* audio_data,
                                  int sample_rate,
                                  int number_of_channels,
                                  int number_of_frames,
                                  const std::vector<int>& channels,
                                  int audio_delay_milliseconds,
                                  int current_volume,
                                  bool need_audio_processing,
                                  bool key_pressed) {
  int total_delay_ms = 0;
  {
    base::AutoLock auto_lock(lock_);
    // Return immediately when not recording or |channels| is empty.
    // See crbug.com/274017: renderer crash dereferencing invalid channels[0].
    if (!recording_ || channels.empty())
      return 0;

    // Store the reported audio delay locally.
    input_delay_ms_ = audio_delay_milliseconds;
    // The delay WebRTC sees is capture delay plus the playout delay last
    // reported by RenderData().
    total_delay_ms = input_delay_ms_ + output_delay_ms_;
    DVLOG(2) << "total delay: " << input_delay_ms_ + output_delay_ms_;
  }

  // Write audio samples in blocks of 10 milliseconds to the registered
  // webrtc::AudioTransport sink. Keep writing until our internal byte
  // buffer is empty.
  const int16* audio_buffer = audio_data;
  const int samples_per_10_msec = (sample_rate / 100);
  // The capturer must deliver whole 10 ms blocks.
  CHECK_EQ(number_of_frames % samples_per_10_msec, 0);
  int accumulated_audio_samples = 0;
  uint32_t new_volume = 0;

  // The lock here is to protect a race in the resampler inside webrtc when
  // there are more than one input stream calling OnData(), which can happen
  // when the users setup two getUserMedia, one for the microphone, another
  // for WebAudio. Currently we don't have a better way to fix it except for
  // adding a lock here to sequence the call.
  // TODO(xians): Remove this workaround after we move the
  // webrtc::AudioProcessing module to Chrome. See http://crbug/264611 for
  // details.
  base::AutoLock auto_lock(capture_callback_lock_);
  while (accumulated_audio_samples < number_of_frames) {
    // Deliver 10ms of recorded 16-bit linear PCM audio.
    int new_mic_level = audio_transport_callback_->OnDataAvailable(
        &channels[0],
        channels.size(),
        audio_buffer,
        sample_rate,
        number_of_channels,
        samples_per_10_msec,
        total_delay_ms,
        current_volume,
        key_pressed,
        need_audio_processing);

    accumulated_audio_samples += samples_per_10_msec;
    // Advance past the interleaved frames just delivered.
    audio_buffer += samples_per_10_msec * number_of_channels;

    // The latest non-zero new microphone level will be returned.
    if (new_mic_level)
      new_volume = new_mic_level;
  }

  return new_volume;
}

// Capture-format change notification from the capturer. The device keeps
// no per-format state of its own, so this is log-only.
void WebRtcAudioDeviceImpl::OnSetFormat(
    const media::AudioParameters& params) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::OnSetFormat()";
}

// Called on the rendering thread to pull |audio_bus->frames()| frames of
// playout audio from WebRTC in 10 ms slices, convert them into |audio_bus|,
// and fan the rendered data out to all registered playout sinks.
void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus,
                                       int sample_rate,
                                       int audio_delay_milliseconds) {
  // Interleaved 16-bit scratch buffer; one int16 per sample per channel.
  render_buffer_.resize(audio_bus->frames() * audio_bus->channels());

  {
    base::AutoLock auto_lock(lock_);
    DCHECK(audio_transport_callback_);
    // Store the reported audio delay locally.
    output_delay_ms_ = audio_delay_milliseconds;
  }

  const int samples_per_10_msec = (sample_rate / 100);
  const int bytes_per_sample = sizeof(render_buffer_[0]);
  // The renderer must request whole 10 ms blocks.
  DCHECK_EQ(audio_bus->frames() % samples_per_10_msec, 0);

  // Get audio samples in blocks of 10 milliseconds from the registered
  // webrtc::AudioTransport source. Keep reading until our internal buffer
  // is full.
  uint32_t num_audio_samples = 0;
  int accumulated_audio_samples = 0;
  int16* audio_data = &render_buffer_[0];
  while (accumulated_audio_samples < audio_bus->frames()) {
    // Get 10ms and append output to temporary byte buffer.
    audio_transport_callback_->NeedMorePlayData(samples_per_10_msec,
                                                bytes_per_sample,
                                                audio_bus->channels(),
                                                sample_rate,
                                                audio_data,
                                                num_audio_samples);
    accumulated_audio_samples += num_audio_samples;
    // BUG FIX: advance by the number of int16 elements just written
    // (frames * channels). The previous code advanced by
    // |bytes_per_10_msec| *int16 elements* -- i.e. twice the data actually
    // written -- which overruns |render_buffer_| whenever this loop runs
    // more than one iteration.
    audio_data += samples_per_10_msec * audio_bus->channels();
  }

  // De-interleave each channel and convert to 32-bit floating-point
  // with nominal range -1.0 -> +1.0 to match the callback format.
  audio_bus->FromInterleaved(&render_buffer_[0],
                             audio_bus->frames(),
                             bytes_per_sample);

  // Pass the render data to the playout sinks.
  base::AutoLock auto_lock(lock_);
  for (PlayoutDataSinkList::const_iterator it = playout_sinks_.begin();
       it != playout_sinks_.end(); ++it) {
    (*it)->OnPlayoutData(audio_bus, sample_rate, audio_delay_milliseconds);
  }
}

// Detaches the shared audio renderer. |renderer| must be the renderer that
// was previously installed via SetAudioRenderer(). Every registered playout
// sink is told that its data source is going away before the renderer
// reference is dropped and playout is marked stopped.
void WebRtcAudioDeviceImpl::RemoveAudioRenderer(WebRtcAudioRenderer* renderer) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_EQ(renderer, renderer_);
  base::AutoLock auto_lock(lock_);
  // Notify the playout sinks of the change.
  PlayoutDataSinkList::const_iterator sink_it = playout_sinks_.begin();
  for (; sink_it != playout_sinks_.end(); ++sink_it)
    (*sink_it)->OnPlayoutDataSourceChanged();

  renderer_ = NULL;
  playing_ = false;
}

// Registers (or clears) the WebRTC audio transport used by OnData() and
// RenderData(). Always succeeds.
int32_t WebRtcAudioDeviceImpl::RegisterAudioCallback(
    webrtc::AudioTransport* audio_callback) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::RegisterAudioCallback()";
  DCHECK(thread_checker_.CalledOnValidThread());
  // Either a new callback is being installed while none is set, or the
  // existing callback is being cleared with NULL -- never silently
  // overwritten.
  DCHECK_EQ(audio_transport_callback_ == NULL, audio_callback != NULL);
  audio_transport_callback_ = audio_callback;
  return 0;
}

// Called by WebRTC VoE during its initialization. Always reports success
// (see comment below for why).
int32_t WebRtcAudioDeviceImpl::Init() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::Init()";
  DCHECK(thread_checker_.CalledOnValidThread());

  // We need to return a success to continue the initialization of WebRtc VoE
  // because failure on the capturer_ initialization should not prevent WebRTC
  // from working. See issue http://crbug.com/144421 for details.
  initialized_ = true;

  return 0;
}

// Tears down recording/playout, stops or closes any AEC dump, and releases
// all capturers. Idempotent: returns immediately when the device was never
// initialized or has already been terminated.
int32_t WebRtcAudioDeviceImpl::Terminate() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::Terminate()";
  DCHECK(thread_checker_.CalledOnValidThread());

  // Calling Terminate() multiple times in a row is OK.
  if (!initialized_)
    return 0;

  StopRecording();
  StopPlayout();

  DCHECK(!renderer_.get() || !renderer_->IsStarted())
      << "The shared audio renderer shouldn't be running";

  // Close (or stop) a pending AEC dump before dropping the capturers.
  DisableAecDump();

  capturers_.clear();

  initialized_ = false;
  return 0;
}

// True between a successful Init() and Terminate().
bool WebRtcAudioDeviceImpl::Initialized() const {
  return initialized_;
}

// Playout is considered available whenever the device is initialized; the
// actual renderer is attached separately via SetAudioRenderer().
int32_t WebRtcAudioDeviceImpl::PlayoutIsAvailable(bool* available) {
  *available = initialized_;
  return 0;
}

// Mirrors PlayoutIsAvailable(): playout counts as initialized as soon as
// the device itself is.
bool WebRtcAudioDeviceImpl::PlayoutIsInitialized() const {
  return initialized_;
}

// Recording is available once at least one capturer has been registered.
// NOTE(review): |capturers_| is read without |lock_| here, unlike
// GetDefaultCapturer() -- presumably only called on the main thread;
// confirm against callers.
int32_t WebRtcAudioDeviceImpl::RecordingIsAvailable(bool* available) {
  *available = (!capturers_.empty());
  return 0;
}

// Recording counts as initialized once at least one capturer is registered.
bool WebRtcAudioDeviceImpl::RecordingIsInitialized() const {
  DVLOG(1) << "WebRtcAudioDeviceImpl::RecordingIsInitialized()";
  DCHECK(thread_checker_.CalledOnValidThread());
  return (!capturers_.empty());
}

// Marks playout as started. Succeeds trivially (returns 0) even when no
// audio transport has been registered yet, in which case nothing changes.
int32_t WebRtcAudioDeviceImpl::StartPlayout() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::StartPlayout()";
  LOG_IF(ERROR, !audio_transport_callback_) << "Audio transport is missing";
  {
    base::AutoLock auto_lock(lock_);
    if (!audio_transport_callback_)
      return 0;
  }

  // webrtc::VoiceEngine assumes that it is OK to call Start() twice and
  // that the second call is ignored; setting the flag again when already
  // playing is a no-op, so no explicit early-out is needed.
  playing_ = true;
  return 0;
}

// Marks playout as stopped. webrtc::VoiceEngine assumes that calling
// Stop() when already stopped is OK; clearing the flag again is harmless,
// so no explicit early-out is needed.
int32_t WebRtcAudioDeviceImpl::StopPlayout() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::StopPlayout()";
  playing_ = false;
  return 0;
}

// Returns the current playout state. NOTE(review): |playing_| is read
// without |lock_| here -- confirm callers stay on the controlling thread.
bool WebRtcAudioDeviceImpl::Playing() const {
  return playing_;
}

// Marks recording as started so OnData() begins forwarding captured audio.
// Fails (-1) when no audio transport has been registered; calling it again
// while already recording is a harmless no-op.
int32_t WebRtcAudioDeviceImpl::StartRecording() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::StartRecording()";
  DCHECK(initialized_);
  if (!audio_transport_callback_) {
    LOG(ERROR) << "Audio transport is missing";
    return -1;
  }

  // |recording_| is shared with the capture thread (OnData), hence the
  // lock. Setting it when already true leaves the state unchanged.
  base::AutoLock auto_lock(lock_);
  recording_ = true;
  return 0;
}

// Marks recording as stopped so OnData() drops further captured audio.
// Idempotent: clearing the flag when not recording changes nothing.
int32_t WebRtcAudioDeviceImpl::StopRecording() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::StopRecording()";
  // |recording_| is shared with the capture thread (OnData), hence the lock.
  base::AutoLock auto_lock(lock_);
  recording_ = false;
  return 0;
}

// Returns the recording state; |recording_| is shared with the capture
// thread (OnData), hence the lock.
bool WebRtcAudioDeviceImpl::Recording() const {
  base::AutoLock auto_lock(lock_);
  return recording_;
}

int32_t WebRtcAudioDeviceImpl::SetMicrophoneVolume(uint32_t volume) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::SetMicrophoneVolume(" << volume << ")";
  DCHECK(initialized_);

  // Only one microphone is supported at the moment, which is represented by
  // the default capturer.
  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
  if (!capturer.get())
    return -1;

  capturer->SetVolume(volume);
  return 0;
}

// TODO(henrika): sort out calling thread once we start using this API.
int32_t WebRtcAudioDeviceImpl::MicrophoneVolume(uint32_t* volume) const {
  DVLOG(1) << "WebRtcAudioDeviceImpl::MicrophoneVolume()";
  // We only support one microphone now, which is accessed via the default
  // capturer.
  DCHECK(initialized_);
  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
  if (!capturer.get())
    return -1;

  *volume = static_cast<uint32_t>(capturer->Volume());

  return 0;
}

// Reports the upper bound of the volume scale WebRTC may request via the
// mic-level feedback in OnData(); kMaxVolumeLevel is the media-layer
// volume scale maximum.
int32_t WebRtcAudioDeviceImpl::MaxMicrophoneVolume(uint32_t* max_volume) const {
  DCHECK(initialized_);
  *max_volume = kMaxVolumeLevel;
  return 0;
}

int32_t WebRtcAudioDeviceImpl::MinMicrophoneVolume(uint32_t* min_volume) const {
  *min_volume = 0;
  return 0;
}

// True only when a renderer is attached and it outputs exactly two
// channels. NOTE(review): |renderer_| is read without |lock_| here --
// presumably main-thread only; confirm against callers.
int32_t WebRtcAudioDeviceImpl::StereoPlayoutIsAvailable(bool* available) const {
  DCHECK(initialized_);
  *available = renderer_ && renderer_->channels() == 2;
  return 0;
}

int32_t WebRtcAudioDeviceImpl::StereoRecordingIsAvailable(
    bool* available) const {
  DCHECK(initialized_);
  // TODO(xians): These kind of hardware methods do not make much sense since we
  // support multiple sources. Remove or figure out new APIs for such methods.
  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
  if (!capturer.get())
    return -1;

  *available = (capturer->source_audio_parameters().channels() == 2);
  return 0;
}

// Returns the playout delay most recently reported by RenderData(),
// truncated to 16 bits as required by the WebRTC interface.
int32_t WebRtcAudioDeviceImpl::PlayoutDelay(uint16_t* delay_ms) const {
  base::AutoLock auto_lock(lock_);
  *delay_ms = static_cast<uint16_t>(output_delay_ms_);
  return 0;
}

// Returns the capture delay most recently reported by OnData(), truncated
// to 16 bits as required by the WebRTC interface.
int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const {
  base::AutoLock auto_lock(lock_);
  *delay_ms = static_cast<uint16_t>(input_delay_ms_);
  return 0;
}

int32_t WebRtcAudioDeviceImpl::RecordingSampleRate(
    uint32_t* samples_per_sec) const {
  // We use the default capturer as the recording sample rate.
  scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer());
  if (!capturer.get())
    return -1;

  *samples_per_sec = static_cast<uint32_t>(
      capturer->source_audio_parameters().sample_rate());
  return 0;
}

// Reports the attached renderer's sample rate, or 0 when no renderer is
// attached. NOTE(review): |renderer_| is read without |lock_| here.
int32_t WebRtcAudioDeviceImpl::PlayoutSampleRate(
    uint32_t* samples_per_sec) const {
  *samples_per_sec = renderer_ ? renderer_->sample_rate() : 0;
  return 0;
}

// Attaches the (single) shared audio renderer. Returns false if a renderer
// is already set or if the renderer fails to initialize.
// NOTE(review): renderer->Initialize(this) runs while |lock_| is held --
// Initialize() must not call back into methods that take |lock_| or this
// will deadlock; confirm against WebRtcAudioRenderer.
bool WebRtcAudioDeviceImpl::SetAudioRenderer(WebRtcAudioRenderer* renderer) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(renderer);

  base::AutoLock auto_lock(lock_);
  if (renderer_.get())
    return false;

  if (!renderer->Initialize(this))
    return false;

  renderer_ = renderer;
  return true;
}

// Registers a new capturer (one per getUserMedia audio source). The newest
// capturer becomes the default one (see GetDefaultCapturer()). If an AEC
// dump was requested before any capturer existed, it is started now.
void WebRtcAudioDeviceImpl::AddAudioCapturer(
    const scoped_refptr<WebRtcAudioCapturer>& capturer) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::AddAudioCapturer()";
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(capturer.get());
  DCHECK(!capturer->device_id().empty());
  {
    base::AutoLock auto_lock(lock_);
    // A capturer must only be registered once.
    DCHECK(std::find(capturers_.begin(), capturers_.end(), capturer) ==
        capturers_.end());
    capturers_.push_back(capturer);
  }

  // Start the Aec dump if the Aec dump has been enabled and has not been
  // started.
  if (aec_dump_file_ != base::kInvalidPlatformFileValue)
    MaybeStartAecDump();
}

// Unregisters a capturer previously added via AddAudioCapturer(). If it
// was the default capturer, the next-most-recent one takes over.
void WebRtcAudioDeviceImpl::RemoveAudioCapturer(
    const scoped_refptr<WebRtcAudioCapturer>& capturer) {
  // Fixed copy-paste bug: this trace used to log "AddAudioCapturer()".
  DVLOG(1) << "WebRtcAudioDeviceImpl::RemoveAudioCapturer()";
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(capturer.get());
  base::AutoLock auto_lock(lock_);
  capturers_.remove(capturer);
}

// Returns the default capture device, defined as the most recently added
// capturer (i.e. the one from the latest getUserMedia call), or NULL when
// no capturer is registered.
scoped_refptr<WebRtcAudioCapturer>
WebRtcAudioDeviceImpl::GetDefaultCapturer() const {
  base::AutoLock auto_lock(lock_);
  if (capturers_.empty())
    return NULL;
  return capturers_.back();
}

// Registers a sink that will receive a copy of every rendered audio block
// from RenderData(). The sink must not already be registered; the caller
// retains ownership and must call RemovePlayoutSink() before destruction.
void WebRtcAudioDeviceImpl::AddPlayoutSink(
    WebRtcPlayoutDataSource::Sink* sink) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(sink);
  base::AutoLock auto_lock(lock_);
  DCHECK(std::find(playout_sinks_.begin(), playout_sinks_.end(), sink) ==
      playout_sinks_.end());
  playout_sinks_.push_back(sink);
}

// Unregisters a playout sink previously added via AddPlayoutSink().
// Removing a sink that was never added is a harmless no-op.
void WebRtcAudioDeviceImpl::RemovePlayoutSink(
    WebRtcPlayoutDataSource::Sink* sink) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(sink);
  base::AutoLock auto_lock(lock_);
  playout_sinks_.remove(sink);
}

// Fetches the session id and the output parameters paired with the open
// capture device, for unified input/output audio handling. Pairing only
// makes sense when exactly one capture device is open; returns false
// otherwise. NOTE(review): |capturers_| is read here without |lock_| --
// presumably main-thread only; confirm against callers.
bool WebRtcAudioDeviceImpl::GetAuthorizedDeviceInfoForAudioRenderer(
    int* session_id,
    int* output_sample_rate,
    int* output_frames_per_buffer) {
  DCHECK(thread_checker_.CalledOnValidThread());
  // Simplified from `empty() || size() > 1`: succeed only with exactly one
  // open capture device.
  if (capturers_.size() != 1)
    return false;

  return GetDefaultCapturer()->GetPairedOutputParameters(
      session_id, output_sample_rate, output_frames_per_buffer);
}

// Takes an already-open platform file to which the AEC dump should be
// written. Dumping starts immediately when a capturer exists; otherwise it
// is deferred until AddAudioCapturer() runs.
void WebRtcAudioDeviceImpl::EnableAecDump(
    const base::PlatformFile& aec_dump_file) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_NE(aec_dump_file, base::kInvalidPlatformFileValue);
  // Enabling twice without an intervening DisableAecDump() is a bug.
  DCHECK_EQ(aec_dump_file_, base::kInvalidPlatformFileValue);
  aec_dump_file_ = aec_dump_file;
  MaybeStartAecDump();
}

// Stops any in-progress AEC dump. Two cases: the file handle is still held
// here (no capturer has picked it up), or its ownership has already been
// passed to WebRTC via a capturer's StartAecDump().
void WebRtcAudioDeviceImpl::DisableAecDump() {
  DCHECK(thread_checker_.CalledOnValidThread());
  // If we have not passed ownership of the file to WebRTC yet, simply
  // close and invalidate |aec_dump_file_|.
  if (aec_dump_file_ != base::kInvalidPlatformFileValue) {
    base::ClosePlatformFile(aec_dump_file_);
    aec_dump_file_ = base::kInvalidPlatformFileValue;
    return;
  }

  // Otherwise StartAecDump() may have been called on one of the capturers.
  // Loop through all the capturers and call StopAecDump() on each of them.
  for (CapturerList::const_iterator iter = capturers_.begin();
       iter != capturers_.end(); ++iter) {
    (*iter)->StopAecDump();
  }
}

// Hands |aec_dump_file_| over to the default capturer, if one exists. On
// success the local handle is invalidated because WebRTC then owns the
// file; when no capturer exists yet the handle is kept so a later
// AddAudioCapturer() can retry.
void WebRtcAudioDeviceImpl::MaybeStartAecDump() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_NE(aec_dump_file_, base::kInvalidPlatformFileValue);

  // Start the Aec dump on the current default capturer.
  scoped_refptr<WebRtcAudioCapturer> default_capturer(GetDefaultCapturer());
  if (!default_capturer)
    return;

  default_capturer->StartAecDump(aec_dump_file_);

  // Invalidate the |aec_dump_file_| since the ownership of the file has been
  // passed to WebRtc.
  aec_dump_file_ = base::kInvalidPlatformFileValue;
}

}  // namespace content

/* [<][>][^][v][top][bottom][index][help] */