This source file includes the following definitions.
- TrackOwner
- Capture
- OnSetFormat
- SetAudioProcessor
- Reset
- Stop
- TrackOwner
- CreateCapturer
- Initialize
- audio_device_
- AddTrack
- RemoveTrack
- SetCapturerSource
- EnablePeerConnectionMode
- Start
- Stop
- SetVolume
- Volume
- MaxVolume
- OnCaptureError
- source_audio_parameters
- GetPairedOutputParameters
- GetBufferSize
- GetAudioProcessingParams
- SetCapturerSourceForTesting
- StartAecDump
- StopAecDump
#include "content/renderer/media/webrtc_audio_capturer.h"

#include <algorithm>

#include "base/bind.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "content/child/child_process.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_processor_options.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/media/webrtc_logging.h"
#include "media/audio/sample_rates.h"
namespace content {
namespace {

// Audio input sample rates (in Hz) that Initialize() accepts, per platform.
// A device reporting any other rate causes initialization to fail.
#if defined(OS_WIN) || defined(OS_MACOSX)
const int kValidInputRates[] =
    {192000, 96000, 48000, 44100, 32000, 16000, 8000};
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
const int kValidInputRates[] = {48000, 44100};
#elif defined(OS_ANDROID)
const int kValidInputRates[] = {48000, 44100};
#else
const int kValidInputRates[] = {44100};
#endif

}  // namespace
// Reference-counted wrapper around a raw WebRtcLocalAudioTrack pointer.
// The capturer hands TrackOwner references to the audio thread while the
// main thread may concurrently detach (Reset) or stop the track, so every
// access to |delegate_| is serialized by |lock_|.
class WebRtcAudioCapturer::TrackOwner
    : public base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner> {
 public:
  explicit TrackOwner(WebRtcLocalAudioTrack* track)
      : delegate_(track) {}

  // Forwards one buffer of captured audio to the track; no-op once the
  // track has been detached.
  void Capture(const int16* audio_data,
               base::TimeDelta delay,
               double volume,
               bool key_pressed,
               bool need_audio_processing) {
    base::AutoLock lock(lock_);
    if (!delegate_)
      return;
    delegate_->Capture(
        audio_data, delay, volume, key_pressed, need_audio_processing);
  }

  // Tells the track about a new audio format, if still attached.
  void OnSetFormat(const media::AudioParameters& params) {
    base::AutoLock lock(lock_);
    if (!delegate_)
      return;
    delegate_->OnSetFormat(params);
  }

  // Hands the shared audio processor to the track, if still attached.
  void SetAudioProcessor(
      const scoped_refptr<MediaStreamAudioProcessor>& processor) {
    base::AutoLock lock(lock_);
    if (!delegate_)
      return;
    delegate_->SetAudioProcessor(processor);
  }

  // Detaches the track without stopping it; later calls become no-ops.
  void Reset() {
    base::AutoLock lock(lock_);
    delegate_ = NULL;
  }

  // Detaches and stops the track. |delegate_| is cleared before Stop() is
  // invoked so any reentrant call into this owner sees a detached track.
  void Stop() {
    base::AutoLock lock(lock_);
    DCHECK(delegate_);
    WebRtcLocalAudioTrack* track_to_stop = delegate_;
    delegate_ = NULL;
    track_to_stop->Stop();
  }

  // Predicate for locating the TrackOwner that wraps a given track.
  struct TrackWrapper {
    TrackWrapper(WebRtcLocalAudioTrack* track) : track_(track) {}
    bool operator()(
        const scoped_refptr<WebRtcAudioCapturer::TrackOwner>& owner) const {
      return owner->IsEqual(track_);
    }
    WebRtcLocalAudioTrack* track_;
  };

 protected:
  virtual ~TrackOwner() {}

 private:
  friend class base::RefCountedThreadSafe<WebRtcAudioCapturer::TrackOwner>;

  bool IsEqual(const WebRtcLocalAudioTrack* other) const {
    base::AutoLock lock(lock_);
    return delegate_ == other;
  }

  // Raw pointer to the wrapped track; NULL once detached. Guarded by |lock_|.
  WebRtcLocalAudioTrack* delegate_;
  mutable base::Lock lock_;

  DISALLOW_COPY_AND_ASSIGN(TrackOwner);
};
// static
scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer(
    int render_view_id, const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device) {
  scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer(
      render_view_id, device_info, constraints, audio_device);
  // On failure the only reference is |capturer|, so the instance is
  // released when the scoped_refptr goes out of scope.
  if (!capturer->Initialize())
    return NULL;
  return capturer;
}
// Validates the device parameters, creates the default capture source and
// registers with |audio_device_|. Returns false when the hardware channel
// layout or sample rate is unsupported. Must run on the thread the capturer
// was created on.
//
// Fix: the sample-rate lookup previously relied on a transitive include for
// std::find (<algorithm> is now included explicitly) and formed the end
// iterator by subscripting one past the end of the array
// (&kValidInputRates[arraysize(...)]); plain pointer arithmetic is used
// instead.
bool WebRtcAudioCapturer::Initialize() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Initialize()";
  WebRtcLogMessage(base::StringPrintf(
      "WAC::Initialize. render_view_id=%d"
      ", channel_layout=%d, sample_rate=%d, buffer_size=%d"
      ", session_id=%d, paired_output_sample_rate=%d"
      ", paired_output_frames_per_buffer=%d, effects=%d. ",
      render_view_id_,
      device_info_.device.input.channel_layout,
      device_info_.device.input.sample_rate,
      device_info_.device.input.frames_per_buffer,
      device_info_.session_id,
      device_info_.device.matched_output.sample_rate,
      device_info_.device.matched_output.frames_per_buffer,
      device_info_.device.input.effects));

  // A render view id of -1 means no device is attached; such a capturer is
  // considered successfully initialized without creating a source.
  if (render_view_id_ == -1) {
    return true;
  }

  media::ChannelLayout channel_layout = static_cast<media::ChannelLayout>(
      device_info_.device.input.channel_layout);
  DVLOG(1) << "Audio input hardware channel layout: " << channel_layout;
  UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioInputChannelLayout",
                            channel_layout, media::CHANNEL_LAYOUT_MAX + 1);

  // Only mono and stereo inputs are supported.
  if (channel_layout != media::CHANNEL_LAYOUT_MONO &&
      channel_layout != media::CHANNEL_LAYOUT_STEREO) {
    DLOG(ERROR) << channel_layout
                << " is not a supported input channel configuration.";
    return false;
  }

  DVLOG(1) << "Audio input hardware sample rate: "
           << device_info_.device.input.sample_rate;
  media::AudioSampleRate asr;
  if (media::ToAudioSampleRate(device_info_.device.input.sample_rate, &asr)) {
    UMA_HISTOGRAM_ENUMERATION(
        "WebRTC.AudioInputSampleRate", asr, media::kAudioSampleRateMax + 1);
  } else {
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioInputSampleRateUnexpected",
                         device_info_.device.input.sample_rate);
  }

  // Reject sample rates the platform does not support (see
  // kValidInputRates above).
  const int* const rates_begin = kValidInputRates;
  const int* const rates_end = kValidInputRates + arraysize(kValidInputRates);
  if (std::find(rates_begin, rates_end,
                device_info_.device.input.sample_rate) == rates_end) {
    DLOG(ERROR) << device_info_.device.input.sample_rate
                << " is not a supported input rate.";
    return false;
  }

  // Create and attach the default capture source for this render view.
  SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id_),
                    channel_layout,
                    static_cast<float>(device_info_.device.input.sample_rate));

  // Register with the device; |audio_device_| may be NULL.
  if (audio_device_)
    audio_device_->AddAudioCapturer(this);

  return true;
}
// Constructs the capturer in the stopped state. The shared
// MediaStreamAudioProcessor is created here so that every track added later
// receives the same processor instance (see Capture()).
// |audio_device| is stored as a raw pointer and may be NULL; it is checked
// before use in Initialize() and Stop().
WebRtcAudioCapturer::WebRtcAudioCapturer(
    int render_view_id,
    const StreamDeviceInfo& device_info,
    const blink::WebMediaConstraints& constraints,
    WebRtcAudioDeviceImpl* audio_device)
    : constraints_(constraints),
      audio_processor_(
          new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
              constraints, device_info.device.input.effects,
              device_info.device.type, audio_device)),
      running_(false),
      render_view_id_(render_view_id),
      device_info_(device_info),
      volume_(0),
      peer_connection_mode_(false),
      key_pressed_(false),
      need_audio_processing_(false),
      audio_device_(audio_device) {
  DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()";
}
WebRtcAudioCapturer::~WebRtcAudioCapturer() {
  DVLOG(1) << "WebRtcAudioCapturer::~WebRtcAudioCapturer()";
  DCHECK(thread_checker_.CalledOnValidThread());
  // By the time the last reference goes away all tracks must have been
  // removed and the source stopped.
  DCHECK(!running_);
  DCHECK(tracks_.IsEmpty());
}
// Registers |track| with the capturer and ensures the source is running.
void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
  DCHECK(track);
  DVLOG(1) << "WebRtcAudioCapturer::AddTrack()";

  {
    base::AutoLock auto_lock(lock_);
    // Each track may only be added once. Tagging it makes the audio thread
    // deliver the current format on the next Capture() callback.
    DCHECK(!tracks_.Contains(TrackOwner::TrackWrapper(track)));
    tracks_.AddAndTag(new TrackOwner(track));
  }

  // Start the source now that at least one track is attached (no-op if it
  // is already running).
  Start();
}
// Unregisters |track|. The owner is detached rather than destroyed so any
// in-flight audio-thread callback holding a reference becomes a no-op.
void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  scoped_refptr<TrackOwner> owner =
      tracks_.Remove(TrackOwner::TrackWrapper(track));
  if (!owner.get())
    return;
  owner->Reset();
}
// Replaces the underlying audio source. Called on the capturer's creation
// thread from Initialize(), EnablePeerConnectionMode() and the testing
// entry point. The old source is stopped outside |lock_|, the processor and
// all tracks are told about the new format, and the new source is started
// if the old one was running.
void WebRtcAudioCapturer::SetCapturerSource(
    const scoped_refptr<media::AudioCapturerSource>& source,
    media::ChannelLayout channel_layout,
    float sample_rate) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << ","
           << "sample_rate=" << sample_rate << ")";
  scoped_refptr<media::AudioCapturerSource> old_source;
  bool restart_source = false;
  {
    base::AutoLock auto_lock(lock_);
    // Setting the same source again is a no-op.
    if (source_.get() == source.get())
      return;

    // Move the outgoing source into |old_source| so it can be stopped after
    // the lock is released, then install the new one.
    source_.swap(old_source);
    source_ = source;

    // Remember whether capturing was active; it is re-enabled via Start()
    // at the bottom of this function.
    restart_source = running_;
    running_ = false;
  }

  DVLOG(1) << "Switching to a new capture source.";
  // Stop the old source outside the lock.
  if (old_source.get())
    old_source->Stop();

  // Build the capture format for the new source: low-latency PCM, 16 bits
  // per sample, buffer size chosen by GetBufferSize().
  int buffer_size = GetBufferSize(sample_rate);
  media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                                channel_layout, 0, sample_rate,
                                16, buffer_size,
                                device_info_.device.input.effects);
  {
    base::AutoLock auto_lock(lock_);
    audio_processor_->OnCaptureFormatChanged(params);
    need_audio_processing_ = NeedsAudioProcessing(
        constraints_, device_info_.device.input.effects);
    // Tag every track so the audio thread re-sends the format via
    // OnSetFormat() on the next Capture() callback.
    tracks_.TagAll();
  }

  if (source.get())
    source->Initialize(params, this, session_id());

  if (restart_source)
    Start();
}
void WebRtcAudioCapturer::EnablePeerConnectionMode() {
DCHECK(thread_checker_.CalledOnValidThread());
DVLOG(1) << "EnablePeerConnectionMode";
if (peer_connection_mode_)
return;
peer_connection_mode_ = true;
int render_view_id = -1;
media::AudioParameters input_params;
{
base::AutoLock auto_lock(lock_);
if (!source_.get() || render_view_id_== -1)
return;
render_view_id = render_view_id_;
input_params = audio_processor_->InputFormat();
}
if (GetBufferSize(input_params.sample_rate()) ==
input_params.frames_per_buffer()) {
return;
}
SetCapturerSource(AudioDeviceFactory::NewInputDevice(render_view_id),
input_params.channel_layout(),
static_cast<float>(input_params.sample_rate()));
}
// Starts the capture source if one is attached and not already running.
void WebRtcAudioCapturer::Start() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Start()";
  base::AutoLock auto_lock(lock_);
  // Ignore redundant calls and calls made before a source exists.
  if (running_ || !source_)
    return;

  // Enable automatic gain control before starting the source.
  source_->SetAutomaticGainControl(true);
  source_->Start();
  running_ = true;
}
// Stops capturing. The ordering matters: state is snapshotted and cleared
// under |lock_|, then the capturer deregisters from the device, the AEC
// dump is stopped, the tracks are stopped, and finally the source — all of
// the external calls made without holding the lock.
void WebRtcAudioCapturer::Stop() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(1) << "WebRtcAudioCapturer::Stop()";
  scoped_refptr<media::AudioCapturerSource> source;
  TrackList::ItemList tracks;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    // Take local references so Stop() can be called on them below without
    // holding |lock_|.
    source = source_;
    tracks = tracks_.Items();
    tracks_.Clear();
    running_ = false;
  }

  if (audio_device_)
    audio_device_->RemoveAudioCapturer(this);

  // Ensure any in-progress AEC dump is stopped before teardown.
  StopAecDump();

  // Stop the tracks before the source so they stop receiving data first.
  for (TrackList::ItemList::const_iterator it = tracks.begin();
       it != tracks.end();
       ++it) {
    (*it)->Stop();
  }

  if (source.get())
    source->Stop();
}
// Applies |volume| (an integer in [0, MaxVolume()]) to the source, which
// expects a normalized gain in [0.0, 1.0].
void WebRtcAudioCapturer::SetVolume(int volume) {
  DVLOG(1) << "WebRtcAudioCapturer::SetVolume()";
  DCHECK_LE(volume, MaxVolume());
  const double normalized_volume =
      static_cast<double>(volume) / MaxVolume();
  base::AutoLock auto_lock(lock_);
  if (source_.get())
    source_->SetVolume(normalized_volume);
}
// Returns the most recent microphone volume recorded by Capture(), scaled
// to [0, MaxVolume()].
int WebRtcAudioCapturer::Volume() const {
  base::AutoLock auto_lock(lock_);
  return volume_;
}
// Upper bound of the integer volume range; defined by the WebRTC audio
// device layer.
int WebRtcAudioCapturer::MaxVolume() const {
  return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
}
// Callback from the audio layer delivering one buffer of input audio. Runs
// concurrently with the main-thread methods, so all shared state is
// sampled/updated under |lock_| before processing.
void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
                                  int audio_delay_milliseconds,
                                  double volume,
                                  bool key_pressed) {
// The reported volume scale is platform dependent: up to 1.6 on
// Linux/OpenBSD, up to 1.0 elsewhere.
#if defined(OS_WIN) || defined(OS_MACOSX)
  DCHECK_LE(volume, 1.0);
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
  DCHECK_LE(volume, 1.6);
#endif

  TrackList::ItemList tracks;
  TrackList::ItemList tracks_to_notify_format;
  int current_volume = 0;
  base::TimeDelta audio_delay;
  bool need_audio_processing = true;
  {
    base::AutoLock auto_lock(lock_);
    if (!running_)
      return;

    // Map |volume| to an integer in [0, MaxVolume()] (rounded) and cache
    // the per-callback state for Volume()/GetAudioProcessingParams().
    volume_ = static_cast<int>((volume * MaxVolume()) + 0.5);
    current_volume = volume_;
    audio_delay = base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
    audio_delay_ = audio_delay;
    key_pressed_ = key_pressed;
    tracks = tracks_.Items();
    // Tracks tagged since the last callback still need OnSetFormat().
    tracks_.RetrieveAndClearTags(&tracks_to_notify_format);

    // Processing is only requested here when needed AND track-level
    // processing is not enabled (otherwise tracks process on their own).
    need_audio_processing = need_audio_processing_ ?
        !audio_processor_->IsAudioTrackProcessingEnabled() : false;
  }

  // The incoming bus must match the processor's configured input format.
  DCHECK(audio_processor_->InputFormat().IsValid());
  DCHECK_EQ(audio_source->channels(),
            audio_processor_->InputFormat().channels());
  DCHECK_EQ(audio_source->frames(),
            audio_processor_->InputFormat().frames_per_buffer());

  // Notify newly tagged tracks of the output format and hand them the
  // shared processor.
  media::AudioParameters output_params = audio_processor_->OutputFormat();
  for (TrackList::ItemList::const_iterator it = tracks_to_notify_format.begin();
       it != tracks_to_notify_format.end(); ++it) {
    (*it)->OnSetFormat(output_params);
    (*it)->SetAudioProcessor(audio_processor_);
  }

  // Push the data into the processor and fan each processed chunk out to
  // every track. A non-zero |new_volume| is a requested microphone volume
  // change (presumably from AGC — not visible here).
  audio_processor_->PushCaptureData(audio_source);
  int16* output = NULL;
  int new_volume = 0;
  while (audio_processor_->ProcessAndConsumeData(
      audio_delay, current_volume, key_pressed, &new_volume, &output)) {
    for (TrackList::ItemList::const_iterator it = tracks.begin();
         it != tracks.end(); ++it) {
      (*it)->Capture(output, audio_delay, current_volume, key_pressed,
                     need_audio_processing);
    }

    if (new_volume) {
      SetVolume(new_volume);
      // Subsequent chunks in this callback are processed with the new
      // volume.
      current_volume = new_volume;
    }
  }
}
// Called by the source when capturing fails. No recovery or reporting is
// implemented.
void WebRtcAudioCapturer::OnCaptureError() {
  NOTIMPLEMENTED();
}
// Returns the processor's current input format, or default-constructed
// (invalid) parameters when no processor exists.
media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const {
  base::AutoLock auto_lock(lock_);
  if (!audio_processor_.get())
    return media::AudioParameters();
  return audio_processor_->InputFormat();
}
// Fills in the session id and matched output parameters of the input
// device. Returns false — leaving the outputs untouched — when the session
// is not open or no matched output information is available.
bool WebRtcAudioCapturer::GetPairedOutputParameters(
    int* session_id,
    int* output_sample_rate,
    int* output_frames_per_buffer) const {
  const bool has_paired_output =
      device_info_.session_id > 0 &&
      device_info_.device.matched_output.sample_rate != 0 &&
      device_info_.device.matched_output.frames_per_buffer != 0;
  if (!has_paired_output)
    return false;

  *session_id = device_info_.session_id;
  *output_sample_rate = device_info_.device.matched_output.sample_rate;
  *output_frames_per_buffer =
      device_info_.device.matched_output.frames_per_buffer;
  return true;
}
// Chooses the capture buffer size (in frames) for |sample_rate|.
int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const {
  DCHECK(thread_checker_.CalledOnValidThread());
#if defined(OS_ANDROID)
  // Android always uses 20 ms worth of frames.
  return (2 * sample_rate / 100);
#else
  // 10 ms worth of frames.
  const int peer_connection_buffer_size = sample_rate / 100;

  // Outside peer-connection mode, prefer the native hardware buffer size
  // when it is known and no larger than 10 ms.
  const int hardware_buffer_size = device_info_.device.input.frames_per_buffer;
  if (!peer_connection_mode_ && hardware_buffer_size &&
      hardware_buffer_size <= peer_connection_buffer_size) {
    return hardware_buffer_size;
  }

  return peer_connection_buffer_size;
#endif
}
// Copies out the delay, volume and key-press state stored by the most
// recent Capture() callback.
void WebRtcAudioCapturer::GetAudioProcessingParams(
    base::TimeDelta* delay, int* volume, bool* key_pressed) {
  base::AutoLock auto_lock(lock_);
  *key_pressed = key_pressed_;
  *volume = volume_;
  *delay = audio_delay_;
}
void WebRtcAudioCapturer::SetCapturerSourceForTesting(
const scoped_refptr<media::AudioCapturerSource>& source,
media::AudioParameters params) {
SetCapturerSource(source, params.channel_layout(),
static_cast<float>(params.sample_rate()));
}
// Starts dumping AEC diagnostic data to |aec_dump_file|, which must be a
// valid platform file handle. Delegated to the audio processor (ownership
// of the handle is handled there — not visible here).
void WebRtcAudioCapturer::StartAecDump(
    const base::PlatformFile& aec_dump_file) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_NE(aec_dump_file, base::kInvalidPlatformFileValue);
  audio_processor_->StartAecDump(aec_dump_file);
}
// Stops a previously started AEC dump; delegated to the audio processor.
// Also invoked unconditionally from Stop().
void WebRtcAudioCapturer::StopAecDump() {
  DCHECK(thread_checker_.CalledOnValidThread());
  audio_processor_->StopAecDump();
}
}