This source file includes the following definitions:
- DetectClipping
- KeepAudioControllerRefcountedForDtor
- converted_data_
- ProvideInput
- state_
- StartRecognition
- AbortRecognition
- StopAudioCapture
- IsActive
- IsCapturingAudio
- recognition_engine
- OnError
- OnData
- OnAudioClosed
- OnSpeechRecognitionEngineResults
- OnSpeechRecognitionEngineError
- DispatchEvent
- ExecuteTransitionAndGetNextState
- ProcessAudioPipeline
- StartRecording
- StartRecognitionEngine
- WaitEnvironmentEstimationCompletion
- DetectUserSpeechOrTimeout
- DetectEndOfSpeech
- StopCaptureAndWaitForResult
- AbortSilently
- AbortWithError
- Abort
- ProcessIntermediateResult
- ProcessFinalResult
- DoNothing
- NotFeasible
- CloseAudioControllerAsynchronously
- GetElapsedTimeMs
- UpdateSignalAndNoiseLevels
- SetAudioManagerForTesting
- engine_error
#include "content/browser/speech/speech_recognizer_impl.h"
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/time/time.h"
#include "content/browser/browser_main_loop.h"
#include "content/browser/speech/audio_buffer.h"
#include "content/browser/speech/google_one_shot_remote_engine.h"
#include "content/public/browser/speech_recognition_event_listener.h"
#include "media/base/audio_converter.h"
#include "net/url_request/url_request_context_getter.h"
#if defined(OS_WIN)
#include "media/audio/win/core_audio_util_win.h"
#endif
using media::AudioBus;
using media::AudioConverter;
using media::AudioInputController;
using media::AudioManager;
using media::AudioParameters;
using media::ChannelLayout;
namespace content {
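// Wraps media::AudioConverter to adapt raw microphone buffers (in the
// platform's native format) into AudioChunks in the recognizer's fixed format
// (16 kHz, mono, 16 bits per sample), handling resampling, channel mixing and
// buffer-size conversion.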
class SpeechRecognizerImpl::OnDataConverter
: public media::AudioConverter::InputCallback {
public:
OnDataConverter(const AudioParameters& input_params,
const AudioParameters& output_params);
virtual ~OnDataConverter();
scoped_refptr<AudioChunk> Convert(const uint8* data, size_t size);
private:
virtual double ProvideInput(AudioBus* dest,
base::TimeDelta buffer_delay) OVERRIDE;
AudioConverter audio_converter_;
scoped_ptr<AudioBus> input_bus_;
scoped_ptr<AudioBus> output_bus_;
const AudioParameters input_parameters_;
const AudioParameters output_parameters_;
bool waiting_for_input_;
scoped_ptr<uint8[]> converted_data_;
DISALLOW_COPY_AND_ASSIGN(OnDataConverter);
};
namespace {
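// Constants for the audio level meter shown in the UI. Levels rise instantly
// (kUpSmoothingFactor = 1) but decay gradually (kDownSmoothingFactor).
// 90.31 dB is 20 * log10(32767), the RMS power of a full-scale square wave in
// 16-bit audio; RMS values below 30 dB display as an empty meter.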
const float kUpSmoothingFactor = 1.0f;
const float kDownSmoothingFactor = 0.7f;
const float kAudioMeterMaxDb = 90.31f;
const float kAudioMeterMinDb = 30.0f;
const float kAudioMeterDbRange = kAudioMeterMaxDb - kAudioMeterMinDb;
const float kAudioMeterRangeMaxUnclipped = 47.0f / 48.0f;
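// Returns true if more than 5% of the samples in |chunk| are at full scale
// (at or beyond +/-32767), which is treated as clipping.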
bool DetectClipping(const AudioChunk& chunk) {
const int num_samples = chunk.NumSamples();
const int16* samples = chunk.SamplesData16();
const int kThreshold = num_samples / 20;
int clipping_samples = 0;
for (int i = 0; i < num_samples; ++i) {
if (samples[i] <= -32767 || samples[i] >= 32767) {
if (++clipping_samples > kThreshold)
return true;
}
}
return false;
}
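// No-op. Bound as the AudioInputController::Close() callback purely so that
// the bound reference keeps the controller alive until the close completes,
// since the recognizer itself may be destroyed in the meantime.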
void KeepAudioControllerRefcountedForDtor(scoped_refptr<AudioInputController>) {
}
}  // namespace
const int SpeechRecognizerImpl::kAudioSampleRate = 16000;
const ChannelLayout SpeechRecognizerImpl::kChannelLayout =
media::CHANNEL_LAYOUT_MONO;
const int SpeechRecognizerImpl::kNumBitsPerAudioSample = 16;
const int SpeechRecognizerImpl::kNoSpeechTimeoutMs = 8000;
const int SpeechRecognizerImpl::kEndpointerEstimationTimeMs = 300;
media::AudioManager* SpeechRecognizerImpl::audio_manager_for_tests_ = NULL;
COMPILE_ASSERT(SpeechRecognizerImpl::kNumBitsPerAudioSample % 8 == 0,
kNumBitsPerAudioSample_must_be_a_multiple_of_8);
SpeechRecognizerImpl::OnDataConverter::OnDataConverter(
const AudioParameters& input_params, const AudioParameters& output_params)
: audio_converter_(input_params, output_params, false),
input_bus_(AudioBus::Create(input_params)),
output_bus_(AudioBus::Create(output_params)),
input_parameters_(input_params),
output_parameters_(output_params),
waiting_for_input_(false),
converted_data_(new uint8[output_parameters_.GetBytesPerBuffer()]) {
audio_converter_.AddInput(this);
}
SpeechRecognizerImpl::OnDataConverter::~OnDataConverter() {
audio_converter_.RemoveInput(this);
}
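// Converts one microphone buffer (|size| bytes in the input format) into a
// single AudioChunk in the output format. Buffer sizes are chosen in
// StartRecording() so that each Convert() call triggers exactly one
// ProvideInput() callback.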
scoped_refptr<AudioChunk> SpeechRecognizerImpl::OnDataConverter::Convert(
const uint8* data, size_t size) {
CHECK_EQ(size, static_cast<size_t>(input_parameters_.GetBytesPerBuffer()));
input_bus_->FromInterleaved(
data, input_bus_->frames(), input_parameters_.bits_per_sample() / 8);
waiting_for_input_ = true;
audio_converter_.Convert(output_bus_.get());
output_bus_->ToInterleaved(
output_bus_->frames(), output_parameters_.bits_per_sample() / 8,
converted_data_.get());
return scoped_refptr<AudioChunk>(new AudioChunk(
converted_data_.get(),
output_parameters_.GetBytesPerBuffer(),
output_parameters_.bits_per_sample() / 8));
}
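// media::AudioConverter::InputCallback implementation: feeds the input bus
// captured by Convert() to the converter at unity volume.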
double SpeechRecognizerImpl::OnDataConverter::ProvideInput(
AudioBus* dest, base::TimeDelta buffer_delay) {
CHECK(waiting_for_input_);
input_bus_->CopyTo(dest);
waiting_for_input_ = false;
return 1;
}
SpeechRecognizerImpl::SpeechRecognizerImpl(
SpeechRecognitionEventListener* listener,
int session_id,
bool continuous,
bool provisional_results,
SpeechRecognitionEngine* engine)
: SpeechRecognizer(listener, session_id),
recognition_engine_(engine),
endpointer_(kAudioSampleRate),
is_dispatching_event_(false),
provisional_results_(provisional_results),
state_(STATE_IDLE) {
DCHECK(recognition_engine_ != NULL);
if (!continuous) {
endpointer_.set_speech_input_complete_silence_length(
base::Time::kMicrosecondsPerSecond / 2);
endpointer_.set_long_speech_input_complete_silence_length(
base::Time::kMicrosecondsPerSecond);
endpointer_.set_long_speech_length(3 * base::Time::kMicrosecondsPerSecond);
} else {
const int64 cont_timeout_us = base::Time::kMicrosecondsPerSecond * 15;
endpointer_.set_speech_input_complete_silence_length(cont_timeout_us);
endpointer_.set_long_speech_length(0);
}
endpointer_.StartSession();
recognition_engine_->set_delegate(this);
}
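// The three methods below may be called from any thread; each posts the
// corresponding FSM event to the IO thread, where all state transitions are
// serialized.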
void SpeechRecognizerImpl::StartRecognition(const std::string& device_id) {
DCHECK(!device_id.empty());
device_id_ = device_id;
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, FSMEventArgs(EVENT_START)));
}
void SpeechRecognizerImpl::AbortRecognition() {
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, FSMEventArgs(EVENT_ABORT)));
}
void SpeechRecognizerImpl::StopAudioCapture() {
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, FSMEventArgs(EVENT_STOP_CAPTURE)));
}
bool SpeechRecognizerImpl::IsActive() const {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
return state_ != STATE_IDLE && state_ != STATE_ENDED;
}
bool SpeechRecognizerImpl::IsCapturingAudio() const {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
const bool is_capturing_audio = state_ >= STATE_STARTING &&
state_ <= STATE_RECOGNIZING;
DCHECK((is_capturing_audio && (audio_controller_.get() != NULL)) ||
(!is_capturing_audio && audio_controller_.get() == NULL));
return is_capturing_audio;
}
const SpeechRecognitionEngine&
SpeechRecognizerImpl::recognition_engine() const {
return *(recognition_engine_.get());
}
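// Closes any outstanding AudioInputController on destruction; the bound
// reference keeps the controller alive until Close() completes.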
SpeechRecognizerImpl::~SpeechRecognizerImpl() {
endpointer_.EndSession();
if (audio_controller_.get()) {
audio_controller_->Close(
base::Bind(&KeepAudioControllerRefcountedForDtor, audio_controller_));
}
}
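// Callbacks from the AudioInputController (invoked on the audio thread) and
// from the recognition engine. All of them are re-posted to the IO thread as
// FSM events, even when they already arrive on it, so that events are
// processed strictly in arrival order.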
void SpeechRecognizerImpl::OnError(AudioInputController* controller,
media::AudioInputController::ErrorCode error_code) {
FSMEventArgs event_args(EVENT_AUDIO_ERROR);
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, event_args));
}
void SpeechRecognizerImpl::OnData(AudioInputController* controller,
const uint8* data, uint32 size) {
if (size == 0)
return;
FSMEventArgs event_args(EVENT_AUDIO_DATA);
event_args.audio_data = audio_converter_->Convert(data, size);
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, event_args));
}
void SpeechRecognizerImpl::OnAudioClosed(AudioInputController*) {}
void SpeechRecognizerImpl::OnSpeechRecognitionEngineResults(
const SpeechRecognitionResults& results) {
FSMEventArgs event_args(EVENT_ENGINE_RESULT);
event_args.engine_results = results;
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, event_args));
}
void SpeechRecognizerImpl::OnSpeechRecognitionEngineError(
const SpeechRecognitionError& error) {
FSMEventArgs event_args(EVENT_ENGINE_ERROR);
event_args.engine_error = error;
BrowserThread::PostTask(BrowserThread::IO, FROM_HERE,
base::Bind(&SpeechRecognizerImpl::DispatchEvent,
this, event_args));
}
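// ----------------------- Core FSM implementation ----------------------------
// DispatchEvent runs on the IO thread and performs a single, non-reentrant
// FSM transition per event.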
void SpeechRecognizerImpl::DispatchEvent(const FSMEventArgs& event_args) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
DCHECK_LE(event_args.event, EVENT_MAX_VALUE);
DCHECK_LE(state_, STATE_MAX_VALUE);
DCHECK(!is_dispatching_event_);
is_dispatching_event_ = true;
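  // Guard against a transition (or a listener callback) releasing the last
  // reference to |this| while the event is still being processed.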
scoped_refptr<SpeechRecognizerImpl> me(this);
if (event_args.event == EVENT_AUDIO_DATA) {
DCHECK(event_args.audio_data.get() != NULL);
ProcessAudioPipeline(*event_args.audio_data.get());
}
state_ = ExecuteTransitionAndGetNextState(event_args);
is_dispatching_event_ = false;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ExecuteTransitionAndGetNextState(
const FSMEventArgs& event_args) {
const FSMEvent event = event_args.event;
switch (state_) {
case STATE_IDLE:
switch (event) {
case EVENT_ABORT:
return AbortSilently(event_args);
case EVENT_START:
return StartRecording(event_args);
case EVENT_STOP_CAPTURE:
return AbortSilently(event_args);
case EVENT_AUDIO_DATA:
case EVENT_ENGINE_RESULT:
case EVENT_ENGINE_ERROR:
case EVENT_AUDIO_ERROR:
return DoNothing(event_args);
}
break;
case STATE_STARTING:
switch (event) {
case EVENT_ABORT:
return AbortWithError(event_args);
case EVENT_START:
return NotFeasible(event_args);
case EVENT_STOP_CAPTURE:
return AbortSilently(event_args);
case EVENT_AUDIO_DATA:
return StartRecognitionEngine(event_args);
case EVENT_ENGINE_RESULT:
return NotFeasible(event_args);
case EVENT_ENGINE_ERROR:
case EVENT_AUDIO_ERROR:
return AbortWithError(event_args);
}
break;
case STATE_ESTIMATING_ENVIRONMENT:
switch (event) {
case EVENT_ABORT:
return AbortWithError(event_args);
case EVENT_START:
return NotFeasible(event_args);
case EVENT_STOP_CAPTURE:
return StopCaptureAndWaitForResult(event_args);
case EVENT_AUDIO_DATA:
return WaitEnvironmentEstimationCompletion(event_args);
case EVENT_ENGINE_RESULT:
return ProcessIntermediateResult(event_args);
case EVENT_ENGINE_ERROR:
case EVENT_AUDIO_ERROR:
return AbortWithError(event_args);
}
break;
case STATE_WAITING_FOR_SPEECH:
switch (event) {
case EVENT_ABORT:
return AbortWithError(event_args);
case EVENT_START:
return NotFeasible(event_args);
case EVENT_STOP_CAPTURE:
return StopCaptureAndWaitForResult(event_args);
case EVENT_AUDIO_DATA:
return DetectUserSpeechOrTimeout(event_args);
case EVENT_ENGINE_RESULT:
return ProcessIntermediateResult(event_args);
case EVENT_ENGINE_ERROR:
case EVENT_AUDIO_ERROR:
return AbortWithError(event_args);
}
break;
case STATE_RECOGNIZING:
switch (event) {
case EVENT_ABORT:
return AbortWithError(event_args);
case EVENT_START:
return NotFeasible(event_args);
case EVENT_STOP_CAPTURE:
return StopCaptureAndWaitForResult(event_args);
case EVENT_AUDIO_DATA:
return DetectEndOfSpeech(event_args);
case EVENT_ENGINE_RESULT:
return ProcessIntermediateResult(event_args);
case EVENT_ENGINE_ERROR:
case EVENT_AUDIO_ERROR:
return AbortWithError(event_args);
}
break;
case STATE_WAITING_FINAL_RESULT:
switch (event) {
case EVENT_ABORT:
return AbortWithError(event_args);
case EVENT_START:
return NotFeasible(event_args);
case EVENT_STOP_CAPTURE:
case EVENT_AUDIO_DATA:
return DoNothing(event_args);
case EVENT_ENGINE_RESULT:
return ProcessFinalResult(event_args);
case EVENT_ENGINE_ERROR:
case EVENT_AUDIO_ERROR:
return AbortWithError(event_args);
}
break;
case STATE_ENDED:
return DoNothing(event_args);
}
return NotFeasible(event_args);
}
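// ----------- Contract for the FSM transition functions below ----------------
// They run on the IO thread, are not reentrant, and IsCapturingAudio() is
// true if and only if audio_controller_ is non-NULL.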
void SpeechRecognizerImpl::ProcessAudioPipeline(const AudioChunk& raw_audio) {
const bool route_to_endpointer = state_ >= STATE_ESTIMATING_ENVIRONMENT &&
state_ <= STATE_RECOGNIZING;
const bool route_to_sr_engine = route_to_endpointer;
const bool route_to_vumeter = state_ >= STATE_WAITING_FOR_SPEECH &&
state_ <= STATE_RECOGNIZING;
const bool clip_detected = DetectClipping(raw_audio);
float rms = 0.0f;
num_samples_recorded_ += raw_audio.NumSamples();
if (route_to_endpointer)
endpointer_.ProcessAudio(raw_audio, &rms);
if (route_to_vumeter) {
DCHECK(route_to_endpointer);
UpdateSignalAndNoiseLevels(rms, clip_detected);
}
if (route_to_sr_engine) {
DCHECK(recognition_engine_.get() != NULL);
recognition_engine_->TakeAudioChunk(raw_audio);
}
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecording(const FSMEventArgs&) {
DCHECK(recognition_engine_.get() != NULL);
DCHECK(!IsCapturingAudio());
const bool unit_test_is_active = (audio_manager_for_tests_ != NULL);
AudioManager* audio_manager = unit_test_is_active ?
audio_manager_for_tests_ :
AudioManager::Get();
DCHECK(audio_manager != NULL);
DVLOG(1) << "SpeechRecognizerImpl starting audio capture.";
num_samples_recorded_ = 0;
audio_level_ = 0;
listener()->OnRecognitionStart(session_id());
if (!audio_manager->HasAudioInputDevices()) {
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO,
SPEECH_AUDIO_ERROR_DETAILS_NO_MIC));
}
int chunk_duration_ms = recognition_engine_->GetDesiredAudioChunkDurationMs();
AudioParameters in_params = audio_manager->GetInputStreamParameters(
device_id_);
if (!in_params.IsValid() && !unit_test_is_active) {
DLOG(ERROR) << "Invalid native audio input parameters";
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO));
}
int frames_per_buffer = (kAudioSampleRate * chunk_duration_ms) / 1000;
AudioParameters output_parameters = AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, kChannelLayout, kAudioSampleRate,
kNumBitsPerAudioSample, frames_per_buffer);
bool use_native_audio_params = true;
#if defined(OS_WIN)
use_native_audio_params = media::CoreAudioUtil::IsSupported();
DVLOG_IF(1, !use_native_audio_params) << "Reverting to WaveIn for WebSpeech";
#endif
AudioParameters input_parameters = output_parameters;
if (use_native_audio_params && !unit_test_is_active) {
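    // Capture at the native sample rate, but keep the engine's chunk duration
    // plus 2 ms of slack so that resampling always yields a full output
    // buffer per Convert() call; adding 0.5 rounds the frame count to the
    // nearest integer.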
frames_per_buffer =
((in_params.sample_rate() * (chunk_duration_ms + 2)) / 1000.0) + 0.5;
input_parameters.Reset(in_params.format(),
in_params.channel_layout(),
in_params.channels(),
in_params.input_channels(),
in_params.sample_rate(),
in_params.bits_per_sample(),
frames_per_buffer);
}
audio_converter_.reset(
new OnDataConverter(input_parameters, output_parameters));
audio_controller_ = AudioInputController::Create(
audio_manager, this, input_parameters, device_id_, NULL);
if (!audio_controller_.get()) {
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO));
}
endpointer_.SetEnvironmentEstimationMode();
audio_controller_->Record();
return STATE_STARTING;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
DCHECK(recognition_engine_.get() != NULL);
recognition_engine_->StartRecognition();
listener()->OnAudioStart(session_id());
recognition_engine_->TakeAudioChunk(*(event_args.audio_data.get()));
return STATE_ESTIMATING_ENVIRONMENT;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
DCHECK(endpointer_.IsEstimatingEnvironment());
if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
endpointer_.SetUserInputMode();
listener()->OnEnvironmentEstimationComplete(session_id());
return STATE_WAITING_FOR_SPEECH;
} else {
return STATE_ESTIMATING_ENVIRONMENT;
}
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectUserSpeechOrTimeout(const FSMEventArgs&) {
if (endpointer_.DidStartReceivingSpeech()) {
listener()->OnSoundStart(session_id());
return STATE_RECOGNIZING;
} else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) {
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH));
}
return STATE_WAITING_FOR_SPEECH;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectEndOfSpeech(const FSMEventArgs& event_args) {
if (endpointer_.speech_input_complete())
return StopCaptureAndWaitForResult(event_args);
return STATE_RECOGNIZING;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::StopCaptureAndWaitForResult(const FSMEventArgs&) {
DCHECK(state_ >= STATE_ESTIMATING_ENVIRONMENT && state_ <= STATE_RECOGNIZING);
DVLOG(1) << "Concluding recognition";
CloseAudioControllerAsynchronously();
recognition_engine_->AudioChunksEnded();
if (state_ > STATE_WAITING_FOR_SPEECH)
listener()->OnSoundEnd(session_id());
listener()->OnAudioEnd(session_id());
return STATE_WAITING_FINAL_RESULT;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortSilently(const FSMEventArgs& event_args) {
DCHECK_NE(event_args.event, EVENT_AUDIO_ERROR);
DCHECK_NE(event_args.event, EVENT_ENGINE_ERROR);
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NONE));
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::AbortWithError(const FSMEventArgs& event_args) {
if (event_args.event == EVENT_AUDIO_ERROR) {
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO));
} else if (event_args.event == EVENT_ENGINE_ERROR) {
return Abort(event_args.engine_error);
}
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_ABORTED));
}
SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::Abort(
const SpeechRecognitionError& error) {
if (IsCapturingAudio())
CloseAudioControllerAsynchronously();
DVLOG(1) << "SpeechRecognizerImpl canceling recognition. ";
if (state_ > STATE_STARTING) {
DCHECK(recognition_engine_.get() != NULL);
recognition_engine_->EndRecognition();
}
if (state_ > STATE_WAITING_FOR_SPEECH && state_ < STATE_WAITING_FINAL_RESULT)
listener()->OnSoundEnd(session_id());
if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT)
listener()->OnAudioEnd(session_id());
if (error.code != SPEECH_RECOGNITION_ERROR_NONE)
listener()->OnRecognitionError(session_id(), error);
listener()->OnRecognitionEnd(session_id());
return STATE_ENDED;
}
SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::ProcessIntermediateResult(
const FSMEventArgs& event_args) {
DCHECK(provisional_results_);
if (state_ == STATE_ESTIMATING_ENVIRONMENT) {
DCHECK(endpointer_.IsEstimatingEnvironment());
endpointer_.SetUserInputMode();
listener()->OnEnvironmentEstimationComplete(session_id());
} else if (state_ == STATE_WAITING_FOR_SPEECH) {
listener()->OnSoundStart(session_id());
} else {
DCHECK_EQ(STATE_RECOGNIZING, state_);
}
listener()->OnRecognitionResults(session_id(), event_args.engine_results);
return STATE_RECOGNIZING;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
const SpeechRecognitionResults& results = event_args.engine_results;
SpeechRecognitionResults::const_iterator i = results.begin();
bool provisional_results_pending = false;
bool results_are_empty = true;
for (; i != results.end(); ++i) {
const SpeechRecognitionResult& result = *i;
if (result.is_provisional) {
DCHECK(provisional_results_);
provisional_results_pending = true;
} else if (results_are_empty) {
results_are_empty = result.hypotheses.empty();
}
}
if (provisional_results_pending) {
listener()->OnRecognitionResults(session_id(), results);
return state_;
}
recognition_engine_->EndRecognition();
if (!results_are_empty) {
listener()->OnRecognitionResults(session_id(), results);
}
listener()->OnRecognitionEnd(session_id());
return STATE_ENDED;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DoNothing(const FSMEventArgs&) const {
return state_;
}
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::NotFeasible(const FSMEventArgs& event_args) {
NOTREACHED() << "Unfeasible event " << event_args.event
<< " in state " << state_;
return state_;
}
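// Close() is asynchronous: |audio_controller_| is cleared immediately (so
// IsCapturingAudio() becomes false), while the reference bound to
// OnAudioClosed() keeps the controller alive until the close completes.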
void SpeechRecognizerImpl::CloseAudioControllerAsynchronously() {
DCHECK(IsCapturingAudio());
DVLOG(1) << "SpeechRecognizerImpl closing audio controller.";
audio_controller_->Close(base::Bind(&SpeechRecognizerImpl::OnAudioClosed,
this, audio_controller_));
audio_controller_ = NULL;
}
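// Elapsed capture time, derived from the number of recorded samples rather
// than from wall-clock time.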
int SpeechRecognizerImpl::GetElapsedTimeMs() const {
return (num_samples_recorded_ * 1000) / kAudioSampleRate;
}
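// Maps the RMS signal and noise estimates (in dB) onto the meter range
// [0.0, 47/48], smooths level changes, and reports 1.0 when clipping was
// detected.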
void SpeechRecognizerImpl::UpdateSignalAndNoiseLevels(const float& rms,
bool clip_detected) {
float level = (rms - kAudioMeterMinDb) /
(kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
level = std::min(std::max(0.0f, level), kAudioMeterRangeMaxUnclipped);
const float smoothing_factor = (level > audio_level_) ? kUpSmoothingFactor :
kDownSmoothingFactor;
audio_level_ += (level - audio_level_) * smoothing_factor;
float noise_level = (endpointer_.NoiseLevelDb() - kAudioMeterMinDb) /
(kAudioMeterDbRange / kAudioMeterRangeMaxUnclipped);
noise_level = std::min(std::max(0.0f, noise_level),
kAudioMeterRangeMaxUnclipped);
listener()->OnAudioLevelsChange(
session_id(), clip_detected ? 1.0f : audio_level_, noise_level);
}
void SpeechRecognizerImpl::SetAudioManagerForTesting(
AudioManager* audio_manager) {
audio_manager_for_tests_ = audio_manager;
}
SpeechRecognizerImpl::FSMEventArgs::FSMEventArgs(FSMEvent event_value)
: event(event_value),
audio_data(NULL),
engine_error(SPEECH_RECOGNITION_ERROR_NONE) {
}
SpeechRecognizerImpl::FSMEventArgs::~FSMEventArgs() {
}
}  // namespace content