This source file includes the following definitions:
- create
- SpeechSynthesis (constructor)
- setPlatformSynthesizer
- executionContext
- voicesDidChange
- getVoices
- speaking
- pending
- paused
- startSpeakingImmediately
- speak
- cancel
- pause
- resume
- fireEvent
- handleSpeakingCompleted
- boundaryEventOccurred
- didStartSpeaking
- didPauseSpeaking
- didResumeSpeaking
- didFinishSpeaking
- speakingErrorOccurred
- currentSpeechUtterance
- interfaceName
- trace
#include "config.h"
#include "modules/speech/SpeechSynthesis.h"
#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExecutionContext.h"
#include "modules/speech/SpeechSynthesisEvent.h"
#include "platform/speech/PlatformSpeechSynthesisVoice.h"
#include "wtf/CurrentTime.h"
namespace WebCore {
PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context)
{
return adoptRefWillBeRefCountedGarbageCollected(new SpeechSynthesis(context));
}
SpeechSynthesis::SpeechSynthesis(ExecutionContext* context)
: ContextLifecycleObserver(context)
, m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this))
, m_isPaused(false)
{
ScriptWrappable::init(this);
}
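// Test hook: lets a caller (e.g. a test harness) swap in a substitute
// platform synthesizer in place of the one created in the constructor.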
void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer)
{
m_platformSpeechSynthesizer = synthesizer;
}
ExecutionContext* SpeechSynthesis::executionContext() const
{
return ContextLifecycleObserver::executionContext();
}
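// Called by the platform when the set of available voices changes: drop the
// cached list so getVoices() rebuilds it, and notify any page listeners.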
void SpeechSynthesis::voicesDidChange()
{
m_voiceList.clear();
if (!executionContext()->activeDOMObjectsAreStopped())
dispatchEvent(Event::create(EventTypeNames::voiceschanged));
}
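// Lazily wrap each platform voice in a SpeechSynthesisVoice, caching the
// result until voicesDidChange() invalidates it.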
const WillBeHeapVector<RefPtrWillBeMember<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
{
if (m_voiceList.size())
return m_voiceList;
const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList();
size_t voiceCount = platformVoices.size();
for (size_t k = 0; k < voiceCount; k++)
m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k]));
return m_voiceList;
}
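// We are speaking whenever an utterance heads the queue; this is independent
// of whether playback currently happens to be paused.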
bool SpeechSynthesis::speaking() const
{
return currentSpeechUtterance();
}
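// Pending means at least one queued utterance has not started yet, i.e.
// anything beyond the current one at the front of the queue.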
bool SpeechSynthesis::pending() const
{
return m_utteranceQueue.size() > 1;
}
bool SpeechSynthesis::paused() const
{
return m_isPaused;
}
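// Hand the utterance at the front of the queue to the platform, stamping its
// start time on the monotonic clock used later for event timestamps.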
void SpeechSynthesis::startSpeakingImmediately()
{
SpeechSynthesisUtterance* utterance = currentSpeechUtterance();
ASSERT(utterance);
utterance->setStartTime(monotonicallyIncreasingTime());
m_isPaused = false;
m_platformSpeechSynthesizer->speak(utterance->platformUtterance());
}
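// Web-facing speechSynthesis.speak(): validate and enqueue the utterance,
// starting it immediately when nothing else is queued. From script this is
// reached via, for example:
//   var u = new SpeechSynthesisUtterance("hello");
//   window.speechSynthesis.speak(u);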
void SpeechSynthesis::speak(SpeechSynthesisUtterance* utterance, ExceptionState& exceptionState)
{
if (!utterance) {
exceptionState.throwTypeError("Invalid utterance argument");
return;
}
m_utteranceQueue.append(utterance);
if (m_utteranceQueue.size() == 1)
startSpeakingImmediately();
}
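// Drop everything in the queue and stop the platform synthesizer. The
// platform may still fire events asynchronously for utterances in flight.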
void SpeechSynthesis::cancel()
{
m_utteranceQueue.clear();
m_platformSpeechSynthesizer->cancel();
}
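// Ask the platform to pause; m_isPaused is only flipped once the platform
// confirms via didPauseSpeaking().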
void SpeechSynthesis::pause()
{
if (!m_isPaused)
m_platformSpeechSynthesizer->pause();
}
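// Resuming makes no sense without a current utterance, so bail out early.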
void SpeechSynthesis::resume()
{
if (!currentSpeechUtterance())
return;
m_platformSpeechSynthesizer->resume();
}
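// Dispatch a SpeechSynthesisEvent on the utterance unless the context has
// been stopped. elapsedTime is measured on the same monotonic clock that
// startSpeakingImmediately() used to stamp the start time.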
void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name)
{
if (!executionContext()->activeDOMObjectsAreStopped())
utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (monotonicallyIncreasingTime() - utterance->startTime()), name));
}
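// Shared tail for normal completion and errors: pop the finished utterance,
// report end or error to the page, and advance to the next one queued.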
void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred)
{
ASSERT(utterance);
bool didJustFinishCurrentUtterance = false;
if (utterance == currentSpeechUtterance()) {
m_utteranceQueue.removeFirst();
didJustFinishCurrentUtterance = true;
}
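// Fire the event even if the utterance is no longer current: the platform
// may report on an utterance after we canceled it, and the page should
// still be told what actually happened.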
fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String());
if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty())
startSpeakingImmediately();
}
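// Translate platform word/sentence boundary callbacks into 'boundary' events
// carrying the character index and the boundary type name the spec exposes.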
void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex)
{
DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word"));
DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence"));
switch (boundary) {
case SpeechWordBoundary:
fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString);
break;
case SpeechSentenceBoundary:
fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, sentenceBoundaryString);
break;
default:
ASSERT_NOT_REACHED();
}
}
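// PlatformSpeechSynthesizerClient callbacks. Each forwards the platform
// notification as the corresponding DOM event on the utterance (when a
// client is still attached) and keeps m_isPaused in sync with the platform.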
void SpeechSynthesis::didStartSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
if (utterance->client())
fireEvent(EventTypeNames::start, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}
void SpeechSynthesis::didPauseSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
m_isPaused = true;
if (utterance->client())
fireEvent(EventTypeNames::pause, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}
void SpeechSynthesis::didResumeSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
m_isPaused = false;
if (utterance->client())
fireEvent(EventTypeNames::resume, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}
void SpeechSynthesis::didFinishSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
if (utterance->client())
handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false);
}
void SpeechSynthesis::speakingErrorOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
if (utterance->client())
handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true);
}
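// The utterance at the head of the queue is the one being spoken; it stays
// there until handleSpeakingCompleted() removes it on finish or error.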
SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const
{
if (!m_utteranceQueue.isEmpty())
return m_utteranceQueue.first().get();
return 0;
}
const AtomicString& SpeechSynthesis::interfaceName() const
{
return EventTargetNames::SpeechSynthesis;
}
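// Oilpan tracing: keeps the cached voice wrappers and queued utterances
// alive while this object is reachable.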
void SpeechSynthesis::trace(Visitor* visitor)
{
visitor->trace(m_voiceList);
visitor->trace(m_utteranceQueue);
}
} // namespace WebCore