This source file includes the following definitions:
- m_normalize
- process
- initialize
- uninitialize
- setBuffer
- buffer
- tailTime
- latencyTime
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "modules/webaudio/ConvolverNode.h"
#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExceptionCode.h"
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "platform/audio/Reverb.h"
#include "wtf/MainThread.h"
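// Upper bound on the FFT size used by the Reverb convolution engine. Larger FFTs
// make long impulse responses cheaper to convolve per sample, at the cost of more
// work and memory per processing block.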
const size_t MaxFFTSize = 32768;
namespace WebCore {
ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_normalize(true)
{
    ScriptWrappable::init(this);
    addInput(adoptPtr(new AudioNodeInput(this)));
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    // Node-specific default channel mixing rules.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    setNodeType(NodeTypeConvolver);

    initialize();
}
ConvolverNode::~ConvolverNode()
{
    uninitialize();
}
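// Called on the audio rendering thread. A try-lock is used so that a concurrent
// setBuffer() on the main thread can never block rendering; if the lock is
// contended, this render quantum is output as silence.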
void ConvolverNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();
    ASSERT(outputBus);

    // Synchronize with possible dynamic changes to the impulse response (setBuffer()).
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked()) {
        if (!isInitialized() || !m_reverb.get())
            outputBus->zero();
        else {
            // Process using the convolution engine.
            m_reverb->process(input(0)->bus(), outputBus, framesToProcess);
        }
    } else {
        // The tryLock() failed: a new impulse response is being set, so output silence.
        outputBus->zero();
    }
}
void ConvolverNode::initialize()
{
    if (isInitialized())
        return;

    AudioNode::initialize();
}
void ConvolverNode::uninitialize()
{
    if (!isInitialized())
        return;

    m_reverb.clear();
    AudioNode::uninitialize();
}
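// Main-thread entry point for setting the impulse response. The new Reverb engine
// is constructed outside the lock (this can be expensive for long responses) and
// then swapped in while briefly holding m_processLock.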
void ConvolverNode::setBuffer(AudioBuffer* buffer, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    if (!buffer)
        return;

    if (buffer->sampleRate() != context()->sampleRate()) {
        exceptionState.throwDOMException(
            NotSupportedError,
            "The buffer sample rate of " + String::number(buffer->sampleRate())
            + " does not match the context rate of " + String::number(context()->sampleRate())
            + " Hz.");
        return;
    }

    unsigned numberOfChannels = buffer->numberOfChannels();
    size_t bufferLength = buffer->length();

    // The impulse response must have between one and four channels and be non-empty.
    bool isBufferGood = numberOfChannels > 0 && numberOfChannels <= 4 && bufferLength;
    ASSERT(isBufferGood);
    if (!isBufferGood)
        return;

    // Wrap the AudioBuffer's channel data in an AudioBus without copying.
    RefPtr<AudioBus> bufferBus = AudioBus::create(numberOfChannels, bufferLength, false);
    for (unsigned i = 0; i < numberOfChannels; ++i)
        bufferBus->setChannelMemory(i, buffer->getChannelData(i)->data(), bufferLength);
    bufferBus->setSampleRate(buffer->sampleRate());

    // Create the new Reverb object outside the lock; background threads are used
    // only for real-time (non-offline) contexts.
    bool useBackgroundThreads = !context()->isOfflineContext();
    OwnPtr<Reverb> reverb = adoptPtr(new Reverb(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize));

    {
        // Synchronize with process().
        MutexLocker locker(m_processLock);
        m_reverb = reverb.release();
        m_buffer = buffer;
    }
}
AudioBuffer* ConvolverNode::buffer()
{
    ASSERT(isMainThread());
    return m_buffer.get();
}
double ConvolverNode::tailTime() const
{
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked())
        return m_reverb ? m_reverb->impulseResponseLength() / static_cast<double>(sampleRate()) : 0;
    // Rather than block while the impulse response is being changed, report infinity.
    return std::numeric_limits<double>::infinity();
}
double ConvolverNode::latencyTime() const
{
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked())
        return m_reverb ? m_reverb->latencyFrames() / static_cast<double>(sampleRate()) : 0;
    // As in tailTime(), report infinity rather than blocking on the lock.
    return std::numeric_limits<double>::infinity();
}
} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)