This source file includes the following definitions.
- value
- setValue
- smoothedValue
- smooth
- finalValue
- calculateSampleAccurateValues
- calculateFinalValues
- calculateTimelineValues
- connect
- disconnect
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "modules/webaudio/AudioParam.h"
#include "platform/audio/AudioUtilities.h"
#include "modules/webaudio/AudioNode.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "platform/FloatConversion.h"
#include "wtf/MathExtras.h"
namespace WebCore {
// Coefficient of the first-order smoothing filter applied in smooth():
// each call moves m_smoothedValue 5% of the remaining distance toward m_value.
const double AudioParam::DefaultSmoothingConstant = 0.05;
// When |m_smoothedValue - m_value| drops below this threshold, smooth()
// snaps the smoothed value exactly onto the target to end the exponential tail.
const double AudioParam::SnapThreshold = 0.001;
// Returns the parameter's current intrinsic value. On the audio thread the
// value is first reconciled with any automation scheduled on the timeline
// for the current context time.
float AudioParam::value()
{
    // Only the audio thread may consult the automation timeline.
    if (context() && context()->isAudioThread()) {
        bool timelineHasValue;
        float automationValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), timelineHasValue);
        if (timelineHasValue)
            m_value = automationValue;
    }

    return narrowPrecisionToFloat(m_value);
}
// Sets the parameter's intrinsic value, rejecting non-finite values
// (NaN or infinity) that script may hand us; the previous value is kept
// in that case.
void AudioParam::setValue(float value)
{
    if (std::isnan(value) || std::isinf(value))
        return;

    m_value = value;
}
// Returns the smoothed (de-zippered) value maintained by smooth().
float AudioParam::smoothedValue()
{
    float currentSmoothed = narrowPrecisionToFloat(m_smoothedValue);
    return currentSmoothed;
}
// Advances the smoothed value one step toward the current intrinsic value.
// Returns true when the smoothed value has already converged (no further
// smoothing work is needed this quantum), false otherwise.
bool AudioParam::smooth()
{
    // If a value has been scheduled on the timeline, use it exactly; the
    // timeline itself is then effectively responsible for smoothing.
    bool timelineProvidedValue = false;
    if (context())
        m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), timelineProvidedValue);

    // Already converged: nothing to do.
    if (m_smoothedValue == m_value)
        return true;

    if (timelineProvidedValue) {
        // Jump straight to the scheduled value.
        m_smoothedValue = m_value;
    } else {
        // First-order exponential approach toward the target value.
        m_smoothedValue += (m_value - m_smoothedValue) * DefaultSmoothingConstant;

        // Snap exactly onto the target once close enough, so the
        // exponential approach doesn't linger asymptotically.
        if (fabs(m_smoothedValue - m_value) < SnapThreshold)
            m_smoothedValue = m_value;
    }

    return false;
}
float AudioParam::finalValue()
{
float value;
calculateFinalValues(&value, 1, false);
return value;
}
// Fills |values| with one parameter value per frame (a-rate computation).
// Must be called from the audio thread with a non-null, non-empty buffer.
void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfValues)
{
    bool canProcess = context() && context()->isAudioThread() && values && numberOfValues;
    ASSERT(canProcess);
    if (!canProcess)
        return;

    calculateFinalValues(values, numberOfValues, true);
}
void AudioParam::calculateFinalValues(float* values, unsigned numberOfValues, bool sampleAccurate)
{
bool isGood = context() && context()->isAudioThread() && values && numberOfValues;
ASSERT(isGood);
if (!isGood)
return;
if (sampleAccurate) {
calculateTimelineValues(values, numberOfValues);
} else {
bool hasValue;
float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
if (hasValue)
m_value = timelineValue;
values[0] = narrowPrecisionToFloat(m_value);
}
RefPtr<AudioBus> summingBus = AudioBus::create(1, numberOfValues, false);
summingBus->setChannelMemory(0, values, numberOfValues);
for (unsigned i = 0; i < numberOfRenderingConnections(); ++i) {
AudioNodeOutput* output = renderingOutput(i);
ASSERT(output);
AudioBus* connectionBus = output->pull(0, AudioNode::ProcessingSizeInFrames);
summingBus->sumFrom(*connectionBus);
}
}
// Fills |values| with sample-accurate automation values for the time range
// covered by this render quantum.
void AudioParam::calculateTimelineValues(float* values, unsigned numberOfValues)
{
    // The control rate passed to the timeline equals the sample rate, so
    // each output frame receives its own automation value.
    double rate = context()->sampleRate();
    double quantumStartTime = context()->currentTime();
    double quantumEndTime = quantumStartTime + numberOfValues / rate;

    // Remember the final computed value so subsequent renders continue from it.
    m_value = m_timeline.valuesForTimeRange(quantumStartTime, quantumEndTime, narrowPrecisionToFloat(m_value), values, numberOfValues, rate, rate);
}
// Registers an audio-rate connection from |output| to this parameter.
// Must be called while holding the graph lock.
void AudioParam::connect(AudioNodeOutput* output)
{
    ASSERT(context()->isGraphOwner());

    ASSERT(output);
    if (!output)
        return;

    // Already connected to this output; nothing to do.
    if (m_outputs.contains(output))
        return;

    // Register on both sides of the connection, then notify.
    output->addParam(this);
    m_outputs.add(output);
    changedOutputs();
}
// Removes a previously registered audio-rate connection from |output|.
// Must be called while holding the graph lock.
void AudioParam::disconnect(AudioNodeOutput* output)
{
    ASSERT(context()->isGraphOwner());

    ASSERT(output);
    if (!output)
        return;

    // Ignore outputs that were never connected to this parameter.
    if (!m_outputs.contains(output))
        return;

    // Unregister on our side and notify before dropping the back-reference.
    m_outputs.remove(output);
    changedOutputs();
    output->removeParam(this);
}
}
#endif