#ifndef AudioParam_h
#define AudioParam_h
#include "bindings/v8/ScriptWrappable.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioParamTimeline.h"
#include "modules/webaudio/AudioSummingJunction.h"
#include <sys/types.h>
#include "wtf/Float32Array.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefCounted.h"
#include "wtf/text/WTFString.h"
namespace WebCore {
class AudioNodeOutput;
// AudioParam is a named, range-limited audio parameter. Its effective value
// can come from three sources: the intrinsic value set via setValue(), a
// scheduled automation timeline (m_timeline), or audio-rate modulation from
// AudioNodeOutputs summed into it (it inherits AudioSummingJunction for this).
// Instances are reference-counted and exposed to script via ScriptWrappable.
class AudioParam FINAL : public RefCounted<AudioParam>, public ScriptWrappable, public AudioSummingJunction {
public:
// Tuning constants for the smoothing behavior (see smooth()); their values
// are defined out-of-line in the .cpp file.
static const double DefaultSmoothingConstant;
static const double SnapThreshold;
// Factory: constructs a parameter owned by |context| with the given display
// name, default/min/max range, and an opaque units tag (defaults to 0).
static PassRefPtr<AudioParam> create(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
{
return adoptRef(new AudioParam(context, name, defaultValue, minValue, maxValue, units));
}
// AudioSummingJunction overrides: state updates are always permitted and
// require no post-update work.
virtual bool canUpdateState() OVERRIDE { return true; }
virtual void didUpdate() OVERRIDE { }
// Intrinsic (non-automated) value access; defined in the .cpp file.
float value();
void setValue(float);
// NOTE(review): presumably the value with timeline automation and any
// audio-rate inputs folded in — confirm against the .cpp implementation.
float finalValue();
// Metadata supplied at construction time. min/max/default are stored as
// doubles but exposed to callers as floats.
String name() const { return m_name; }
float minValue() const { return static_cast<float>(m_minValue); }
float maxValue() const { return static_cast<float>(m_maxValue); }
float defaultValue() const { return static_cast<float>(m_defaultValue); }
unsigned units() const { return m_units; }
// De-zippering support: smoothedValue() tracks m_value gradually; smooth()
// advances it (return semantics defined in the .cpp — presumably "reached
// target"); resetSmoothedValue() snaps it to the current value immediately.
float smoothedValue();
bool smooth();
void resetSmoothedValue() { m_smoothedValue = m_value; }
// Timeline automation API: each call delegates directly to m_timeline.
void setValueAtTime(float value, double time) { m_timeline.setValueAtTime(value, time); }
void linearRampToValueAtTime(float value, double time) { m_timeline.linearRampToValueAtTime(value, time); }
// May throw via |es| (e.g. for invalid target values); validation lives in
// AudioParamTimeline.
void exponentialRampToValueAtTime(float value, double time, ExceptionState& es) { m_timeline.exponentialRampToValueAtTime(value, time, es); }
void setTargetAtTime(float target, double time, double timeConstant) { m_timeline.setTargetAtTime(target, time, timeConstant); }
void setValueCurveAtTime(Float32Array* curve, double time, double duration) { m_timeline.setValueCurveAtTime(curve, time, duration); }
void cancelScheduledValues(double startTime) { m_timeline.cancelScheduledValues(startTime); }
// True when the parameter varies within a render quantum: either timeline
// events are scheduled or at least one audio-rate input is connected.
bool hasSampleAccurateValues() { return m_timeline.hasValues() || numberOfRenderingConnections(); }
// Fills |values| with per-sample parameter values for the current quantum.
void calculateSampleAccurateValues(float* values, unsigned numberOfValues);
// Attach/detach an audio-rate modulation source (non-owning).
void connect(AudioNodeOutput*);
void disconnect(AudioNodeOutput*);
protected:
// Protected so construction goes through create(); initializes the smoothed
// value to the default and registers this wrapper with the bindings layer.
AudioParam(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
: AudioSummingJunction(context)
, m_name(name)
, m_value(defaultValue)
, m_defaultValue(defaultValue)
, m_minValue(minValue)
, m_maxValue(maxValue)
, m_units(units)
, m_smoothedValue(defaultValue)
{
ScriptWrappable::init(this);
}
private:
// Internal helpers for calculateSampleAccurateValues(); defined in the .cpp.
void calculateFinalValues(float* values, unsigned numberOfValues, bool sampleAccurate);
void calculateTimelineValues(float* values, unsigned numberOfValues);
String m_name;
// Intrinsic value and its construction-time range/default (doubles for
// precision; accessors narrow to float).
double m_value;
double m_defaultValue;
double m_minValue;
double m_maxValue;
unsigned m_units;
// Current de-zippered value, updated by smooth().
double m_smoothedValue;
// Scheduled automation events (setValueAtTime etc.).
AudioParamTimeline m_timeline;
};
}
#endif