This source file includes the following definitions.
- isSampleRateRangeGood
- create
- create
- AudioContext
- AudioContext
- constructCommon
- lazyInitialize
- clear
- uninitialize
- isInitialized
- stopDispatch
- stop
- hasPendingActivity
- createBuffer
- createBuffer
- decodeAudioData
- createBufferSource
- createMediaElementSource
- createMediaStreamSource
- createMediaStreamDestination
- createScriptProcessor
- createScriptProcessor
- createScriptProcessor
- createScriptProcessor
- createBiquadFilter
- createWaveShaper
- createPanner
- createConvolver
- createDynamicsCompressor
- createAnalyser
- createGain
- createDelay
- createDelay
- createChannelSplitter
- createChannelSplitter
- createChannelMerger
- createChannelMerger
- createOscillator
- createPeriodicWave
- notifyNodeFinishedProcessing
- derefFinishedSourceNodes
- refNode
- derefNode
- derefUnfinishedSourceNodes
- lock
- tryLock
- unlock
- isAudioThread
- isGraphOwner
- addDeferredFinishDeref
- handlePreRenderTasks
- handlePostRenderTasks
- handleDeferredFinishDerefs
- markForDeletion
- scheduleNodeDeletion
- deleteMarkedNodesDispatch
- deleteMarkedNodes
- markSummingJunctionDirty
- removeMarkedSummingJunction
- markAudioNodeOutputDirty
- handleDirtyAudioSummingJunctions
- handleDirtyAudioNodeOutputs
- addAutomaticPullNode
- removeAutomaticPullNode
- updateAutomaticPullNodes
- processAutomaticPullNodes
- interfaceName
- executionContext
- startRendering
- fireCompletionEvent
- incrementActiveSourceCount
- decrementActiveSourceCount
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "modules/webaudio/AudioContext.h"
#include "bindings/v8/ExceptionMessages.h"
#include "bindings/v8/ExceptionState.h"
#include "core/dom/Document.h"
#include "core/dom/ExceptionCode.h"
#include "core/html/HTMLMediaElement.h"
#include "core/inspector/ScriptCallStack.h"
#include "platform/audio/FFTFrame.h"
#include "platform/audio/HRTFPanner.h"
#include "modules/mediastream/MediaStream.h"
#include "modules/webaudio/AnalyserNode.h"
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioBufferCallback.h"
#include "modules/webaudio/AudioBufferSourceNode.h"
#include "modules/webaudio/AudioListener.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioNodeOutput.h"
#include "modules/webaudio/BiquadFilterNode.h"
#include "modules/webaudio/ChannelMergerNode.h"
#include "modules/webaudio/ChannelSplitterNode.h"
#include "modules/webaudio/ConvolverNode.h"
#include "modules/webaudio/DefaultAudioDestinationNode.h"
#include "modules/webaudio/DelayNode.h"
#include "modules/webaudio/DynamicsCompressorNode.h"
#include "modules/webaudio/GainNode.h"
#include "modules/webaudio/MediaElementAudioSourceNode.h"
#include "modules/webaudio/MediaStreamAudioDestinationNode.h"
#include "modules/webaudio/MediaStreamAudioSourceNode.h"
#include "modules/webaudio/OfflineAudioCompletionEvent.h"
#include "modules/webaudio/OfflineAudioContext.h"
#include "modules/webaudio/OfflineAudioDestinationNode.h"
#include "modules/webaudio/OscillatorNode.h"
#include "modules/webaudio/PannerNode.h"
#include "modules/webaudio/PeriodicWave.h"
#include "modules/webaudio/ScriptProcessorNode.h"
#include "modules/webaudio/WaveShaperNode.h"
#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif
#include "wtf/ArrayBuffer.h"
#include "wtf/Atomics.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/text/WTFString.h"
const int UndefinedThreadIdentifier = 0xffffffff;
namespace WebCore {
bool AudioContext::isSampleRateRangeGood(float sampleRate)
{
    // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
    // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
    return sampleRate >= 44100 && sampleRate <= 96000;
}
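// Don't allow more than this number of simultaneous AudioContexts talking to hardware.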
const unsigned MaxHardwareContexts = 6;
unsigned AudioContext::s_hardwareContextCount = 0;
PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            SyntaxError,
            "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
        return nullptr;
    }
    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document)));
    audioContext->suspendIfNeeded();
    return audioContext.release();
}
PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
    document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
    return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, exceptionState);
}
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_destinationNode(nullptr)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
    , m_activeSourceCount(0)
{
    constructCommon();
    m_destinationNode = DefaultAudioDestinationNode::create(this);
}
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_destinationNode(nullptr)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(true)
    , m_activeSourceCount(0)
{
    constructCommon();
    // Create a new destination for offline rendering (with custom render target).
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    if (m_renderTarget.get())
        m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}
void AudioContext::constructCommon()
{
    ScriptWrappable::init(this);
    FFTFrame::initialize();
    m_listener = AudioListener::create();
}
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
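// Initialization is deferred: the context is fully initialized (and, for real-time
// contexts, rendering is started) only when lazyInitialize() is first called, not
// in the constructor.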
void AudioContext::lazyInitialize()
{
    if (!m_isInitialized) {
        // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
        ASSERT(!m_isAudioThreadFinished);
        if (!m_isAudioThreadFinished) {
            // If creating the destination node failed, the context simply stays
            // uninitialized.
            if (m_destinationNode.get()) {
                m_destinationNode->initialize();
                if (!isOfflineContext()) {
                    // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
                    // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
                    // NOTE: for now the default AudioContext does not need an explicit startRendering() call from JavaScript.
                    // We may want to consider requiring it for symmetry with OfflineAudioContext.
                    m_destinationNode->startRendering();
                    ++s_hardwareContextCount;
                }
                m_isInitialized = true;
            }
        }
    }
}
void AudioContext::clear()
{
    // Release the reference to the destination node first, which indirectly releases the rest of the graph.
    if (m_destinationNode)
        m_destinationNode.clear();
    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());
    m_isCleared = true;
}
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());
    if (!m_isInitialized)
        return;
    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();
    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;
    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }
    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();
    m_isInitialized = false;
}
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
void AudioContext::stopDispatch(void* userData)
{
    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;
    context->uninitialize();
    context->clear();
}
void AudioContext::stop()
{
    // Usually ExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;
    // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: can we scope this in a way that makes this object change visible to ActiveDOMObjects?
    callOnMainThread(stopDispatch, this);
}
bool AudioContext::hasPendingActivity() const
{
    // According to spec AudioContext must die only after page navigates.
    return !m_isCleared;
}
PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
{
    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get()) {
        if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
            exceptionState.throwDOMException(
                NotSupportedError,
                "requested number of channels (" + String::number(numberOfChannels) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels()) + ")");
        } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRate > AudioBuffer::maxAllowedSampleRate()) {
            exceptionState.throwDOMException(
                NotSupportedError,
                "requested sample rate (" + String::number(sampleRate)
                + ") does not lie in the allowed range of "
                + String::number(AudioBuffer::minAllowedSampleRate())
                + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz");
        } else if (!numberOfFrames) {
            exceptionState.throwDOMException(
                NotSupportedError,
                "number of frames must be greater than 0.");
        } else {
            exceptionState.throwDOMException(
                NotSupportedError,
                "unable to create buffer of " + String::number(numberOfChannels)
                + " channel(s) of " + String::number(numberOfFrames)
                + " frames each.");
        }
        return nullptr;
    }
    return audioBuffer;
}
PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& exceptionState)
{
    ASSERT(arrayBuffer);
    if (!arrayBuffer) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid ArrayBuffer.");
        return nullptr;
    }
    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
    if (!audioBuffer.get()) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid audio data in ArrayBuffer.");
        return nullptr;
    }
    return audioBuffer;
}
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
{
    if (!audioData) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid ArrayBuffer for audioData.");
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());
    return node;
}
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (!mediaElement) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "invalid HTMLMedialElement.");
        return nullptr;
    }
    // First check if this media element already has a source node.
    if (mediaElement->audioSourceNode()) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "invalid HTMLMediaElement.");
        return nullptr;
    }
    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
    mediaElement->setAudioSourceNode(node.get());
    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (!mediaStream) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "invalid MediaStream source");
        return nullptr;
    }
    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    if (audioTracks.isEmpty()) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "MediaStream has no audio track");
        return nullptr;
    }
    // Use the first audio track in the media stream.
    RefPtr<MediaStreamTrack> audioTrack = audioTracks[0];
    OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());
    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // Set number of output channels to stereo by default.
    return MediaStreamAudioDestinationNode::create(this, 2);
}
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(0, 2, 2, exceptionState);
}
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(bufferSize, 2, 2, exceptionState);
}
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
{
    // Set number of output channels to stereo by default.
    return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
}
PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
    if (!node.get()) {
        if (!numberOfInputChannels && !numberOfOutputChannels) {
            exceptionState.throwDOMException(
                IndexSizeError,
                "number of input channels and output channels cannot both be zero.");
        } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
            exceptionState.throwDOMException(
                IndexSizeError,
                "number of input channels (" + String::number(numberOfInputChannels)
                + ") exceeds maximum ("
                + String::number(AudioContext::maxNumberOfChannels()) + ").");
        } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
            exceptionState.throwDOMException(
                IndexSizeError,
                "number of output channels (" + String::number(numberOfInputChannels)
                + ") exceeds maximum ("
                + String::number(AudioContext::maxNumberOfChannels()) + ").");
        } else {
            exceptionState.throwDOMException(
                IndexSizeError,
                "buffer size (" + String::number(bufferSize)
                + ") must be a power of two between 256 and 16384.");
        }
        return nullptr;
    }
    refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
    return node;
}
PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
    ASSERT(isMainThread());
    return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}
PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    return WaveShaperNode::create(this);
}
PassRefPtr<PannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    return PannerNode::create(this, m_destinationNode->sampleRate());
}
PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    return ConvolverNode::create(this, m_destinationNode->sampleRate());
}
PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}
PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    return AnalyserNode::create(this, m_destinationNode->sampleRate());
}
PassRefPtr<GainNode> AudioContext::createGain()
{
    ASSERT(isMainThread());
    return GainNode::create(this, m_destinationNode->sampleRate());
}
PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
{
    const double defaultMaxDelayTime = 1;
    return createDelay(defaultMaxDelayTime, exceptionState);
}
PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
    if (exceptionState.hadException())
        return nullptr;
    return node;
}
PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
{
    const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
    return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
}
PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
    if (!node.get()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "number of outputs (" + String::number(numberOfOutputs)
            + ") must be between 1 and "
            + String::number(AudioContext::maxNumberOfChannels()) + ".");
        return nullptr;
    }
    return node;
}
PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
{
    const unsigned ChannelMergerDefaultNumberOfInputs = 6;
    return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
}
PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
    if (!node.get()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "number of inputs (" + String::number(numberOfInputs)
            + ") must be between 1 and "
            + String::number(AudioContext::maxNumberOfChannels()) + ".");
        return nullptr;
    }
    return node;
}
PassRefPtr<OscillatorNode> AudioContext::createOscillator()
{
    ASSERT(isMainThread());
    RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());
    return node;
}
PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (!real) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid real array");
        return nullptr;
    }
    if (!imag) {
        exceptionState.throwDOMException(
            SyntaxError,
            "invalid imaginary array");
        return nullptr;
    }
    if (real->length() != imag->length()) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "length of real array (" + String::number(real->length())
            + ") and length of imaginary array (" +  String::number(imag->length())
            + ") must match.");
        return nullptr;
    }
    if (real->length() > 4096) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "length of real array (" + String::number(real->length())
            + ") exceeds allowed maximum of 4096");
        return nullptr;
    }
    if (imag->length() > 4096) {
        exceptionState.throwDOMException(
            IndexSizeError,
            "length of imaginary array (" + String::number(imag->length())
            + ") exceeds allowed maximum of 4096");
        return nullptr;
    }
    return PeriodicWave::create(sampleRate(), real, imag);
}
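// Called on the audio thread when a source node has finished playing. The node is
// queued here and dereferenced later, under the graph lock, by derefFinishedSourceNodes().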
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (unsigned i = 0; i < m_finishedNodes.size(); i++)
        derefNode(m_finishedNodes[i]);
    m_finishedNodes.clear();
}
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());
    node->deref(AudioNode::RefTypeConnection);
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
        if (node == m_referencedNodes[i]) {
            m_referencedNodes.remove(i);
            break;
        }
    }
}
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
    m_referencedNodes.clear();
}
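// Locking protocol for the context's rendering graph: the main thread takes the
// lock with lock(), which may block; the real-time audio thread must only use
// tryLock(), so rendering never blocks waiting on the main thread.
// m_graphOwnerThread records the current owner, which also makes the lock
// recursive within a single thread.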
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());
    ThreadIdentifier thisThread = currentThread();
    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();
    // Try to catch cases of using tryLock() on the main thread - it should use regular lock().
    ASSERT(isAudioThread || isAudioThreadFinished());
    if (!isAudioThread) {
        // In release builds treat tryLock() as lock() (since the above ASSERT never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }
    bool hasLock;
    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();
        if (hasLock)
            m_graphOwnerThread = thisThread;
        mustReleaseLock = hasLock;
    }
    return hasLock;
}
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);
    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());
    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();
        updateAutomaticPullNodes();
        if (mustReleaseLock)
            unlock();
    }
}
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());
    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();
        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();
        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();
        updateAutomaticPullNodes();
        if (mustReleaseLock)
            unlock();
    }
}
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
        AudioNode* node = m_deferredFinishDerefList[i];
        node->finishDeref(AudioNode::RefTypeConnection);
    }
    m_deferredFinishDerefList.clear();
}
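// Node deletion is a three-step pipeline: markForDeletion() queues a node while the
// graph lock is held, scheduleNodeDeletion() hands the queue off to the main thread
// at the end of a render quantum, and deleteMarkedNodes() performs the actual
// deletion on the main thread, where releasing non-thread-safe resources is safe.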
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());
    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);
    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;
    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
        m_isDeletionScheduled = true;
        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
void AudioContext::deleteMarkedNodesDispatch(void* userData)
{
    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;
    context->deleteMarkedNodes();
    context->deref();
}
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());
    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    RefPtr<AudioContext> protect(this);
    {
        AutoLocker locker(this);
        while (size_t n = m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();
            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));
            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));
            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
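// "Dirty" summing junctions and node outputs are ones whose connections have been
// changed on the main thread; their rendering state is brought up to date on the
// audio thread by handleDirtyAudioSummingJunctions() and handleDirtyAudioNodeOutputs().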
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());
    for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
        (*i)->updateRenderingState();
    m_dirtySummingJunctions.clear();
}
void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());
    for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
        (*i)->updateRenderingState();
    m_dirtyAudioNodeOutputs.clear();
}
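// Automatic pull nodes must be pulled by the rendering thread each render quantum
// even though they are not connected, directly or indirectly, to the destination
// (for example, nodes that inspect or capture audio). The set is mirrored into
// m_renderingAutomaticPullNodes, the vector the audio thread iterates while rendering.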
void AudioContext::addAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());
    if (!m_automaticPullNodes.contains(node)) {
        m_automaticPullNodes.add(node);
        m_automaticPullNodesNeedUpdating = true;
    }
}
void AudioContext::removeAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());
    if (m_automaticPullNodes.contains(node)) {
        m_automaticPullNodes.remove(node);
        m_automaticPullNodesNeedUpdating = true;
    }
}
void AudioContext::updateAutomaticPullNodes()
{
    ASSERT(isGraphOwner());
    if (m_automaticPullNodesNeedUpdating) {
        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
        unsigned j = 0;
        for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
            AudioNode* node = *i;
            m_renderingAutomaticPullNodes[j] = node;
        }
        m_automaticPullNodesNeedUpdating = false;
    }
}
void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
    ASSERT(isAudioThread());
    for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
        m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
}
const AtomicString& AudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
ExecutionContext* AudioContext::executionContext() const
{
    return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
}
void AudioContext::startRendering()
{
    destination()->startRendering();
}
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;
    AudioBuffer* renderedBuffer = m_renderTarget.get();
    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;
    // Avoid firing the event if the document has already gone away.
    if (executionContext()) {
        // Call the offline rendering completion event listener.
        dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
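// The active source count is touched from both the main thread and the audio
// thread (sources can finish during rendering), so it is updated atomically.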
void AudioContext::incrementActiveSourceCount()
{
    atomicIncrement(&m_activeSourceCount);
}
void AudioContext::decrementActiveSourceCount()
{
    atomicDecrement(&m_activeSourceCount);
}
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)