This source file includes the following definitions.
- FFTFrame(unsigned fftSize) — sized constructor
- FFTFrame() — default (uninitialized) constructor
- FFTFrame(const FFTFrame&) — copy constructor
- initialize
- cleanup
- doFFT
- doInverseFFT
- realData
- imagData
- getUpToDateComplexData
- contextForSize
#include "config.h"
#if ENABLE(WEB_AUDIO)
#if USE(WEBAUDIO_FFMPEG)
#include "platform/audio/FFTFrame.h"
#include "platform/audio/VectorMath.h"
extern "C" {
#include <libavcodec/avfft.h>
}
#include "wtf/MathExtras.h"
namespace WebCore {
#if !ASSERT_DISABLED
const int kMaxFFTPow2Size = 24;
#endif
// Constructs an FFT frame of the given size, allocating the interleaved
// scratch buffer (fftSize floats) plus split real/imaginary arrays
// (fftSize/2 floats each) and the FFmpeg forward/inverse RDFT contexts.
// fftSize must be a power of two (checked by the ASSERT below).
FFTFrame::FFTFrame(unsigned fftSize)
    : m_FFTSize(fftSize)
    , m_log2FFTSize(static_cast<unsigned>(log2(fftSize)))
    , m_forwardContext(0)
    , m_inverseContext(0)
    , m_complexData(fftSize)
    , m_realData(fftSize / 2)
    , m_imagData(fftSize / 2)
{
    // Verify that the rounded log2 reproduces fftSize, i.e. that fftSize is
    // an exact power of two — av_rdft_init() only accepts log2 sizes.
    ASSERT(1UL << m_log2FFTSize == m_FFTSize);
    m_forwardContext = contextForSize(fftSize, DFT_R2C);
    m_inverseContext = contextForSize(fftSize, IDFT_C2R);
}
// Creates a blank (invalid) frame: size 0, no FFT contexts, no buffers.
// NOTE(review): presumably used only as a placeholder before assignment or
// by other FFTFrame back-ends' APIs — no FFT can be performed on it.
FFTFrame::FFTFrame()
    : m_FFTSize(0)
    , m_log2FFTSize(0)
    , m_forwardContext(0)
    , m_inverseContext(0)
{
}
FFTFrame::FFTFrame(const FFTFrame& frame)
: m_FFTSize(frame.m_FFTSize)
, m_log2FFTSize(frame.m_log2FFTSize)
, m_forwardContext(0)
, m_inverseContext(0)
, m_complexData(frame.m_FFTSize)
, m_realData(frame.m_FFTSize / 2)
, m_imagData(frame.m_FFTSize / 2)
{
m_forwardContext = contextForSize(m_FFTSize, DFT_R2C);
m_inverseContext = contextForSize(m_FFTSize, IDFT_C2R);
unsigned nbytes = sizeof(float) * (m_FFTSize / 2);
memcpy(realData(), frame.realData(), nbytes);
memcpy(imagData(), frame.imagData(), nbytes);
}
// No-op for the FFmpeg back-end: there is no global FFT library state to
// set up. NOTE(review): presumably other platform back-ends use this hook.
void FFTFrame::initialize()
{
}
// No-op for the FFmpeg back-end: per-instance contexts are released in the
// destructor, and no global state was created in initialize().
void FFTFrame::cleanup()
{
}
// Releases the FFmpeg RDFT contexts. NOTE(review): this relies on
// av_rdft_end() tolerating a null context, since the default constructor
// leaves both pointers 0 — FFmpeg's implementation checks for null, but
// confirm against the pinned FFmpeg version.
FFTFrame::~FFTFrame()
{
    av_rdft_end(m_forwardContext);
    av_rdft_end(m_inverseContext);
}
#if OS(WIN)
#pragma float_control(except, off, push)
#pragma float_control(precise, off, push)
#pragma fp_contract(on)
#pragma fenv_access(off)
#endif
// Performs the forward FFT of |data| (m_FFTSize time-domain samples) and
// stores the result in the split realData()/imagData() arrays.
void FFTFrame::doFFT(const float* data)
{
    // av_rdft_calc() transforms in place, so first copy the input into the
    // interleaved scratch buffer.
    float* interleaved = m_complexData.data();
    memcpy(interleaved, data, sizeof(float) * m_FFTSize);

    av_rdft_calc(m_forwardContext, interleaved);

    // De-interleave the (re, im) pairs into the split arrays. FFmpeg packs
    // the purely-real DC and Nyquist terms into the first pair, so
    // realData()[0] holds DC and imagData()[0] holds the Nyquist component.
    float* real = m_realData.data();
    float* imag = m_imagData.data();
    const int halfSize = m_FFTSize / 2;
    for (int i = 0; i < halfSize; ++i) {
        real[i] = interleaved[2 * i];
        imag[i] = interleaved[2 * i + 1];
    }
}
// Performs the inverse FFT of the current frequency-domain data and writes
// m_FFTSize time-domain samples into |data|.
void FFTFrame::doInverseFFT(float* data)
{
    // Re-interleave the split real/imag arrays into the layout
    // av_rdft_calc() expects; the inverse transform then runs in place.
    float* interleavedData = getUpToDateComplexData();
    av_rdft_calc(m_inverseContext, interleavedData);

    // Scale by 2/N while copying out, matching the forward transform's
    // normalization. Use a float literal so the division stays in single
    // precision (2.0 would promote the expression to double and narrow back).
    const float scale = 2.0f / m_FFTSize;
    VectorMath::vsmul(interleavedData, 1, &scale, data, 1, m_FFTSize);
}
// Returns a mutable pointer to the real-component array (fftSize/2 floats).
// The method is const, so data() yields a const pointer here; cast the
// constness away to match the historically non-const FFTFrame accessor API.
float* FFTFrame::realData() const
{
    const float* data = m_realData.data();
    return const_cast<float*>(data);
}
// Returns a mutable pointer to the imaginary-component array (fftSize/2
// floats). As with realData(), the const qualifier on this method forces a
// const_cast to preserve the non-const accessor contract.
float* FFTFrame::imagData() const
{
    const float* data = m_imagData.data();
    return const_cast<float*>(data);
}
// Repacks the split real/imag arrays into m_complexData's interleaved
// (re, im) layout — the format av_rdft_calc() consumes — and returns a
// pointer to that buffer.
float* FFTFrame::getUpToDateComplexData()
{
    const float* real = m_realData.data();
    const float* imag = m_imagData.data();
    float* c = m_complexData.data();
    const int len = m_FFTSize / 2;
    for (int i = 0; i < len; ++i) {
        int baseComplexIndex = 2 * i;
        c[baseComplexIndex] = real[i];
        c[baseComplexIndex + 1] = imag[i];
    }
    // This is a non-const member function, so data() already returns
    // float* — the const_cast the original carried here was redundant.
    return c;
}
// Creates an FFmpeg RDFT context for |fftSize| points. |trans| is one of
// the RDFTransformType values (DFT_R2C for forward, IDFT_C2R for inverse).
// av_rdft_init() takes the log2 of the transform length, so fftSize must be
// a power of two (the sized constructor ASSERTs this).
RDFTContext* FFTFrame::contextForSize(unsigned fftSize, int trans)
{
    ASSERT(fftSize);
    int pow2size = static_cast<int>(log2(fftSize));
    ASSERT(pow2size < kMaxFFTPow2Size);
    // Named cast instead of the original C-style cast for the enum type.
    RDFTContext* context = av_rdft_init(pow2size, static_cast<RDFTransformType>(trans));
    return context;
}
}
#endif
#endif