This source file includes the following definitions.
- JNINamespace
- run
- joinRecordThread
- createAudioRecordInput
- SuppressLint
- open
- start
- stop
- SuppressLint
- close
- nativeCacheDirectBufferAddress
- nativeOnData
package org.chromium.media;
import android.annotation.SuppressLint;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder.AudioSource;
import android.media.audiofx.AcousticEchoCanceler;
import android.media.audiofx.AudioEffect;
import android.media.audiofx.AudioEffect.Descriptor;
import android.os.Process;
import android.util.Log;
import org.chromium.base.CalledByNative;
import org.chromium.base.JNINamespace;
import java.nio.ByteBuffer;
@JNINamespace("media")
class AudioRecordInput {
    private static final String TAG = "AudioRecordInput";
    private static final boolean DEBUG = false;
    // Fixed estimate of the capture-side hardware latency, converted to bytes
    // in the constructor and reported alongside each data callback.
    // NOTE(review): 100 ms appears to be an empirical constant — confirm it
    // still matches the devices this code targets.
    private static final int HARDWARE_DELAY_MS = 100;

    // Opaque pointer to the native media::AudioRecordInputStream peer; passed
    // back on every native call so C++ can route the data.
    private final long mNativeAudioRecordInputStream;
    private final int mSampleRate;
    private final int mChannels;
    private final int mBitsPerSample;
    private final int mHardwareDelayBytes;
    private final boolean mUsePlatformAEC;
    // Direct buffer shared with native code via nativeCacheDirectBufferAddress;
    // the record thread fills it and native reads it in nativeOnData.
    private ByteBuffer mBuffer;
    private AudioRecord mAudioRecord;
    private AudioRecordThread mAudioRecordThread;
    private AcousticEchoCanceler mAEC;

    /**
     * Dedicated capture thread: starts the AudioRecord, then loops reading
     * audio into the shared direct buffer and forwarding it to native code
     * until stopped via joinRecordThread().
     */
    private class AudioRecordThread extends Thread {
        // Written on the controller thread (joinRecordThread), read in the
        // capture loop; volatile gives the required visibility.
        private volatile boolean mKeepAlive = true;

        @Override
        public void run() {
            Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
            try {
                mAudioRecord.startRecording();
            } catch (IllegalStateException e) {
                Log.e(TAG, "startRecording failed", e);
                return;
            }

            while (mKeepAlive) {
                int bytesRead = mAudioRecord.read(mBuffer, mBuffer.capacity());
                if (bytesRead > 0) {
                    // Native side reads the audio straight out of mBuffer;
                    // only the byte count and delay estimate cross JNI here.
                    nativeOnData(mNativeAudioRecordInputStream, bytesRead,
                                 mHardwareDelayBytes);
                } else {
                    Log.e(TAG, "read failed: " + bytesRead);
                    if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
                        // The record object is in a bad state; retrying would
                        // spin forever, so bail out of the capture loop.
                        mKeepAlive = false;
                    }
                }
            }

            try {
                mAudioRecord.stop();
            } catch (IllegalStateException e) {
                Log.e(TAG, "stop failed", e);
            }
        }

        /**
         * Signals the capture loop to exit and blocks until the thread dies.
         * If this thread is interrupted while waiting, the join is retried and
         * the interrupt status is restored before returning, so callers still
         * observe the interruption.
         */
        public void joinRecordThread() {
            mKeepAlive = false;
            boolean interrupted = false;
            while (isAlive()) {
                try {
                    join();
                } catch (InterruptedException e) {
                    // Keep waiting for a clean shutdown; re-assert below.
                    interrupted = true;
                }
            }
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    }

    /** Factory invoked from native code; mirrors the private constructor. */
    @CalledByNative
    private static AudioRecordInput createAudioRecordInput(long nativeAudioRecordInputStream,
            int sampleRate, int channels, int bitsPerSample, int bytesPerBuffer,
            boolean usePlatformAEC) {
        return new AudioRecordInput(nativeAudioRecordInputStream, sampleRate, channels,
                                    bitsPerSample, bytesPerBuffer, usePlatformAEC);
    }

    private AudioRecordInput(long nativeAudioRecordInputStream, int sampleRate, int channels,
            int bitsPerSample, int bytesPerBuffer, boolean usePlatformAEC) {
        mNativeAudioRecordInputStream = nativeAudioRecordInputStream;
        mSampleRate = sampleRate;
        mChannels = channels;
        mBitsPerSample = bitsPerSample;
        // bytes = ms * (samples/sec) / 1000 * (bits/sample) / 8.
        mHardwareDelayBytes = HARDWARE_DELAY_MS * sampleRate / 1000 * bitsPerSample / 8;
        mUsePlatformAEC = usePlatformAEC;
        // Must be a direct buffer: native code caches its address and reads
        // the captured audio from it without further JNI copies.
        mBuffer = ByteBuffer.allocateDirect(bytesPerBuffer);
        nativeCacheDirectBufferAddress(mNativeAudioRecordInputStream, mBuffer);
    }

    /**
     * Creates the AudioRecord and, when available, wires up the platform
     * acoustic echo canceler. Returns false on any failure; on failure all
     * partially-created resources are released so open() may be retried.
     */
    @SuppressLint("NewApi")
    @CalledByNative
    private boolean open() {
        if (mAudioRecord != null) {
            Log.e(TAG, "open() called twice without a close()");
            return false;
        }
        int channelConfig;
        if (mChannels == 1) {
            channelConfig = AudioFormat.CHANNEL_IN_MONO;
        } else if (mChannels == 2) {
            channelConfig = AudioFormat.CHANNEL_IN_STEREO;
        } else {
            Log.e(TAG, "Unsupported number of channels: " + mChannels);
            return false;
        }
        int audioFormat;
        if (mBitsPerSample == 8) {
            audioFormat = AudioFormat.ENCODING_PCM_8BIT;
        } else if (mBitsPerSample == 16) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        } else {
            Log.e(TAG, "Unsupported bits per sample: " + mBitsPerSample);
            return false;
        }

        int minBufferSize = AudioRecord.getMinBufferSize(mSampleRate, channelConfig, audioFormat);
        if (minBufferSize < 0) {
            Log.e(TAG, "getMinBufferSize error: " + minBufferSize);
            return false;
        }
        // The AudioRecord's internal buffer must hold at least one full
        // transfer buffer, and never less than the platform minimum.
        int audioRecordBufferSizeInBytes = Math.max(mBuffer.capacity(), minBufferSize);
        try {
            mAudioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
                                           mSampleRate,
                                           channelConfig,
                                           audioFormat,
                                           audioRecordBufferSizeInBytes);
        } catch (IllegalArgumentException e) {
            Log.e(TAG, "AudioRecord failed", e);
            return false;
        }

        if (AcousticEchoCanceler.isAvailable()) {
            mAEC = AcousticEchoCanceler.create(mAudioRecord.getAudioSessionId());
            if (mAEC == null) {
                Log.e(TAG, "AcousticEchoCanceler.create failed");
                // Release the record object so this failure does not leak the
                // audio session and a subsequent open() can start cleanly.
                releaseAudioResources();
                return false;
            }
            // setEnabled is called even when mUsePlatformAEC is false, to
            // explicitly turn the effect off on devices that enable it by
            // default for VOICE_COMMUNICATION sessions.
            int ret = mAEC.setEnabled(mUsePlatformAEC);
            if (ret != AudioEffect.SUCCESS) {
                Log.e(TAG, "setEnabled error: " + ret);
                releaseAudioResources();
                return false;
            }
            if (DEBUG) {
                Descriptor descriptor = mAEC.getDescriptor();
                Log.d(TAG, "AcousticEchoCanceler " +
                        "name: " + descriptor.name + ", " +
                        "implementor: " + descriptor.implementor + ", " +
                        "uuid: " + descriptor.uuid);
            }
        }
        return true;
    }

    /** Releases the AEC and AudioRecord, in that order; safe to call twice. */
    @SuppressLint("NewApi")
    private void releaseAudioResources() {
        if (mAEC != null) {
            mAEC.release();
            mAEC = null;
        }
        if (mAudioRecord != null) {
            mAudioRecord.release();
            mAudioRecord = null;
        }
    }

    /** Starts the capture thread. No-op if already started or not yet open. */
    @CalledByNative
    private void start() {
        if (mAudioRecord == null) {
            Log.e(TAG, "start() called before open().");
            return;
        }
        if (mAudioRecordThread != null) {
            // Already started.
            return;
        }
        mAudioRecordThread = new AudioRecordThread();
        mAudioRecordThread.start();
    }

    /** Stops the capture thread and waits for it to exit. No-op if stopped. */
    @CalledByNative
    private void stop() {
        if (mAudioRecordThread == null) {
            // Already stopped (or never started).
            return;
        }
        mAudioRecordThread.joinRecordThread();
        mAudioRecordThread = null;
    }

    /**
     * Releases the AEC and AudioRecord. Must be called after stop(); refuses
     * to run while the capture thread is alive. No-op if never opened.
     */
    @SuppressLint("NewApi")
    @CalledByNative
    private void close() {
        if (mAudioRecordThread != null) {
            Log.e(TAG, "close() called before stop().");
            return;
        }
        releaseAudioResources();
    }

    // Hands the direct buffer's address to native code once, at construction.
    private native void nativeCacheDirectBufferAddress(long nativeAudioRecordInputStream,
                                                       ByteBuffer buffer);

    // Notifies native code that |size| bytes of audio are ready in the cached
    // buffer, along with the estimated hardware delay in bytes.
    private native void nativeOnData(long nativeAudioRecordInputStream, int size,
                                     int hardwareDelayBytes);
}