AudioTrack Creation Process Analysis (android_audio)
Posted by we1less
C++ test program: frameworks/base/media/tests/audiotests/shared_mem_test.cpp
int AudioTrackTest::Test01() — playback is driven mainly by this code:
sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC,// stream type
        rate,
        AUDIO_FORMAT_PCM_16_BIT,// word length, PCM
        AUDIO_CHANNEL_OUT_MONO,
        iMem);
status_t status = track->initCheck();
if(status != NO_ERROR) {
    track.clear();
    ALOGD("Failed for initCheck()");
    return -1;
}

// start play
ALOGD("start");
track->start();
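Before this constructor runs, the test has already rendered a sine wave into shared memory; iMem is carved out of a MemoryDealer heap. A condensed sketch of that setup (paraphrased from shared_mem_test.cpp, not verbatim; BUF_SZ and smpBuf stand for the test's buffer size and sample buffer, and pointer() is the pre-Android-11 IMemory accessor):

#include <string.h>
#include <binder/MemoryDealer.h>
#include <binder/IMemory.h>

// Allocate a heap in this process and carve one buffer out of it.
sp<MemoryDealer> heap = new MemoryDealer(1024 * 1024, "AudioTrack Heap Base");
sp<IMemory> iMem = heap->allocate(BUF_SZ * sizeof(short));

// Copy the pre-generated PCM samples into the shared buffer, then hand
// iMem to the AudioTrack constructor above; no write() calls are needed.
uint8_t *p = static_cast<uint8_t *>(iMem->pointer());
memcpy(p, smpBuf, BUF_SZ * sizeof(short));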
Java test program:
frameworks/base/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/audio/MediaAudioTrackTest.java
@LargeTest
public void testSetStereoVolumeMax() throws Exception {
    // constants for test
    final String TEST_NAME = "testSetStereoVolumeMax";
    final int TEST_SR = 22050;
    final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
    final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
    final int TEST_MODE = AudioTrack.MODE_STREAM;
    final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

    //-------- initialization --------------
    int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
    AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT,
            minBuffSize, TEST_MODE);
    byte data[] = new byte[minBuffSize/2];

    //-------- test --------------
    track.write(data, 0, data.length);
    track.write(data, 0, data.length);
    track.play();
    float maxVol = AudioTrack.getMaxVolume();
    assertTrue(TEST_NAME, track.setStereoVolume(maxVol, maxVol) == AudioTrack.SUCCESS);

    //-------- tear down --------------
    track.release();
}
The Java AudioTrack
frameworks/base/media/java/android/media/AudioTrack.java
The constructor below ends with a call to native_setup(), a JNI method:
public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId)
                throws IllegalArgumentException {
    ...
    // native initialization
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
    ...
}
JNI mapping table: static const JNINativeMethod gMethods[]
frameworks/base/core/jni/android_media_AudioTrack.cpp
static const JNINativeMethod gMethods[] = {
    // name,          signature,                                    funcPtr
    ...
    {"native_setup", "(Ljava/lang/Object;Ljava/lang/Object;[IIIIII[IJ)I",
                     (void *)android_media_AudioTrack_setup},
    ...
};
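Reading the signature string against the native function that follows, each token maps to one parameter (this annotation is mine, not from the source):

// (Ljava/lang/Object;  -> jobject   weak_this   (WeakReference<AudioTrack>)
//  Ljava/lang/Object;  -> jobject   jaa         (AudioAttributes)
//  [I                  -> jintArray jSampleRate
//  IIIII               -> jint      channelPositionMask, channelIndexMask,
//                                   audioFormat, buffSizeInBytes, memoryMode
//  [I                  -> jintArray jSession
//  J)                  -> jlong     nativeAudioTrack
//  I                   -> jint      return value (status code)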
android_media_AudioTrack_setup
This function creates a C++ AudioTrack with an empty argument list, then configures it through set():
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack) {
    ALOGV("sampleRates=%p, channel mask=%x, index mask=%x, audioFormat(Java)=%d, buffSize=%d"
            " nativeAudioTrack=0x%" PRIX64,
            jSampleRate, channelPositionMask, channelIndexMask, audioFormat, buffSizeInBytes,
            nativeAudioTrack);
    ...
    // create the native AudioTrack object
    lpTrack = new AudioTrack();
    ...
    switch (memoryMode) {
    case MODE_STREAM:
        status = lpTrack->set(
                AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                sampleRateInHertz,
                format,// word length, PCM
                nativeChannelMask,
                frameCount,
                AUDIO_OUTPUT_FLAG_NONE,
                audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                0,// shared mem
                true,// thread can call Java
                sessionId,// audio session ID
                AudioTrack::TRANSFER_SYNC,
                NULL, // default offloadInfo
                -1, -1, // default uid, pid values
                paa);
        break;

    case MODE_STATIC:
        // AudioTrack is using shared memory
        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
            ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
            goto native_init_failure;
        }
        status = lpTrack->set(
                AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                sampleRateInHertz,
                format,// word length, PCM
                nativeChannelMask,
                frameCount,
                AUDIO_OUTPUT_FLAG_NONE,
                audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
                0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                lpJniStorage->mMemBase,// shared mem
                true,// thread can call Java
                sessionId,// audio session ID
                AudioTrack::TRANSFER_SHARED,
                NULL, // default offloadInfo
                -1, -1, // default uid, pid values
                paa);
        break;

    default:
        ALOGE("Unknown mode %d", memoryMode);
        goto native_init_failure;
    }
    ...
}
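The two branches differ only in the shared-memory argument (0 vs. lpJniStorage->mMemBase) and the transfer type (TRANSFER_SYNC vs. TRANSFER_SHARED). From a native client the same contrast looks roughly like this (a sketch against the N-era constructor overloads, whose exact signatures vary across releases; pcm and iMem are placeholders I supply):

#include <media/AudioTrack.h>
using namespace android;

// MODE_STREAM equivalent: no shared buffer; data is pushed with write(),
// which blocks until the server side has consumed it (TRANSFER_SYNC).
sp<AudioTrack> streamTrack = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
        AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/);
streamTrack->start();
streamTrack->write(pcm, sizeof(pcm));

// MODE_STATIC equivalent: all samples already sit in shared memory
// (an sp<IMemory>), so playback needs no further copies (TRANSFER_SHARED).
sp<AudioTrack> staticTrack = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
        AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_MONO, iMem);
staticTrack->start();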
Analysis: each sound card on the device is represented as an output.
Each output corresponds to one playback thread; these threads are created and managed by AudioFlinger.
See AudioFlinger::openOutput: https://blog.csdn.net/we1less/article/details/118424539
Given the attributes it was configured with, AudioTrack asks AudioPolicyService to find the matching output and its playbackThread.
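Inside AudioFlinger this one-to-one relationship is an actual container: a keyed vector from output handle to playback thread. Condensed from AudioFlinger.h/.cpp (the member and method names are real; surrounding code is omitted):

// AudioFlinger.h: one PlaybackThread per opened output
DefaultKeyedVector<audio_io_handle_t, sp<PlaybackThread>> mPlaybackThreads;

// AudioFlinger.cpp: resolve the thread that owns a given output handle
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(
        audio_io_handle_t output) const
{
    return mPlaybackThreads.valueFor(output).get();
}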
AudioTrack creation: selecting an output
When the app constructs an AudioTrack it passes a streamType; this constructor chains to this(...) and, from there, reaches android_media_AudioTrack.cpp via JNI.
frameworks/base/media/java/android/media/AudioTrack.java
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
        int bufferSizeInBytes, int mode, int sessionId)
                throws IllegalArgumentException {
    // mState already == STATE_UNINITIALIZED
    this((new AudioAttributes.Builder())
                .setLegacyStreamType(streamType)
                .build(),
            (new AudioFormat.Builder())
                .setChannelMask(channelConfig)
                .setEncoding(audioFormat)
                .setSampleRate(sampleRateInHz)
                .build(),
            bufferSizeInBytes,
            mode, sessionId);
    deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
}
--------------------------------------------------------------------------------
setInternalLegacyStreamType
setLegacyStreamType() delegates to setInternalLegacyStreamType(), which derives the builder's mContentType (and mUsage) from the stream type:
frameworks/base/media/java/android/media/AudioAttributes.java
public Builder setInternalLegacyStreamType(int streamType) {
    switch(streamType) {
        case AudioSystem.STREAM_VOICE_CALL:
            mContentType = CONTENT_TYPE_SPEECH;
            break;
        ...
        default:
            Log.e(TAG, "Invalid stream type " + streamType + " for AudioAttributes");
    }
    mUsage = usageForStreamType(streamType);
    return this;
}
------------------------------------------------------------------------------
public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId)
                throws IllegalArgumentException {
    super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
    ...
    // native initialization
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
    if (initResult != SUCCESS) {
        loge("Error code "+initResult+" when initializing AudioTrack.");
        return; // with mState == STATE_UNINITIALIZED
    }
    ...
}
android_media_AudioTrack_setup
This function is reached from the Java layer through JNI. Here an audio_attributes_t (paa) is built from the Java AudioAttributes, a native AudioTrack is created with new AudioTrack(), and the paa is handed over in the set() call:
frameworks/base/core/jni/android_media_AudioTrack.cpp
static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this, jobject jaa,
        jintArray jSampleRate, jint channelPositionMask, jint channelIndexMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession,
        jlong nativeAudioTrack) {
    ...
    // read the AudioAttributes values
    paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
    const jstring jtags =
            (jstring) env->GetObjectField(jaa, javaAudioAttrFields.fieldFormattedTags);
    const char* tags = env->GetStringUTFChars(jtags, NULL);
    // copying array size -1, char array for tags was calloc'd, no need to NULL-terminate it
    strncpy(paa->tags, tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
    env->ReleaseStringUTFChars(jtags, tags);
    paa->usage = (audio_usage_t) env->GetIntField(jaa, javaAudioAttrFields.fieldUsage);
    paa->content_type =
            (audio_content_type_t) env->GetIntField(jaa, javaAudioAttrFields.fieldContentType);
    paa->flags = env->GetIntField(jaa, javaAudioAttrFields.fieldFlags);
    ALOGV("AudioTrack_setup for usage=%d content=%d flags=0x%#x tags=%s",
            paa->usage, paa->content_type, paa->flags, paa->tags);
    ...
    // create the native AudioTrack object
    lpTrack = new AudioTrack();
    ...
    // (MODE_STREAM branch of the switch shown earlier)
    status = lpTrack->set(
            AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
            sampleRateInHertz,
            format,// word length, PCM
            nativeChannelMask,
            frameCount,
            AUDIO_OUTPUT_FLAG_NONE,
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            0,// shared mem
            true,// thread can call Java
            sessionId,// audio session ID
            AudioTrack::TRANSFER_SYNC,
            NULL, // default offloadInfo
            -1, -1, // default uid, pid values
            paa);
    ...
}
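Outside of JNI, building the same attributes by hand is straightforward. This hypothetical helper (my illustration, not AOSP code) mirrors the field assignments above for a music stream; the enum values come from system/audio.h:

#include <stdlib.h>
#include <string.h>
#include <system/audio.h>

// Illustrative stand-in for the JNI conversion: fill an audio_attributes_t
// the way android_media_AudioTrack_setup does, for STREAM_MUSIC-like use.
static audio_attributes_t *makeMusicAttributes() {
    audio_attributes_t *paa = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
    paa->usage = AUDIO_USAGE_MEDIA;                 // what the sound is for
    paa->content_type = AUDIO_CONTENT_TYPE_MUSIC;   // what the sound is
    paa->flags = AUDIO_FLAG_NONE;                   // no special routing flags
    paa->tags[0] = '\0';                            // no vendor tags
    return paa;                                     // caller must free()
}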
set(): frameworks/av/media/libaudioclient/AudioTrack.cpp
status_t AudioTrack::set(
        ...
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ...
}
set() stores the parameters and ends by calling createTrack_l(), which invokes AudioSystem::getOutputForAttr(); that request lands in AudioPolicyManager, where output selection continues (steps a and b of the lettered chain, covering the path from set() into AudioPolicyManager, are not shown here):
c. AudioPolicyManager::getStrategyForAttr
d. AudioPolicyManager::getDeviceForStrategy
e. AudioPolicyManager::getOutputForDevice
   e.1 AudioPolicyManager::getOutputsForDevice
   e.2 output = selectOutput(outputs, flags, format);
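selectOutput() has to break ties when several outputs can reach the chosen device. Below is a toy, self-contained model of its priority rules (an illustration of the idea only, not the AOSP code: the real AudioPolicyManager::selectOutput() weighs flag matches and format support, falling back to the primary output):

#include <vector>

struct OutputDesc {
    int      id;           // stand-in for audio_io_handle_t
    unsigned flags;        // AUDIO_OUTPUT_FLAG_* bitmask
    bool     formatMatch;  // output natively supports the requested format
    bool     isPrimary;    // primary output is the last-resort default
};

// Toy selectOutput(): most matching flags wins, then format match,
// then the primary output as the tie-breaker.
int selectOutput(const std::vector<OutputDesc> &outputs, unsigned reqFlags) {
    int best = -1, bestScore = -1;
    for (const OutputDesc &o : outputs) {
        int score = __builtin_popcount(o.flags & reqFlags) * 4
                  + (o.formatMatch ? 2 : 0)
                  + (o.isPrimary   ? 1 : 0);
        if (score > bestScore) { bestScore = score; best = o.id; }
    }
    return best;  // -1 when the candidate list is empty
}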
The matching playbackThread then creates a corresponding Track.
Each playbackThread keeps a vector mTracks; one entry in it is paired with the application's AudioTrack.
Shared memory is established between the app-side AudioTrack and its paired Track in the playbackThread's mTracks.
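That shared memory is set up in AudioTrack::createTrack_l(): AudioFlinger::createTrack() returns an IAudioTrack whose getCblk() exposes an IMemory containing the audio_track_cblk_t control block (paraphrased from N-era AOSP; the exact createTrack() argument list varies by release):

// Paraphrased from AudioTrack::createTrack_l():
sp<IAudioTrack> track = audioFlinger->createTrack(/* stream, rate, format, ... */);
sp<IMemory> iMem = track->getCblk();     // shared region allocated by AudioFlinger
void *iMemPointer = iMem->pointer();
audio_track_cblk_t *cblk = static_cast<audio_track_cblk_t *>(iMemPointer);
mCblk = cblk;  // client and server now share the same ring-buffer state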
AudioTrack::AudioTrack(): frameworks/av/media/libaudioclient/AudioTrack.cpp
The parameterized constructor simply forwards everything to set():
AudioTrack::AudioTrack(
        ...
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}