投屏Sink端音频底层解码并用OpenSLES进行播放
Posted 咸鱼Jay
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了投屏Sink端音频底层解码并用OpenSLES进行播放相关的知识,希望对你有一定的参考价值。
一、代码分析
在公司项目中,音频解码及播放是把数据传到Java层进行解码播放的,其实这个步骤没有必要,完全可以在底层进行处理。
通过代码发现其实也做了在底层进行解码,那么为啥不直接使用底层解码播放呢,我们可以看看原先代码怎么做的:
代码中通过定义的宏DECODE_AUDIO_IN_JAVA
来控制mAudioCodec
对象是否创建,然后在通过mAudioCodec对象是否为null来控制音频数据是否传给Java层处理,代码中原来已经支持了在底层解码然后在传回上传使用AudioTrack进行播放,那我求改宏DECODE_AUDIO_IN_JAVA
来让其在底层进行解码,运行后会发现播放的声音非常的卡顿。
二、解决办法
最终发现,原来是在使用底层处理时,传给播放器的音频数据大小不对,这才导致播放的声音非常卡顿。
解决办法就是将下面红框的修改成info.size
就可以了。
三、底层播放音频
但是这样还是将音频的播放传给Java层进行播放。
我们可以通过使用OpenSLES来处理底层音频的播放
3.1 OpenSLRender类的实现
#ifndef _OPENSLRENDER_HEAD_
#define _OPENSLRENDER_HEAD_

#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

namespace android {

// Renders decoded PCM audio through OpenSL ES on its own thread.
// Incoming buffers (tagged with a "timePts" int64 meta entry) are kept in a
// jitter list; when the list overflows for several consecutive callbacks,
// frames are dropped to catch up, and silence is played when the list is empty.
class OpenSLRender : public Thread {
public:
    // buffertime: jitter-buffer depth in microseconds.
    // bufferframes: frame-count threshold before overflow handling kicks in.
    OpenSLRender(int64_t buffertime, int32_t bufferframes = 5);
    ~OpenSLRender();

    // Builds the OpenSL engine / output mix / buffer-queue player chain and
    // starts the buffering thread. chanNum: channel count (0 -> stereo).
    // rate: sample rate in Hz (48000 or 44100 are mapped; others default to 48k).
    bool init(int32_t chanNum, int rate);
    void stop();
    // Adjust the jitter-buffer depth at runtime.
    void setBufferTimes(int64_t buffertime);
    // Producer side: append one decoded PCM buffer (must carry "timePts").
    void queueInputBuffer(sp<ABuffer> data);
    // Consumer side: invoked from the OpenSL buffer-queue callback.
    void playerCallback();

private:
    SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
    SLObjectItf bqPlayerObject;
    SLPlayItf bqPlayerPlay;
    SLObjectItf outputMixObject;
    SLObjectItf engineObject;
    List<sp<ABuffer> > mList;       // pending PCM buffers, PTS-ordered
    int64_t mBufferTimeUs;          // jitter-buffer depth in us
    int32_t mBufferFrames;          // overflow threshold in frames
    int64_t mLasPts;
    bool bFist;                     // true until playback has been kicked off
    pthread_mutex_t startMutex;     // protects the start condition below
    pthread_cond_t startCond;       // signaled once the buffer is primed
    Mutex dataMutex;                // for data in/out on different threads
    bool bRun;
    sp<ABuffer> mMuteData;          // zero-filled buffer enqueued as silence
    int64_t mlastAudtime;           // last time the 1-second stats were logged
    int mPlayAudFrames;             // frames played since last stats log
    int mDropFrames;                // frames dropped since last stats log
    int32_t muteCounts;             // silence buffers enqueued since last log
    sp<ABuffer> mRenderData;        // buffer currently handed to OpenSL
    int32_t mOverFlowContinuetimes; // consecutive callbacks with a full list

private:
    virtual bool threadLoop();
    sp<ABuffer> intervalOut(int gap);
    sp<ABuffer> dropToMaxBuffer(int gap);
    sp<ABuffer> dropAutoNums();
    sp<ABuffer> getNextBuffer();
    void destroy();
    static void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context);

    DISALLOW_EVIL_CONSTRUCTORS(OpenSLRender);
};

}  // namespace android

#endif  // _OPENSLRENDER_HEAD_
//#define LOG_NDEBUG 0
#define LOG_TAG "OpenSLRender"

#include "OpenSLRender.h"

#define UNUSED(x) ((void)x)
// Consecutive overflowed callbacks tolerated before frame dropping starts.
#define AUD_DROP_THRESHOLD 5

namespace android {
// bufferTime: jitter-buffer depth in microseconds.
// bufferFrames: frame-count overflow threshold.
// Initializer list follows member declaration order; mLasPts is explicitly
// zeroed (it was previously left uninitialized).
OpenSLRender::OpenSLRender(int64_t bufferTime, int32_t bufferFrames) :
    mBufferTimeUs(bufferTime),
    mBufferFrames(bufferFrames),
    mLasPts(0),
    bFist(true),
    startMutex(PTHREAD_MUTEX_INITIALIZER),
    startCond(PTHREAD_COND_INITIALIZER),
    bRun(true),
    mMuteData(new ABuffer(2048)),
    mlastAudtime(0),
    mPlayAudFrames(0),
    mDropFrames(0),
    muteCounts(0),
    mRenderData(NULL),
    mOverFlowContinuetimes(0)
{
    // Zero-fill the mute buffer so it can be enqueued as silence.
    memset(mMuteData->data(), 0, mMuteData->size());
}
// Stops playback, tears down the OpenSL objects and joins the thread.
OpenSLRender::~OpenSLRender()
{
    stop();
    requestExit();
    requestExitAndWait();
    //this.clear(); //sp<>.clear, this is not sp
}
// Builds the OpenSL ES object chain (engine -> output mix -> buffer-queue
// player), registers the render callback, sets the player to PLAYING and
// starts the buffering thread. Returns true on success (failures abort via
// CHECK). chanNum==0 is treated as stereo; rate maps 48000/44100, anything
// else falls back to 48 kHz.
// NOTE: the original snippet declared `ids`/`req` twice in one scope; the
// player's arrays are renamed player_ids/player_req here.
bool OpenSLRender::init(int32_t chanNum, int rate)
{
    // engine interface
    SLEngineItf engineEngine;
    // output mix interfaces
    SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;
    // aux effect on the output mix, used by the buffer queue player
    const SLEnvironmentalReverbSettings reverbSettings =
            SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;
    SLresult result;

    // create engine
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the engine (SL_BOOLEAN_FALSE => synchronous)
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the engine interface, needed to create the other objects
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // create output mix, with environmental reverb as a non-required interface
    const SLInterfaceID mix_ids[1] = {SL_IID_ENVIRONMENTALREVERB};
    const SLboolean mix_req[1] = {SL_BOOLEAN_FALSE};
    result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, mix_ids, mix_req);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the output mix
    result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // Get the environmental reverb interface. This may fail when the effect
    // is unavailable (feature missing, CPU load, or MODIFY_AUDIO_SETTINGS not
    // granted) — reverb is optional, so failures are ignored.
    result = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
                                              &outputMixEnvironmentalReverb);
    if (SL_RESULT_SUCCESS == result) {
        result = (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                outputMixEnvironmentalReverb, &reverbSettings);
        (void)result;
    }

    // configure the audio source format
    SLuint32 samplesPerSec = SL_SAMPLINGRATE_48;
    if (48000 == rate) {
        samplesPerSec = SL_SAMPLINGRATE_48;
    } else if (44100 == rate) {
        samplesPerSec = SL_SAMPLINGRATE_44_1;
    }
    SLuint32 audChan = chanNum;
    // buffer queue with 2 buffers
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq =
            {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM,
                                   (audChan == 0) ? 2 : audChan,
                                   samplesPerSec,
                                   SL_PCMSAMPLEFORMAT_FIXED_16,
                                   SL_PCMSAMPLEFORMAT_FIXED_16,
                                   SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT,
                                   SL_BYTEORDER_LITTLEENDIAN};
    /*
     * Enable Fast Audio when possible: once we set the same rate as the
     * native one, the fast audio path will be triggered.
     */
    SLDataSource audioSrc = {&loc_bufq, &format_pcm};

    // configure the audio sink
    SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
    SLDataSink audioSnk = {&loc_outmix, NULL};

    /*
     * create audio player:
     * fast audio is not supported when SL_IID_EFFECTSEND is required, so it
     * is skipped for the fast audio case.
     */
    const SLInterfaceID player_ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME, /* SL_IID_EFFECTSEND,
            SL_IID_MUTESOLO,*/};
    const SLboolean player_req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
            /*SL_BOOLEAN_TRUE,*/};
    // only the first 2 interfaces are requested
    result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk,
                                                2, player_ids, player_req);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the player
    result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the play interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the buffer queue interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
                                             &bqPlayerBufferQueue);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // register the render callback on the buffer queue
    result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, this);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // set the player's state to playing
    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    CHECK(SL_RESULT_SUCCESS == result);
    (void)result;

    // start the buffering thread that primes the first buffer
    status_t err = run("opensl buffering", ANDROID_PRIORITY_AUDIO);
    CHECK(err == OK);
    return true;
}
void OpenSLRender:: destroy()
ALOGE("opeslRender destroy ![%s][%d]",__FUNCTION__,__LINE__);
(*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_STOPPED);
// destroy buffer queue audio player object, and invalidate all associated interfaces
if (bqPlayerObject != NULL)
(*bqPlayerObject)->Destroy(bqPlayerObject);
bqPlayerObject = NULL;
bqPlayerPlay = NULL;
bqPlayerBufferQueue = NULL;
// destroy output mix object, and invalidate all associated interfaces
if (outputMixObject != NULL)
(*outputMixObject)->Destroy(outputMixObject);
outputMixObject = NULL;
// destroy engine object, and invalidate all associated interfaces
if (engineObject != NULL)
(*engineObject)->Destroy(engineObject);
engineObject = NULL;
void OpenSLRender::stop()
// AutoMutex _l(dataMutex);
ALOGE("OpenSLRender_stop:[%s%d]",__FUNCTION__,__LINE__);
if(bRun==true)
bRun=false;
destroy();
// To support runtime adjustment of the jitter-buffer depth (in us).
void OpenSLRender::setBufferTimes(int64_t buffertime)
{
    AutoMutex _l(dataMutex);
    mBufferTimeUs = buffertime;
}
// Producer entry point: appends one decoded PCM buffer to the jitter list.
// Buffers arriving with a PTS older than the newest queued one are dropped.
// While in the priming phase (bFist), wakes threadLoop once the list spans
// half the configured buffer time or half the frame threshold.
void OpenSLRender::queueInputBuffer(sp<ABuffer> data)
{
    AutoMutex _l(dataMutex);
    // check PTS ordering against the newest queued buffer
    if (!mList.empty()) {
        sp<ABuffer> max = *(--mList.end());
        int64_t dataPts = 0, maxPts = 0;
        CHECK(data->meta()->findInt64("timePts", &dataPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        if (dataPts < maxPts) {
            // fixed: arguments were swapped vs. the message and used %ld for int64
            ALOGD("[%s%d] pts erro data:%lld list:%lld\n", __FUNCTION__, __LINE__,
                  (long long)dataPts, (long long)maxPts);
            return;
        }
    }
#if ENABLE_STATISTICS
    ALOGD(COMMON_DEBUG, " Audio in, %lld remain __%ld__ [%s%d]\n",
          ALooper::GetNowUs(), mList.size(), __FUNCTION__, __LINE__);
#endif
    mList.push_back(data);
    if (bFist) {
        sp<ABuffer> min = *mList.begin();
        sp<ABuffer> max = *(--mList.end());
        int64_t minPts = 0, maxPts = 0;
        CHECK(min->meta()->findInt64("timePts", &minPts));
        CHECK(max->meta()->findInt64("timePts", &maxPts));
        // ALOGE("==minPts=%lld,maxPts:%lld,mBufferTimeUs:%lld,(maxPts - minPts)=%lld",minPts,maxPts,mBufferTimeUs,(maxPts - minPts));
        //if((maxPts - minPts > mBufferTimeUs) || mList.size()>=mBufferFrames)
        if ((maxPts - minPts > mBufferTimeUs / 2) ||
                mList.size() >= (size_t)(mBufferFrames / 2)) {
            // buffer primed! wake the render thread ---------
            pthread_mutex_lock(&startMutex);
            pthread_cond_signal(&startCond);
            pthread_mutex_unlock(&startMutex);
        }
    }
}
void OpenSLRender::playerCallback()
AutoMutex _l(dataMutex);
if(!bRun)
return;
int64_t nowUs = ALooper::GetNowUs();
if(!mList.empty())
sp<ABuffer> min = *mList.begin();
sp<ABuffer> max = *(--mList.end());
int64_t minPts=0,maxPts=0;
CHECK(min->meta()->findInt64("timePts", &minPts));
CHECK(max->meta()->findInt64("timePts", &maxPts));
//if(maxPts - minPts > mBufferTimeUs -timeDuration)
if(mList.size()>=mBufferFrames)
mOverFlowContinuetimes++;
else
mOverFlowContinuetimes = 0;
if(mOverFlowContinuetimes > AUD_DROP_THRESHOLD)
//"Break out"
//Take one output to render for every two buffers
//data = intervalOut(2);
//data = dropAutoNums();
int flowFrames = mList.size() - mBufferFrames;
if( flowFrames >= mBufferFrames)
//ALOGD(COMMON_DEBUG,"video jetterbuff dopallflows %d [%s%d] mList.size():%ld \\n",flowFrames,__FUNCTION__,__LINE__,mList.size());
sp<ABuffer> data = dropToMaxBuffer(flowFrames);
mRenderData = getNextBuffer();
else
//"Break out"
//Take one output to render for every two buffers
sp<ABuffer> data = dropToMaxBuffer(2);
mRenderData = getNextBuffer();
mOverFlowContinuetimes = 0;
else
//one by one
mRenderData = getNextBuffer();
mPlayAudFrames++;
else
mRenderData = mMuteData;
muteCounts++;
mOverFlowContinuetimes = 0;
if(mRenderData ==NULL)
//just give the mutex data
mRenderData = mMuteData;
muteCounts++;
SLresult result;
//enqueue another buffer
result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, mRenderData->data(), mRenderData->size());
// the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
// which for this code example would indicate a programming error
if (SL_RESULT_SUCCESS != result)
if(!mlastAudtime)
mlastAudtime = nowUs;
if(nowUs - mlastAudtime >= 1000*1000)
ALOGE("playback(%d) droped(%d) muteCounts(%d) frames in one second,QSize:%d",mPlayAudFrames,mDropFrames,muteCounts,(int32_t)mList.size());
mDropFrames = 0;
mPlayAudFrames = 0;
mlastAudtime = nowUs;
muteCounts = 0;
// Static OpenSL trampoline: forwards the buffer-queue callback to the
// OpenSLRender instance stored in the context pointer.
void OpenSLRender::bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
    UNUSED(bq);
    OpenSLRender *pRender = static_cast<OpenSLRender *>(context);
    if (pRender) {
        pRender->playerCallback();
    }
}
// Take one buffer to render for every `gap` buffers, discarding the rest.
// Returns the last buffer popped (NULL when the list is empty).
sp<ABuffer> OpenSLRender::intervalOut(int gap)
{
    int count = 0;
    sp<ABuffer> data = NULL;
    while ((data = getNextBuffer()) != NULL && ++count < gap) {
        //ALOGD(COMMON_DEBUG," audio drop one [%s%d] remain mList.size():%ld \n",__FUNCTION__,__LINE__,mList.size());
    }
    return data;
}
// Pop and discard up to `gap`+1 buffers, counting them as dropped frames.
// Returns the last buffer popped (NULL when the list runs out).
sp<ABuffer> OpenSLRender::dropToMaxBuffer(int gap)
{
    sp<ABuffer> data = NULL;
    int count = 0;
    while ((data = getNextBuffer()) != NULL && count++ < gap) {
        mDropFrames++;
        //ALOGD(COMMON_DEBUG," audio drop one [%s%d] remain mList.size():%ld \n",__FUNCTION__,__LINE__,mList.size());
    }
    return data;
}
// Drop one queued buffer for each silence buffer previously inserted
// (muteCounts), rebalancing the queue. Returns the last buffer popped.
sp<ABuffer> OpenSLRender::dropAutoNums()
{
    sp<ABuffer> data = NULL;
    while ((data = getNextBuffer()) != NULL && muteCounts > 0) {
        muteCounts--;
        //ALOGD(COMMON_DEBUG," audio drop one [%s%d] remain mList.size():%ld \n",__FUNCTION__,__LINE__,mList.size());
    }
    return data;
}
// Pop the oldest buffer from the jitter list; NULL when empty.
// Caller must hold dataMutex.
sp<ABuffer> OpenSLRender::getNextBuffer()
{
    if (!mList.empty()) {
        sp<ABuffer> data = *mList.begin();
        mList.erase(mList.begin());
        return data;
    }
    return NULL;
}
bool OpenSLRender::threadLoop()
if(bFist)
pthread_mutex_lock(&startMutex);
pthread_cond_wait(&startCond,&startMutex);
pthread_mutex_unlock(&startMutex);
ALOGE("[%s%d]start out\\n",__FUNCTION__,__LINE__);
bFist = false;
//to start play
playerCallback();
return false;
3.2 OpenSLRender类的使用
3.2.1 创建OpenSLRender对象并初始化
3.2.2 OpenSLRender的数据处理播放
3.2.3 OpenSLRender的停止
:使用OpenSL ES进行音频解码
/*
*
*这里使用了transcode-1.1.7对wav文件进行解码,然后使用opensl es进行播放
*
*/
// Variables and structs used by the WAV/OpenSL ES playback example
WAV wav; // WAV file handle (wavlib)
SLObjectItf engineObject; // OpenSL engine object
SLEngineItf engineInterface; // engine interface
SLObjectItf outputMixObject; // output mix object
SLObjectItf audioPlayerObject; // audio player object
SLAndroidSimpleBufferQueueItf andioPlayerBufferQueueItf; // buffer-queue interface (sic: "andio")
SLPlayItf audioPlayInterface; // play interface
unsigned char *buffer; // staging buffer refilled by the callback
size_t bufferSize; // staging buffer size in bytes
//上下文
struct PlayerContext
WAV wav;
unsigned char *buffer;
size_t bufferSize;
PlayerContext(WAV wav,
unsigned char *buffer,
size_t bufferSize)
this->wav = wav;
this->buffer = buffer;
this->bufferSize = bufferSize;
;
// Realize an OpenSL object; SL_BOOLEAN_FALSE requests the synchronous
// (blocking) mode rather than async realization.
void RealizeObject(SLObjectItf object)
{
    (*object)->Realize(object, SL_BOOLEAN_FALSE);
}
具体实现流程:
1.打开文件
WAV wav = OpenWaveFile(env,jFileName);
// Open the WAV file named by jFileName for reading and log its bitrate.
// Returns the wavlib handle, or 0 on failure (the error is logged).
WAV OpenWaveFile(JNIEnv *env, jstring jFileName)
{
    const char *cFileName = env->GetStringUTFChars(jFileName, JNI_FALSE);
    WAVError err;
    WAV wav = wav_open(cFileName, WAV_READ, &err);
    LOGI("%d", wav_get_bitrate(wav));
    env->ReleaseStringUTFChars(jFileName, cFileName);
    if (wav == 0) {
        LOGE("%s", wav_strerror(err));
    }
    return wav;
}
2.创建OpenSL ES引擎
// OpenSL ES is thread-safe by default on Android; requesting it explicitly
// keeps the code portable to other platforms.
SLEngineOption options[] = {
    {(SLuint32)SL_ENGINEOPTION_THREADSAFE, (SLuint32)SL_BOOLEAN_TRUE}
};
// fixed: the option count must be ARRAY_LEN(options), not ARRAY_LEN(engineObject);
// no interfaces are requested on the engine object itself
slCreateEngine(&engineObject, ARRAY_LEN(options), options, 0, 0, 0);
// Objects are created unrealized (no resources allocated); realize before
// use, and destroy when finished.
RealizeObject(engineObject);
3.获取引擎接口
(*engineObject)->GetInterface(engineObject,SL_IID_ENGINE,&engineInterface);
4.创建输出混音器
(*engineInterface)->CreateOutputMix(engineInterface,&outputMixObject,0,0,0); //没有接口
//实例化混音器
RealizeObject(outputMixObject);
5.创建缓冲区保存读取到的音频数据库
//缓冲区的大小
bufferSize = wav_get_channels(wav) * wav_get_rate(wav) * wav_get_bits(wav);
buffer = new unsigned char[bufferSize];
6.创建带有缓冲区队列的音频播放器
CreateBufferQueueAudioPlayer(wav,engineInterface,outputMixObject,audioPlayerObject);
//实例化音频播放器
RealizeObject(audioPlayerObject);
CreateBufferQueueAudioPlayer.cpp
// wavlib is a C library: give its declarations C linkage. The OpenSL ES and
// log headers carry their own extern "C" guards.
extern "C" {
#include "wavlib.h"
}
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <android/log.h>

#define ARRAY_LEN(a) (sizeof(a) / sizeof(a[0]))
// Create an audio player object sourced from an Android simple buffer queue
// carrying PCM described by the WAV header, sinking into the given output mix.
// The created (unrealized) object is returned through audioPlayerObject.
void CreateBufferQueueAudioPlayer(
        WAV wav,
        SLEngineItf engineEngine,
        SLObjectItf outputMixObject,
        SLObjectItf &audioPlayerObject)
{
    // Android simple buffer-queue locator for the data source
    SLDataLocator_AndroidSimpleBufferQueue dataSourceLocator = {
        SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, // locator type
        1                                        // number of buffers
    };
    // PCM data source format, taken from the WAV header
    SLDataFormat_PCM dataSourceFormat = {
        SL_DATAFORMAT_PCM,        // format type
        wav_get_channels(wav),    // channel count
        wav_get_rate(wav) * 1000, // sample rate in milliHertz
        wav_get_bits(wav),        // bits per sample
        wav_get_bits(wav),        // container size
        SL_SPEAKER_FRONT_CENTER,  // channel mask
        SL_BYTEORDER_LITTLEENDIAN // byte order
    };
    // the data source is a simple buffer queue with PCM format
    SLDataSource dataSource = {
        &dataSourceLocator, // data locator
        &dataSourceFormat   // data format
    };
    // output-mix locator for the data sink
    SLDataLocator_OutputMix dataSinkLocator = {
        SL_DATALOCATOR_OUTPUTMIX, // locator type
        outputMixObject           // output mix
    };
    // the data sink is an output mix (no format needed)
    SLDataSink dataSink = {
        &dataSinkLocator, // locator
        0                 // format
    };
    // required interfaces; creation fails if one is unavailable
    SLInterfaceID interfaceIds[] = {
        SL_IID_BUFFERQUEUE
    };
    SLboolean requiredInterfaces[] = {
        SL_BOOLEAN_TRUE // for SL_IID_BUFFERQUEUE
    };
    // create the audio player object
    SLresult result = (*engineEngine)->CreateAudioPlayer(
        engineEngine,
        &audioPlayerObject,
        &dataSource,
        &dataSink,
        ARRAY_LEN(interfaceIds),
        interfaceIds,
        requiredInterfaces);
}
7.获得缓冲区队列接口Buffer Queue Interface
//通过缓冲区队列接口对缓冲区进行排序播放
(*audioPlayerObject)->GetInterface(audioPlayerObject,SL_IID_BUFFERQUEUE,&andioPlayerBufferQueueItf);
8.注册音频播放器回调函数
//当播放器完成对前一个缓冲区队列的播放时,回调函数会被调用,然后我们又继续读取音频数据,直到结束
//上下文,包裹参数方便再回调函数中使用
PlayerContext *ctx = new PlayerContext(wav,buffer,bufferSize);
(*andioPlayerBufferQueueItf)->RegisterCallback(andioPlayerBufferQueueItf,PlayerCallBack,ctx);
// Buffer-queue callback: called whenever the previous buffer finishes
// playing. Refills the staging buffer from the WAV file and re-enqueues it;
// on EOF/error, tears down the context.
void PlayerCallBack(SLAndroidSimpleBufferQueueItf andioPlayerBufferQueue, void *context)
{
    PlayerContext *ctx = (PlayerContext *)context;
    // read the next chunk of audio data
    ssize_t readSize = wav_read_data(ctx->wav, ctx->buffer, ctx->bufferSize);
    if (0 < readSize) {
        (*andioPlayerBufferQueue)->Enqueue(andioPlayerBufferQueue, ctx->buffer, readSize);
    } else {
        // end of stream: destroy the context
        CloseWaveFile(ctx->wav); // close the file
        delete[] ctx->buffer;    // fixed: buffer came from new[], so delete[]
        delete ctx;              // fixed: the heap-allocated context was leaked
    }
}
9.获取Play Interface通过对SetPlayState函数来启动播放音乐
//一旦播放器被设置为播放状态,该音频播放器开始等待缓冲区排队就绪
(*audioPlayerObject)->GetInterface(audioPlayerObject,SL_IID_PLAY,&audioPlayInterface);
//设置播放状态
(*audioPlayInterface)->SetPlayState(audioPlayInterface,SL_PLAYSTATE_PLAYING);
10.开始,让第一个缓冲区入队
PlayerCallBack(andioPlayerBufferQueueItf,ctx);
完整代码
#include "com_dongnaoedu_jasonaudioplayer_AudioPlayer.h"

// wavlib is a C library: give its declarations C linkage. The OpenSL ES and
// log headers carry their own extern "C" guards.
extern "C" {
#include "wavlib.h"
}
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <android/log.h>
#include "CreateBufferQueueAudioPlayer.cpp"

#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"jason",FORMAT,##__VA_ARGS__);
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"jason",FORMAT,##__VA_ARGS__);
#define ARRAY_LEN(a) (sizeof(a) / sizeof(a[0]))
WAV wav; // WAV file handle (wavlib)
SLObjectItf engineObject; // OpenSL engine object
SLEngineItf engineInterface; // engine interface
SLObjectItf outputMixObject; // output mix object
SLObjectItf audioPlayerObject; // audio player object
SLAndroidSimpleBufferQueueItf andioPlayerBufferQueueItf; // buffer-queue interface (sic: "andio")
SLPlayItf audioPlayInterface; // play interface
unsigned char *buffer; // staging buffer refilled by the callback
size_t bufferSize; // staging buffer size in bytes
//上下文
struct PlayerContext
WAV wav;
unsigned char *buffer;
size_t bufferSize;
PlayerContext(WAV wav,
unsigned char *buffer,
size_t bufferSize)
this->wav = wav;
this->buffer = buffer;
this->bufferSize = bufferSize;
;
// Open the WAV file named by jFileName for reading and log its bitrate.
// Returns the wavlib handle, or 0 on failure (the error is logged).
WAV OpenWaveFile(JNIEnv *env, jstring jFileName)
{
    const char *cFileName = env->GetStringUTFChars(jFileName, JNI_FALSE);
    WAVError err;
    WAV wav = wav_open(cFileName, WAV_READ, &err);
    LOGI("%d", wav_get_bitrate(wav));
    env->ReleaseStringUTFChars(jFileName, cFileName);
    if (wav == 0) {
        LOGE("%s", wav_strerror(err));
    }
    return wav;
}
// Close a wavlib handle previously opened with OpenWaveFile.
void CloseWaveFile(WAV wav)
{
    wav_close(wav);
}
// Realize an OpenSL object; SL_BOOLEAN_FALSE requests the synchronous
// (blocking) mode rather than async realization.
void RealizeObject(SLObjectItf object)
{
    (*object)->Realize(object, SL_BOOLEAN_FALSE);
}
// Buffer-queue callback: called whenever the previous buffer finishes
// playing. Refills the staging buffer from the WAV file and re-enqueues it;
// on EOF/error, tears down the context.
void PlayerCallBack(SLAndroidSimpleBufferQueueItf andioPlayerBufferQueue, void *context)
{
    PlayerContext *ctx = (PlayerContext *)context;
    // read the next chunk of audio data
    ssize_t readSize = wav_read_data(ctx->wav, ctx->buffer, ctx->bufferSize);
    if (0 < readSize) {
        (*andioPlayerBufferQueue)->Enqueue(andioPlayerBufferQueue, ctx->buffer, readSize);
    } else {
        // end of stream: destroy the context
        CloseWaveFile(ctx->wav); // close the file
        delete[] ctx->buffer;    // fixed: buffer came from new[], so delete[]
        delete ctx;              // fixed: the heap-allocated context was leaked
    }
}
// JNI entry point: open the given WAV file and play it through OpenSL ES.
// Once the player reaches SL_PLAYSTATE_PLAYING, priming the queue with one
// buffer starts a callback-driven loop that runs until EOF.
JNIEXPORT void JNICALL Java_com_dongnaoedu_jasonaudioplayer_AudioPlayer_play
        (JNIEnv *env, jclass jthiz, jstring jFileName)
{
    // 1. open the file
    WAV wav = OpenWaveFile(env, jFileName);

    // 2. create the OpenSL ES engine
    // OpenSL ES is thread-safe by default on Android; requesting it
    // explicitly keeps the code portable to other platforms.
    SLEngineOption options[] = {
        {(SLuint32)SL_ENGINEOPTION_THREADSAFE, (SLuint32)SL_BOOLEAN_TRUE}
    };
    // fixed: option count must be ARRAY_LEN(options), not ARRAY_LEN(engineObject);
    // no interfaces are requested on the engine object itself
    slCreateEngine(&engineObject, ARRAY_LEN(options), options, 0, 0, 0);
    // Objects are created unrealized (no resources allocated); realize
    // before use, destroy when finished.
    RealizeObject(engineObject);

    // 3. get the engine interface
    (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineInterface);

    // 4. create and realize the output mix (no interfaces requested)
    (*engineInterface)->CreateOutputMix(engineInterface, &outputMixObject, 0, 0, 0);
    RealizeObject(outputMixObject);

    // 5. allocate the staging buffer for decoded audio
    // sized from the WAV parameters (channels * rate * bits) — note this is
    // bits, not bytes; kept as in the original
    bufferSize = wav_get_channels(wav) * wav_get_rate(wav) * wav_get_bits(wav);
    buffer = new unsigned char[bufferSize];

    // 6. create and realize the buffer-queue audio player
    CreateBufferQueueAudioPlayer(wav, engineInterface, outputMixObject, audioPlayerObject);
    RealizeObject(audioPlayerObject);

    // 7. get the buffer-queue interface used to enqueue buffers for playback
    (*audioPlayerObject)->GetInterface(audioPlayerObject, SL_IID_BUFFERQUEUE,
                                       &andioPlayerBufferQueueItf);

    // 8. register the player callback; the context bundles the parameters
    // the callback needs to keep reading audio until EOF
    PlayerContext *ctx = new PlayerContext(wav, buffer, bufferSize);
    (*andioPlayerBufferQueueItf)->RegisterCallback(andioPlayerBufferQueueItf, PlayerCallBack, ctx);

    // 9. get the play interface and start playback; once playing, the player
    // waits for buffers to be enqueued
    (*audioPlayerObject)->GetInterface(audioPlayerObject, SL_IID_PLAY, &audioPlayInterface);
    (*audioPlayInterface)->SetPlayState(audioPlayInterface, SL_PLAYSTATE_PLAYING);

    // 10. prime the queue with the first buffer
    PlayerCallBack(andioPlayerBufferQueueItf, ctx);

    // the file is closed by the callback when it reaches EOF
    //CloseWaveFile(wav);
}
以上是关于投屏Sink端音频底层解码并用OpenSLES进行播放的主要内容,如果未能解决你的问题,请参考以下文章
Android音乐播放器-使用FFmpeg及OpenSLES
Android音乐播放器-使用FFmpeg及OpenSLES