Android录制音频并使用ijkplayer播放
Posted TenTenXu
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了Android录制音频并使用ijkplayer播放相关的知识,希望对你有一定的参考价值。
1、使用MediaRecorder录音
1.1、开始录制
private MediaRecorder mMediaRecorder;
private File mTempFile;
public void startRecordAudio(Context context)
//临时文件
if (mTmpFile == null)
mTmpFile = SdcardUtils.getPublicFile(context, "record/voice.aac");
Log.i("tmpFile path", mTempFile.getPath());
final File file = mTempFile;
if (file.exists())
file.delete();
MediaRecorder recorder = mMediaRecorder;
if (recorder == null)
recorder = new MediaRecorder();
mMediaRecorder = recorder;
//设置输入源
recorder.setAudiosource(MediaRecorder.AudioSource.MIC);
//设置音频输出格式/编码格式
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
recorder.setOutputFormat(MediaRecorder.OutputFormat.AAC_ADTS);
else
recorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
//设置音频输出路径
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O)
recorder.setOutputFile(file);
else
recorder.setOutputFile(file.getAbsolutePath());
try
//准备录制
recorder.prepare();
//开始录制音频
recorder.start();
requestAudioFocus();
catch (Exception e)
e.printStackTrace();
Log.e(TAG, e.toString());
1.2、结束录制
public File stopRecordAudio()
final MediaRecorder recorder = mMediaRecorder;
if (recorder != null)
try
recorder.stop();
recorder.release();
mMediaRecorder = null;
catch (Exception e)
e.printStackTrace();
Log.e(TAG, e.toString());
return null;
finally
abandonAudioFocus();
File file = mTmpFile;
if (file != null && file.exists() && file.length() > 0)
return file;
else
return null;
2、使用AudioRecorder录音
在使用AudioRecorder时,需要了解采样率、频道配置和PCM音频格式数据的相关知识;
- PCM:音频的原始数据(AudioFormat.ENCODING_PCM_16BIT、AudioFormat.ENCODING_PCM_8BIT、AudioFormat.ENCODING_PCM_FLOAT等等);不同的PCM代表不同的位深
- 采样率:录音设备在单位时间内对模拟信号采样的多少,采样频率越高,机械波的波形就越真实越自然。常用的有16000(16kHz)、44100(44.1kHz)等
- 频道:单声道输入频道、输出声道等,相关的值有(AudioFormat.CHANNEL_IN_MONO,AudioFormat.CHANNEL_IN_STEREO等等)
//根据采样率+音频格式+频道得到录音缓存大小
int minBufferSize = AudioRecord.getMinBufferSize(16000,
AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT);
针对AudioRecord的初始化,也需要采样率、PCM原始音频格式和频道,另外还需要录音缓存大小以及录音设备,如下:
//MediaRecorder.AudioSource.MIC是麦克风录音设备,
//minBufferSize是录音缓存大小
new AudioRecord(MediaRecorder.AudioSource.MIC, 16000, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBufferSize);
AudioRecorder开始录音方法
recorder.startRecording();
开启子线程,通过read方法获取录音数据
while (isRecording && !recordingAudioThread.isInterrupted())
//获取录音数据
read = mAudioRecorder.read(data, 0, data.length);
if (AudioRecord.ERROR_INVALID_OPERATION != read)
try
fos.write(data);
Log.i("audioRecord", "写录音数据->" + read);
catch (IOException e)
e.printStackTrace();
2.1、开始录制(完整代码)
private AudioRecord mAudioRecorder;
private File mTempFile;
private boolean isRecording;
private Thread recordingAudioThread;
public void startRecordAudio(Context context)
//临时路径
if (mTmpFile == null)
mTmpFile = SdcardUtils.getPublicFile(context, "record/voice.pcm");
Log.i("tmpFile path", mTmpFile.getPath());
final File file = mTmpFile;
if (file.exists())
file.delete();
AudioRecord recorder = mAudioRecorder;
if (recorder == null)
//16000是采样率,常用采样率有16000(1.6KHz),441000(44.1KHz)
int minBufferSize = AudioRecord.getMinBufferSize(16000,
AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT);
recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, 16000, AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT, minBufferSize);
mAudioRecorder = recorder;
try
//开始录制音频
isRecording = true;
recorder.startRecording();
recordingAudioThread = new Thread(() ->
try
file.createNewFile();
catch (IOException e)
e.printStackTrace();
FileOutputStream fos = null;
try
fos = new FileOutputStream(file);
catch (FileNotFoundException e)
e.printStackTrace();
if (fos != null)
byte[] data = new byte[minBufferSize];
int read;
while (isRecording && !recordingAudioThread.isInterrupted())
read = mAudioRecorder.read(data, 0, data.length);
if (AudioRecord.ERROR_INVALID_OPERATION != read)
try
fos.write(data);
Log.i("audioRecord", "录音数据:" + read);
catch (IOException e)
e.printStackTrace();
try
fos.close();
catch (IOException e)
e.printStackTrace();
);
recordingAudioThread.start();
requestAudioFocus();
catch (Exception e)
e.printStackTrace();
2.2、结束录制
public File stopRecordAudio()
isRecording = false;
final AudioRecord audioRecord = mAudioRecorder;
if (audioRecord != null)
audioRecord.stop();
audioRecord.release();
mAudioRecorder = null;
recordingAudioThread.interrupt();
recordingAudioThread = null;
File file = mTmpFile;
if (file != null && file.exists() && file.length() > 0)
return file;
else
return null;
3、PCM格式转码AAC
这个转码太难了,参考文章:Android pcm编码为aac
不过该文章中的代码有bug,当采样率为44.1kHz的时候可以转AAC,并且正常播放,但当采样率为16kHz的时候,转成AAC之后播放的声音极为尖锐,调整了大半天后发现是addADTStoPacket方法中freqIdx的值写死为4了
再参考了文章:Pcm 转 AAc,修复了该bug
package com.example.recordvoice.utils;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.os.Build;
import android.util.Log;
import androidx.annotation.RequiresApi;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
// PCM -> AAC encoder (listing elided by the article; only the relevant lines are shown).
public class AacEncoder
...
// ADTS sampling-frequency index derived from the configured sample rate.
// NOTE(review): this replaces the hard-coded 4 (44100 Hz) that caused the
// pitch bug described in the article — confirm against the full source.
private int sampleRateType;
public void init(int sampleRate, int inChannel,
int channelCount, int sampleFormat,
String srcPath, String dstPath,
IHanlderCallback callback)
...
// Resolve the frequency index once, from the actual sample rate.
sampleRateType = ADTSUtils.getSampleRateType(mSampleRate);
...
......
......
......
// Writes the 7-byte ADTS header in front of each AAC packet.
private void addADTStoPacket(byte[] packet, int packetLen)
....
// freqIdx now tracks the real sample rate instead of being fixed at 4.
int freqIdx = sampleRateType;
....
static class ADTSUtils
private static Map<String, Integer> SAMPLE_RATE_TYPE;
static
SAMPLE_RATE_TYPE = new HashMap<>();
SAMPLE_RATE_TYPE.put("96000", 0);
SAMPLE_RATE_TYPE.put("88200", 1);
SAMPLE_RATE_TYPE.put("64000", 2);
SAMPLE_RATE_TYPE.put("48000", 3);
SAMPLE_RATE_TYPE.put("44100", 4);
SAMPLE_RATE_TYPE.put("32000", 5);
SAMPLE_RATE_TYPE.put("24000", 6);
SAMPLE_RATE_TYPE.put("22050", 7);
SAMPLE_RATE_TYPE.put("16000", 8);
SAMPLE_RATE_TYPE.put("12000", 9);
SAMPLE_RATE_TYPE.put("11025", 10);
SAMPLE_RATE_TYPE.put("8000", 11);
SAMPLE_RATE_TYPE.put("7350", 12);
public static int getSampleRateType(int sampleRate)
return SAMPLE_RATE_TYPE.get(sampleRate + "");
4、音频焦点
4.1、音频焦点意义
当有两个或者两个以上音频同时向同一音频输出器播放,那么声音就会混在一起,为了避免所有音乐应用同时播放,就有了“音频焦点”的概念,希望做到 一次只能有一个应用获得音频焦点
4.2、音频焦点获取
private boolean mAudioFocus = false;
private AudioFocusRequest mAudioFocusRequest;
private AbsOnAudioFocusChangeListener mOnAudioFocusChangeListener;
private android.media.AudioManager mAM;
abstract static class AbsOnAudioFocusChangeListener implements android.media.AudioManager.OnAudioFocusChangeListener
boolean isEnabled = true;
@Override
public final void onAudioFocusChange(int focusChange)
if (isEnabled)
onChane(focusChange);
abstract void onChane(int focusChane);
private synchronized void requestAudioFocus()
android.media.AudioManager am = mAM;
mOnAudioFocusChangeListener = new AbsOnAudioFocusChangeListener()
@Override
void onChane(int focusChane)
Log.i(TAG, "focusChane:" + focusChane);
synchronized (AudioManager.this)
switch (focusChane)
case AUDIOFOCUS_LOSS:
case AUDIOFOCUS_LOSS_TRANSIENT:
case AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK:
if (mAudioFocus)
stopPlay(true, true);
else
stopPlay(false, true);
break;
case AUDIOFOCUS_GAIN:
mAudioFocus = true;
break;
;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O)
mAudioFocusRequest = new AudioFocusRequest.Builder(AUDIOFOCUS_GAIN)
.setOnAudioFocusChangeListener(mOnAudioFocusChangeListener).build();
am.requestAudioFocus(mAudioFocusRequest);
else
am.requestAudioFocus(mOnAudioFocusChangeListener, AudioStream.MODE_NORMAL, AUDIOFOCUS_GAIN);
mAudioFocus = true;
4.3、放弃音频焦点
private synchronized void abandonAudioFocus() {
    android.media.AudioManager am = mAM;
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
        if (mAudioFocusRequest != null) {
            am.abandonAudioFocusRequest(mAudioFocusRequest);
        }
    } else {
        // 旧版 API:直接通过监听器放弃焦点(原文此处与下一篇文章标题粘连,已还原)
        am.abandonAudioFocus(mOnAudioFocusChangeListener);
    }
}

Ijkplayer播放器源码分析之音视频输出(二)——音频篇
这篇文章的ijkplayer音频源码研究我们还是选择Android平台,它的音频解码是不支持硬解的,音频播放使用的API是OpenSL ES或AudioTrack。
OpenSL ES & AudioTrack
- OpenSL ES
什么是OpenSL ES?下面来自官网的说明:
OpenSL ES™ is a royalty-free, cross-platform, hardware-accelerated audio API tuned for embedded systems. It provides a standardized, high-performance, low-latency method to access audio functionality for developers of native applications on embedded mobile multimedia devices, enabling straightforward cross-platform deployment of hardware and software audio capabilities, reducing implementation effort, and promoting the market for advanced audio.
可见OpenSL ES是专门为嵌入式设备设计的音频API,所以不适合在PC上使用。
- AudioTrack
AudioTrack是专门为Android应用提供的java API,显然也不适合在PC上使用。
使用AudioTrack API来输出音频就需要把音频数据从java层拷贝到native层。而OpenSL ES API是Android NDK提供的native接口,它可以在native层直接获取和处理数据,因此为了提高效率,应该使用OpenSL ES API。通过如下java接口设置音频输出API:
ijkMediaPlayer.setOption(IjkMediaPlayer.OPT_CATEGORY_PLAYER, "opensles", 0);
Ijkplayer使用jni4android来为AudioTrack的java API自动生成JNI native代码。
我们尽量选择底层的代码来进行研究,因此本篇文章梳理一遍OpenSL ES API在ijkplayer中的使用。
源码分析
创建播放器音频输出对象
调用如下函数生成音频输出对象:
SDL_Aout *SDL_AoutAndroid_CreateForOpenSLES()
创建并初始化Audio Engine:
//创建
SLObjectItf slObject = NULL;
ret = slCreateEngine(&slObject, 0, NULL, 0, NULL, NULL);
CHECK_OPENSL_ERROR(ret, "%s: slCreateEngine() failed", __func__);
opaque->slObject = slObject;
//初始化
ret = (*slObject)->Realize(slObject, SL_BOOLEAN_FALSE);
CHECK_OPENSL_ERROR(ret, "%s: slObject->Realize() failed", __func__);
//获取SLEngine接口对象slEngine
SLEngineItf slEngine = NULL;
ret = (*slObject)->GetInterface(slObject, SL_IID_ENGINE, &slEngine);
CHECK_OPENSL_ERROR(ret, "%s: slObject->GetInterface() failed", __func__);
opaque->slEngine = slEngine;
打开音频输出设备:
//使用slEngine打开输出设备
SLObjectItf slOutputMixObject = NULL;
const SLInterfaceID ids1[] = {SL_IID_VOLUME};
const SLboolean req1[] = {SL_BOOLEAN_FALSE};
ret = (*slEngine)->CreateOutputMix(slEngine, &slOutputMixObject, 1, ids1, req1);
CHECK_OPENSL_ERROR(ret, "%s: slEngine->CreateOutputMix() failed", __func__);
opaque->slOutputMixObject = slOutputMixObject;
//初始化
ret = (*slOutputMixObject)->Realize(slOutputMixObject, SL_BOOLEAN_FALSE);
CHECK_OPENSL_ERROR(ret, "%s: slOutputMixObject->Realize() failed", __func__);
将上述创建的OpenSL ES相关对象保存到SDL_Aout_Opaque中。
设置播放器音频输出对象的回调函数:
aout->free_l = aout_free_l;
aout->opaque_class = &g_opensles_class;
aout->open_audio = aout_open_audio;
aout->pause_audio = aout_pause_audio;
aout->flush_audio = aout_flush_audio;
aout->close_audio = aout_close_audio;
aout->set_volume = aout_set_volume;
aout->func_get_latency_seconds = aout_get_latency_seconds;
配置并创建音频播放器
通过如下函数进行:
static int aout_open_audio(SDL_Aout *aout, const SDL_AudioSpec *desired, SDL_AudioSpec *obtained)
配置数据源
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {
SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
OPENSLES_BUFFERS
};
SLDataFormat_PCM *format_pcm = &opaque->format_pcm;
format_pcm->formatType = SL_DATAFORMAT_PCM;
format_pcm->numChannels = desired->channels;
format_pcm->samplesPerSec = desired->freq * 1000; // milli Hz
format_pcm->bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
format_pcm->containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
switch (desired->channels) {
case 2:
format_pcm->channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
break;
case 1:
format_pcm->channelMask = SL_SPEAKER_FRONT_CENTER;
break;
default:
ALOGE("%s, invalid channel %d", __func__, desired->channels);
goto fail;
}
format_pcm->endianness = SL_BYTEORDER_LITTLEENDIAN;
SLDataSource audio_source = {&loc_bufq, format_pcm};
配置数据管道
SLDataLocator_OutputMix loc_outmix = {
SL_DATALOCATOR_OUTPUTMIX,
opaque->slOutputMixObject
};
SLDataSink audio_sink = {&loc_outmix, NULL};
其它参数
const SLInterfaceID ids2[] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_VOLUME, SL_IID_PLAY };
static const SLboolean req2[] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
创建播放器
ret = (*slEngine)->CreateAudioPlayer(slEngine, &slPlayerObject, &audio_source,
&audio_sink, sizeof(ids2) / sizeof(*ids2),
ids2, req2);
获取相关接口
//获取seek和play接口
ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_PLAY, &opaque->slPlayItf);
CHECK_OPENSL_ERROR(ret, "%s: slPlayerObject->GetInterface(SL_IID_PLAY) failed", __func__);
//音量调节接口
ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_VOLUME, &opaque->slVolumeItf);
CHECK_OPENSL_ERROR(ret, "%s: slPlayerObject->GetInterface(SL_IID_VOLUME) failed", __func__);
//获取音频输出的BufferQueue接口
ret = (*slPlayerObject)->GetInterface(slPlayerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &opaque->slBufferQueueItf);
CHECK_OPENSL_ERROR(ret, "%s: slPlayerObject->GetInterface(SL_IID_ANDROIDSIMPLEBUFFERQUEUE) failed", __func__);
设置回调函数
回调函数并不传递音频数据,它只是告诉程序:我已经准备好接受处理(播放)数据了。这时候就可以调用Enqueue向BufferQueue中插入音频数据了。
ret = (*opaque->slBufferQueueItf)->RegisterCallback(opaque->slBufferQueueItf, aout_opensles_callback, (void*)aout);
CHECK_OPENSL_ERROR(ret, "%s: slBufferQueueItf->RegisterCallback() failed", __func__);
此回调函数每执行一次Dequeue会被执行一次。
音频数据的处理
音频数据的处理为典型的生产者消费者模型,解码线程解码出音频数据插入到队列中,音频驱动程序取出数据将声音播放出来。
audio_thread函数为音频解码线程主函数:
static int audio_thread(void *arg){
do {
ffp_audio_statistic_l(ffp);
if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)//从PacketQueue中取出pakcet并进行解码,生成一帧数据
...
if (!(af = frame_queue_peek_writable(&is->sampq)))
goto the_end;
af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
af->pos = frame->pkt_pos;
af->serial = is->auddec.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
av_frame_move_ref(af->frame, frame);
frame_queue_push(&is->sampq);//将帧数据插入帧队列 FrameQueue
}
aout_thread_n 为音频输出线程主函数:
static int aout_thread_n(SDL_Aout *aout){
...
SDL_LockMutex(opaque->wakeup_mutex);
//如果没有退出播放&&(当前播放器状态为暂停||插入音频BufferQueue中的数据条数大于OPENSLES_BUFFERS)
if (!opaque->abort_request && (opaque->pause_on || slState.count >= OPENSLES_BUFFERS)) {
//不知道为什么if下面又加了一层while??
while (!opaque->abort_request && (opaque->pause_on || slState.count >= OPENSLES_BUFFERS)) {
//如果此时为非暂停状态,将播放器状态置为PLAYING
if (!opaque->pause_on) {
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
}
//如果暂停或者队列中数据过多,这里都会等待一个条件变量,并将过期时间置为1秒,应该是防止BufferQueue中的数据不再快速增加
SDL_CondWaitTimeout(opaque->wakeup_cond, opaque->wakeup_mutex, 1000);
SLresult slRet = (*slBufferQueueItf)->GetState(slBufferQueueItf, &slState);
if (slRet != SL_RESULT_SUCCESS) {
ALOGE("%s: slBufferQueueItf->GetState() failed\n", __func__);
SDL_UnlockMutex(opaque->wakeup_mutex);
}
//暂停播放
if (opaque->pause_on)
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PAUSED);
}
//恢复播放
if (!opaque->abort_request && !opaque->pause_on) {
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
}
}
...
next_buffer = opaque->buffer + next_buffer_index * bytes_per_buffer;
next_buffer_index = (next_buffer_index + 1) % OPENSLES_BUFFERS;
//调用回调函数生成插入到BufferQueue中的数据
audio_cblk(userdata, next_buffer, bytes_per_buffer);
//如果需要刷新BufferQueue数据,则清除数据,何时需要清理数据??解释在下面
if (opaque->need_flush) {
(*slBufferQueueItf)->Clear(slBufferQueueItf);
opaque->need_flush = false;
}
//不知道为什么会判断两次??
if (opaque->need_flush) {
ALOGE("flush");
opaque->need_flush = 0;
(*slBufferQueueItf)->Clear(slBufferQueueItf);
} else {
//最终将数据插入到BufferQueue中。
slRet = (*slBufferQueueItf)->Enqueue(slBufferQueueItf, next_buffer, bytes_per_buffer);
...
}
以下是为条件变量opaque->wakeup_cond 发送signal的几个函数,目的是让输出线程快速响应
- static void aout_opensles_callback(SLAndroidSimpleBufferQueueItf caller, void *pContext)
- static void aout_close_audio(SDL_Aout *aout)
- static void aout_pause_audio(SDL_Aout *aout, int pause_on)
- static void aout_flush_audio(SDL_Aout *aout)
static void aout_set_volume(SDL_Aout *aout, float left_volume, float right_volume)
- 第一个为音频播放器的BufferQueue设置的回调函数,每从队列中取出一条数据执行一次,这个可以理解,队列中去除一条数据,立刻唤醒线程Enqueue数据。
- 第二个为关闭音频播放器的时候调用的函数,立马退出线程。
- 第三个为暂停/播放音频播放器函数,马上设置播放器状态。
- 第四个为清空BufferQueue时调用的函数,立刻唤醒线程Enqueue数据。
第五个为设置音量函数,马上设置音量。
通过调用如下函数生成插入到BufferQueue中的数据 :
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len){
...
if (is->audio_buf_index >= is->audio_buf_size) {
//如果buffer中没有数据了,生成新数据。
audio_size = audio_decode_frame(ffp);
...
if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
//直接拷贝到stream
memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
else {
memset(stream, 0, len1);
if (!is->muted && is->audio_buf)
//进行音量调整和混音
SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
}
}
生成新数据的函数不是对音频数据进行解码,而是对帧数据进行了二次处理,对音频进行了必要的重采样或者变速变调。
static int audio_decode_frame(FFPlayer *ffp){
...
//重采样
len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
...
//音频变速变调
int ret_len = ijk_soundtouch_translate(is->handle, is->audio_new_buf, (float)(ffp->pf_playback_rate), (float)(1.0f/ffp->pf_playback_rate),
resampled_data_size / 2, bytes_per_sample, is->audio_tgt.channels, af->frame->sample_rate);
...
//最后将数据保存到audio_buf中
is->audio_buf = (uint8_t*)is->audio_new_buf;
...
}
最后一个比较让人困惑的问题是何时才会清理BufferQueue,看一下清理的命令是在何时发出的:
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
...
if (is->auddec.pkt_serial != is->audioq.serial) {
is->audio_buf_index = is->audio_buf_size;
memset(stream, 0, len);
// stream += len;
// len = 0;
SDL_AoutFlushAudio(ffp->aout);
break;
}
...
}
它是在音频输出线程中获取即将插入到BufferQueue的音频数据,调用回调函数时发出的,发出的条件如上所示,其中pkt_serial 为从PacketQueue队列中取出的需要解码的packet的serial,serial为当前PacketQueue队列的serial。也就是说,如果两者不等,就需要清理BufferQueue。这里的serial是要保证前后数据包的连续性,例如发生了Seek,数据不连续,就需要清理旧数据。
注:在播放器中的VideoState成员中,audioq和解码成员auddec中的queue是同一个队列。
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
结束语
笔者从头到尾把和音频输出相关的自认为重要的源码做了一些解释和记录,有些细节没有去深入研究。以后有时间慢慢学习。
参考
以上是关于Android录制音频并使用ijkplayer播放的主要内容,如果未能解决你的问题,请参考以下文章
Android:使用 audiorecord 类录制音频播放快进
从 MIDI 键盘录制音频文件并使用 android studio 存储
Android:使用 MediaRecorder 录制音频 - 文件不播放