Audio/Video Synchronization: Implementing a Video Player

Posted by 薛萌



The implementation idea:
three threads and two queues,
organized in a producer-consumer pattern.
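Concretely: one read thread acts as the producer, and one decode thread per stream acts as a consumer, with a packet queue between them:

[read thread] --AVPacket--> [video packet queue] --> [video decode thread] --> ANativeWindow
              --AVPacket--> [audio packet queue] --> [audio decode thread] --> AudioTrack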


First, define a struct to hold the player's global state:

typedef struct _Player Player;
typedef struct _DecoderData DecoderData;

struct _Player {

    JavaVM *javaVM;

    AVFormatContext *input_format_ctx;
    //index positions of the video and audio streams
    int video_stream_index;
    int audio_stream_index;
    //total number of streams in the input
    int captrue_stream_no;

    //decoder contexts
    AVCodecContext *input_codec_ctx[MAX_STREAM];
    //decode thread ids
    pthread_t decode_threads[MAX_STREAM];
    ANativeWindow* nativeWindow;

    SwrContext *swr_ctx;
    //input sample format
    enum AVSampleFormat in_sample_fmt;
    //output sample format
    enum AVSampleFormat out_sample_fmt;
    //input sample rate
    int in_sample_rate;
    //output sample rate
    int out_sample_rate;
    //number of output channels
    int out_channel_nb;

    //JNI
    jobject audio_track;
    jmethodID audio_track_write_mid;

    pthread_t thread_read_from_stream;
    //audio and video packet queues
    Queue *packets[MAX_STREAM];

    //mutex
    pthread_mutex_t mutex;
    //condition variable
    pthread_cond_t cond;

    //wall-clock time playback started (microseconds)
    int64_t start_time;
    //last audio PTS, rescaled to microseconds
    int64_t audio_clock;
};
/**
 * Per-thread decoding data
 */
struct _DecoderData {
    Player *player;
    int stream_index;
};

Because the worker threads need a JNIEnv *env, first cache the JavaVM in the main thread:

//cache the JavaVM (needed to obtain a JNIEnv inside worker threads)
(*env)->GetJavaVM(env, &(player->javaVM));

Inside a worker thread, a JNIEnv is obtained like this:

        .......................
        /**
         * Attach the current thread and get its JNIEnv
         */
        JavaVM *javaVM = player->javaVM;
        JNIEnv *env;
        //note: attach through (*javaVM)
        (*javaVM)->AttachCurrentThread(javaVM, &env, NULL);
        .........................
        //detach the thread
        (*javaVM)->DetachCurrentThread(javaVM);
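One caveat worth stating explicitly: every AttachCurrentThread must be paired with a DetachCurrentThread before the thread exits; on Android, a native thread that exits while still attached causes the VM to abort.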

Initialize the demuxer (format) context:

/**
 * Initialize the format context and find the index positions
 * of the audio and video streams
 */
void init_input_format_ctx(Player *player, const char* input_cstr) {
    //register all components
    av_register_all();
    //format context
    AVFormatContext *format_ctx = avformat_alloc_context();
    //open the input file
    if (avformat_open_input(&format_ctx, input_cstr, NULL, NULL) != 0) {
        LOGI("%s", "failed to open the input file");
        return;
    }
    //read stream information
    if (avformat_find_stream_info(format_ctx, NULL) < 0) {
        LOGI("%s", "failed to read stream information");
        return;
    }
    player->captrue_stream_no = format_ctx->nb_streams;
    //to decode, we need the index of each AVStream in format_ctx->streams
    //find the audio and video stream indices
    int i;
    for (i = 0; i < player->captrue_stream_no; i++) {
        if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            player->video_stream_index = i;
        } else if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            player->audio_stream_index = i;
        }
    }
    player->input_format_ctx = format_ctx;
}
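As a side note, FFmpeg also provides av_find_best_stream(), which can replace the manual loop above. A minimal sketch (using the same format_ctx and Player fields as above):

//Sketch: locate the streams with av_find_best_stream() instead of looping
//over format_ctx->streams; it returns a negative error code if no match.
int video_idx = av_find_best_stream(format_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
int audio_idx = av_find_best_stream(format_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if (video_idx >= 0) player->video_stream_index = video_idx;
if (audio_idx >= 0) player->audio_stream_index = audio_idx;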

Find the audio and video decoders and open them:

    //find and open the audio and video decoders
    int video_stream_index = player->video_stream_index;
    int audio_stream_index = player->audio_stream_index;
    init_codec_context(player, video_stream_index);
    init_codec_context(player, audio_stream_index);

/**
 * Initialize a decoder
 */
void init_codec_context(Player *player, int stream_idx) {
    AVFormatContext *format_ctx = player->input_format_ctx;
    //get the decoder:
    //use the index to get the stream, and the stream to get its codec context
    AVCodecContext *codec_ctx = format_ctx->streams[stream_idx]->codec;
    //then use the codec id from the context to look up the decoder
    AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (codec == NULL) {
        LOGI("%s", "decoder not found");
        return;
    }
    //open the decoder
    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        LOGI("%s", "failed to open the decoder");
        return;
    }
    player->input_codec_ctx[stream_idx] = codec_ctx;
}
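Note that reading AVStream->codec directly is deprecated in FFmpeg 3.x and later. If you build against a newer FFmpeg, a rough equivalent of init_codec_context (an untested sketch, not the author's code) allocates its own context from the stream's parameters:

//Sketch for newer FFmpeg: avoid the deprecated AVStream->codec field.
void init_codec_context_modern(Player *player, int stream_idx) {
    AVStream *stream = player->input_format_ctx->streams[stream_idx];
    //look up the decoder from the stream's codec parameters
    AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
    if (codec == NULL) {
        LOGI("%s", "decoder not found");
        return;
    }
    //allocate a fresh context and copy the stream parameters into it
    AVCodecContext *codec_ctx = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(codec_ctx, stream->codecpar);
    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        LOGI("%s", "failed to open the decoder");
        return;
    }
    player->input_codec_ctx[stream_idx] = codec_ctx;
}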

A series of further preparation steps follows (implementation code omitted here; see the complete listing below):

    decode_video_prepare(env, player, surface);
    decode_audio_prepare(player);

    jni_audio_prepare(env, jobj, player);

    player_alloc_queues(player);

    pthread_mutex_init(&player->mutex, NULL);
    pthread_cond_init(&player->cond, NULL);

Producer and consumer threads

//producer thread
    pthread_create(&(player->thread_read_from_stream), NULL, player_read_from_stream, (void*) player);

The producer thread continuously reads AVPackets and pushes them into the matching queue:

void *player_read_from_stream(void * arg) {
    int ret;
    Player *player = (Player *) arg;
    //encoded data (the AVPacket itself lives on the stack)
    AVPacket packet, *pkt = &packet;
    for (;;) {
        ret = av_read_frame(player->input_format_ctx, pkt);
        if (ret < 0) {
            break;
        }
        //pick the queue that matches AVPacket->stream_index
        Queue *queue = player->packets[pkt->stream_index];
        //lock
        pthread_mutex_lock(&player->mutex);
        AVPacket *packet_data = queue_push(queue, &player->mutex, &player->cond);
        *packet_data = packet;
        //unlock
        pthread_mutex_unlock(&player->mutex);
    }
}
    /*
     * consumer threads
     */
    //child thread for video decoding
    DecoderData data1 = { player, video_stream_index }, *decoder_data1 = &data1;
    pthread_create(&(player->decode_threads[video_stream_index]), NULL, decode_data, (void*) decoder_data1);
    //child thread for audio decoding
    DecoderData data2 = { player, audio_stream_index }, *decoder_data2 = &data2;
    pthread_create(&(player->decode_threads[audio_stream_index]), NULL, decode_data, (void*) decoder_data2);

Each consumer pops an AVPacket and uses its stream index to decide whether to decode video or audio:

/**
 * Consumer: decode thread function
 */
void *decode_data(void* arg) {
    DecoderData *decoder_data = (DecoderData *) arg;
    Player *player = decoder_data->player;
    int stream_index = decoder_data->stream_index;
    //get the AVPacket queue for this stream_index
    Queue *queue = player->packets[stream_index];

    int video_frame_count = 0;
    int audio_frame_count = 0;
    for (;;) {
        //lock
        pthread_mutex_lock(&player->mutex);
        AVPacket *packet = (AVPacket *) queue_pop(queue, &player->mutex, &player->cond);
        //unlock
        pthread_mutex_unlock(&player->mutex);
        if (stream_index == player->video_stream_index) {
            decode_video(player, packet);
            LOGI("video_frame_count:%d", video_frame_count++);
        } else if (stream_index == player->audio_stream_index) {
            decode_audio(player, packet);
            LOGI("audio_frame_count:%d", audio_frame_count++);
        }
    }
}

decode_video(player, packet);
decode_audio(player, packet);
These are the same as the two code sections above. At this point the two threads are completely independent, each consuming its own AVPackets, so audio and video drift apart. Synchronization is done by delaying: each stream compares its timestamps against a shared clock, the stream that runs ahead waits, and a stream that falls too far behind adjusts the clock itself.
Here is the delay code:

/**
 * Get the current playback time of the shared clock
 */
int64_t player_get_current_video_time(Player *player) {
    int64_t current_time = av_gettime();
    return current_time - player->start_time;
}
/**
 * Delay until the given stream time is due
 */
void player_wait_for_frame(Player *player, int64_t stream_time, int stream_no) {

    pthread_mutex_lock(&player->mutex);
    for (;;) {
        int64_t current_video_time = player_get_current_video_time(player);
        //PTS time minus the current clock time
        int64_t sleep_time = stream_time - current_video_time;
        if (sleep_time < -300000ll) {
            //more than 300 ms late: rebase the clock so this stream is on time again
            int64_t new_value = player->start_time - sleep_time;
            player->start_time = new_value;
            pthread_cond_broadcast(&player->cond);
        }
        if (sleep_time <= MIN_SLEEP_TIME_US) {
            break;
        }
        if (sleep_time >= 500000ll) {
            //cap a single wait at 500 ms
            sleep_time = 500000ll;
        }
        //wait for the given duration (the timeout is in milliseconds)
        pthread_cond_timeout_np(&player->cond, &player->mutex, sleep_time / 1000ll);
    }
    pthread_mutex_unlock(&player->mutex);
}
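To make the numbers concrete: if a frame's rescaled PTS (stream_time) is 2,000,000 µs and player_get_current_video_time() returns 1,950,000 µs, then sleep_time = 50,000 µs. That is above MIN_SLEEP_TIME_US (1000 µs), so the thread waits about 50 ms (sleep_time / 1000, since pthread_cond_timeout_np, a Bionic extension, takes its timeout in milliseconds) and then re-checks. If instead sleep_time came out below -300000 µs, the stream is hopelessly late: start_time is increased by |sleep_time|, which sets the shared clock back so the late stream is "on time" again, and pthread_cond_broadcast wakes the other stream's thread so it re-evaluates against the adjusted clock.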

At the synchronization points:
inside decode_video(player, packet); the code is

//------------------------video sync start-----------------------------
        //compute the delay
        int64_t pts = av_frame_get_best_effort_timestamp(yuv_frame);
        //rescale (convert between different time bases)
        int64_t time = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
        player_wait_for_frame(player, time, player->video_stream_index);
        //-------------------------video sync end-------------------------------

and inside decode_audio(player, packet); the code is

............
//-------------------------audio sync start---------------------------
        int64_t pts = packet->pts;
        if (pts != AV_NOPTS_VALUE) {
            player->audio_clock = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
            player_wait_for_frame(player, player->audio_clock + AUDIO_TIME_ADJUST_US, player->audio_stream_index);
        }
        //-------------------------audio sync end---------------------------
.............

Theoretical basis for audio/video synchronization:
DTS and PTS
DTS: Decoding Time Stamp, the time at which a packet should be decoded
PTS: Presentation Time Stamp, the time at which a frame should be displayed
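A worked example of the rescaling used above: if a video stream has time_base = 1/90000 (typical for MPEG-TS) and a frame's PTS is 180000, then av_rescale_q(180000, stream->time_base, AV_TIME_BASE_Q) gives 180000 × (1/90000) ÷ (1/1000000) = 2,000,000 µs, i.e. the frame is due 2 s into playback. Because both streams are rescaled onto the same microsecond clock (AV_TIME_BASE_Q = 1/1000000), their timestamps become directly comparable inside player_wait_for_frame.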


Complete code

#include "com_xuemeng_mylive_utils_XuemengPlayer.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <android/log.h>
#include <android/native_window.h>
#include <android/native_window_jni.h>

#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"xuemeng",FORMAT,##__VA_ARGS__);
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"xuemeng",FORMAT,##__VA_ARGS__);
#include "libyuv.h"
#include "queue.h"
//demuxing
#include "libavformat/avformat.h"
//decoding
#include "libavcodec/avcodec.h"
//scaling
#include "libswscale/swscale.h"
//resampling
#include "libswresample/swresample.h"

#define MAX_AUDIO_FRME_SIZE 48000 * 4
//nb_streams: a file may contain audio, video and subtitle streams
#define MAX_STREAM 2
typedef struct _Player Player;
typedef struct _DecoderData DecoderData;

#define MIN_SLEEP_TIME_US 1000ll
#define AUDIO_TIME_ADJUST_US -200000ll

struct _Player {

    JavaVM *javaVM;

    AVFormatContext *input_format_ctx;
    //index positions of the video and audio streams
    int video_stream_index;
    int audio_stream_index;
    //total number of streams in the input
    int captrue_stream_no;

    //decoder contexts
    AVCodecContext *input_codec_ctx[MAX_STREAM];
    //decode thread ids
    pthread_t decode_threads[MAX_STREAM];
    ANativeWindow* nativeWindow;

    SwrContext *swr_ctx;
    //input sample format
    enum AVSampleFormat in_sample_fmt;
    //output sample format
    enum AVSampleFormat out_sample_fmt;
    //input sample rate
    int in_sample_rate;
    //output sample rate
    int out_sample_rate;
    //number of output channels
    int out_channel_nb;

    //JNI
    jobject audio_track;
    jmethodID audio_track_write_mid;

    pthread_t thread_read_from_stream;
    //audio and video packet queues
    Queue *packets[MAX_STREAM];

    //mutex
    pthread_mutex_t mutex;
    //condition variable
    pthread_cond_t cond;

    //wall-clock time playback started (microseconds)
    int64_t start_time;
    //last audio PTS, rescaled to microseconds
    int64_t audio_clock;
};
/**
 * Per-thread decoding data
 */
struct _DecoderData {
    Player *player;
    int stream_index;
};
/**
 * Initialize the format context and find the index positions
 * of the audio and video streams
 */
void init_input_format_ctx(Player *player, const char* input_cstr) {
    //register all components
    av_register_all();
    //format context
    AVFormatContext *format_ctx = avformat_alloc_context();
    //open the input file
    if (avformat_open_input(&format_ctx, input_cstr, NULL, NULL) != 0) {
        LOGI("%s", "failed to open the input file");
        return;
    }
    //read stream information
    if (avformat_find_stream_info(format_ctx, NULL) < 0) {
        LOGI("%s", "failed to read stream information");
        return;
    }
    player->captrue_stream_no = format_ctx->nb_streams;
    //to decode, we need the index of each AVStream in format_ctx->streams
    //find the audio and video stream indices
    int i;
    for (i = 0; i < player->captrue_stream_no; i++) {
        if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            player->video_stream_index = i;
        } else if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            player->audio_stream_index = i;
        }
    }
    player->input_format_ctx = format_ctx;
}
/**
 * Initialize a decoder
 */
void init_codec_context(Player *player, int stream_idx) {
    AVFormatContext *format_ctx = player->input_format_ctx;
    //get the decoder:
    //use the index to get the stream, and the stream to get its codec context
    AVCodecContext *codec_ctx = format_ctx->streams[stream_idx]->codec;
    //then use the codec id from the context to look up the decoder
    AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (codec == NULL) {
        LOGI("%s", "decoder not found");
        return;
    }
    //open the decoder
    if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
        LOGI("%s", "failed to open the decoder");
        return;
    }
    player->input_codec_ctx[stream_idx] = codec_ctx;
}

/**
 * Get the current playback time of the shared clock
 */
int64_t player_get_current_video_time(Player *player) {
    int64_t current_time = av_gettime();
    return current_time - player->start_time;
}
/**
 * Delay until the given stream time is due
 */
void player_wait_for_frame(Player *player, int64_t stream_time, int stream_no) {

    pthread_mutex_lock(&player->mutex);
    for (;;) {
        int64_t current_video_time = player_get_current_video_time(player);
        //PTS time minus the current clock time
        int64_t sleep_time = stream_time - current_video_time;
        if (sleep_time < -300000ll) {
            //more than 300 ms late: rebase the clock so this stream is on time again
            int64_t new_value = player->start_time - sleep_time;
            player->start_time = new_value;
            pthread_cond_broadcast(&player->cond);
        }
        if (sleep_time <= MIN_SLEEP_TIME_US) {
            break;
        }
        if (sleep_time >= 500000ll) {
            //cap a single wait at 500 ms
            sleep_time = 500000ll;
        }
        //wait for the given duration (the timeout is in milliseconds)
        pthread_cond_timeout_np(&player->cond, &player->mutex, sleep_time / 1000ll);
    }
    pthread_mutex_unlock(&player->mutex);
}

/**
 * Decode video
 */
void decode_video(Player *player, AVPacket *packet) {
    AVFormatContext *input_format_ctx = player->input_format_ctx;
    AVStream *stream = input_format_ctx->streams[player->video_stream_index];
    //pixel data (decoded frames)
    AVFrame *yuv_frame = av_frame_alloc();
    AVFrame *rgb_frame = av_frame_alloc();
    //buffer used for drawing
    ANativeWindow_Buffer outBuffer;
    AVCodecContext * codec_ctx = player->input_codec_ctx[player->video_stream_index];
    int got_frame;
    //decode AVPacket->AVFrame
    avcodec_decode_video2(codec_ctx, yuv_frame, &got_frame, packet);
    //non-zero when a frame was produced
    if (got_frame) {
        //lock
        //set the buffer properties (width, height, pixel format)
        ANativeWindow_setBuffersGeometry(player->nativeWindow, codec_ctx->width, codec_ctx->height,
                WINDOW_FORMAT_RGBA_8888);
        ANativeWindow_lock(player->nativeWindow, &outBuffer, NULL);
        //point rgb_frame's buffer at the window's pixels:
        //rgb_frame's buffer and outBuffer.bits are the same memory
        avpicture_fill((AVPicture *) rgb_frame, outBuffer.bits, AV_PIX_FMT_RGBA, codec_ctx->width, codec_ctx->height);
        //YUV->RGB 8888
        I420ToARGB(yuv_frame->data[0], yuv_frame->linesize[0], yuv_frame->data[2], yuv_frame->linesize[2],
                yuv_frame->data[1], yuv_frame->linesize[1], rgb_frame->data[0], rgb_frame->linesize[0],
                codec_ctx->width, codec_ctx->height);
        //------------------------video sync start-----------------------------
        //compute the delay
        int64_t pts = av_frame_get_best_effort_timestamp(yuv_frame);
        //rescale (convert between different time bases)
        int64_t time = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
        player_wait_for_frame(player, time, player->video_stream_index);
        //-------------------------video sync end-------------------------------
        //unlock
        ANativeWindow_unlockAndPost(player->nativeWindow);
    }
    av_frame_free(&yuv_frame);
    av_frame_free(&rgb_frame);
}

/**
 * Decode audio
 */
void decode_audio(Player *player, AVPacket *packet) {
    AVFormatContext *input_format_ctx = player->input_format_ctx;
    AVStream *stream = input_format_ctx->streams[player->audio_stream_index];
    AVCodecContext *code_ctx = player->input_codec_ctx[player->audio_stream_index];
    //decompressed data
    AVFrame *frame = av_frame_alloc();
    int got_frame;
    avcodec_decode_audio4(code_ctx, frame, &got_frame, packet);

    //buffer for the PCM data
    uint8_t *out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRME_SIZE);
    //non-zero when a frame was produced
    if (got_frame) {
        //note: swr_convert's third argument is the output capacity in samples
        //per channel, not in bytes
        swr_convert(player->swr_ctx, &out_buffer, MAX_AUDIO_FRME_SIZE, (const uint8_t **) frame->data,
                frame->nb_samples);
        //get the size of the converted samples in bytes
        int out_buffer_size = av_samples_get_buffer_size(NULL, player->out_channel_nb, frame->nb_samples,
                player->out_sample_fmt, 1);
        //-------------------------audio sync start---------------------------
        int64_t pts = packet->pts;
        if (pts != AV_NOPTS_VALUE) {
            player->audio_clock = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
            player_wait_for_frame(player, player->audio_clock + AUDIO_TIME_ADJUST_US, player->audio_stream_index);
        }
        //-------------------------audio sync end---------------------------
        /**
         * Attach the current thread and get its JNIEnv
         */
        JavaVM *javaVM = player->javaVM;
        JNIEnv *env;
        //attach through (*javaVM)
        (*javaVM)->AttachCurrentThread(javaVM, &env, NULL);
        //write to a file for testing
        //fwrite(out_buffer, 1, out_buffer_size, fp_pcm);
        //copy the out_buffer data into a Java byte array
        jbyteArray audio_sample_array = (*env)->NewByteArray(env, out_buffer_size);
        jbyte *sample_bytep = (*env)->GetByteArrayElements(env, audio_sample_array, NULL);
        //copy out_buffer into sample_bytep
        memcpy(sample_bytep, out_buffer, out_buffer_size);
        //commit (mode 0 copies back and releases the native copy)
        (*env)->ReleaseByteArrayElements(env, audio_sample_array, sample_bytep, 0);
        //AudioTrack.write the PCM data
        /**
         * Warning: player->audio_track must be a global reference, otherwise you get
         * an "accessed stale local reference 0x1d (index 7 in a table of size 1)" error
         */
        (*env)->CallIntMethod(env, player->audio_track, player->audio_track_write_mid, audio_sample_array, 0,
                out_buffer_size);
        //release the local reference
        (*env)->DeleteLocalRef(env, audio_sample_array);

        //detach the thread
        (*javaVM)->DetachCurrentThread(javaVM);

        usleep(1000 * 16);
    }
    //free the PCM buffer
    av_free(out_buffer);
    av_frame_free(&frame);
}

/**
 * Consumer: decode thread function
 */
void *decode_data(void* arg) {
    DecoderData *decoder_data = (DecoderData *) arg;
    Player *player = decoder_data->player;
    int stream_index = decoder_data->stream_index;
    //get the AVPacket queue for this stream_index
    Queue *queue = player->packets[stream_index];
    //read the compressed data one AVPacket at a time
    int video_frame_count = 0;
    int audio_frame_count = 0;
    for (;;) {
        //lock
        pthread_mutex_lock(&player->mutex);
        AVPacket *packet = (AVPacket *) queue_pop(queue, &player->mutex, &player->cond);
        //unlock
        pthread_mutex_unlock(&player->mutex);
        if (stream_index == player->video_stream_index) {
            decode_video(player, packet);
            LOGI("video_frame_count:%d", video_frame_count++);
        } else if (stream_index == player->audio_stream_index) {
            decode_audio(player, packet);
            LOGI("audio_frame_count:%d", audio_frame_count++);
        }
    }
}


/**
 * Producer: the read_stream thread keeps reading AVPackets from the file
 * and pushes each one into the matching queue
 */
void *player_read_from_stream(void * arg) {
    int ret;
    Player *player = (Player *) arg;
    //encoded data (the AVPacket itself lives on the stack)
    AVPacket packet, *pkt = &packet;
    for (;;) {
        ret = av_read_frame(player->input_format_ctx, pkt);
        if (ret < 0) {
            break;
        }
        //pick the queue that matches AVPacket->stream_index
        Queue *queue = player->packets[pkt->stream_index];
        //lock
        pthread_mutex_lock(&player->mutex);
        AVPacket *packet_data = queue_push(queue, &player->mutex, &player->cond);
        *packet_data = packet;
        //unlock
        pthread_mutex_unlock(&player->mutex);
    }
}

/**
 * Allocate space for one AVPacket; the stack AVPacket read by the producer
 * is later copied into slots allocated here
 */
void* player_fill_packet() {
    AVPacket *packet = malloc(sizeof(AVPacket));
    return packet;
}
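/*
 * Caveat: the queue recycles these AVPacket slots by value, but the payload
 * buffers that av_read_frame() allocates for each packet are never released
 * in this listing; a production player would call av_free_packet() (or
 * av_packet_unref() in newer FFmpeg) on each packet once it has been decoded.
 */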

/**
 * Initialize the audio and video AVPacket queues, each 50 slots long
 */
void player_alloc_queues(Player *player) {
    int i;
    for (i = 0; i < player->captrue_stream_no; ++i) {
        Queue *queue = queue_init(50, (queue_fill_func) player_fill_packet);
        player->packets[i] = queue;
    }
}

/**
 * Prepare for video decoding
 */
void decode_video_prepare(JNIEnv *env, Player *player, jobject surface) {
    player->nativeWindow = ANativeWindow_fromSurface(env, surface);
}

/**
 * Prepare for audio decoding
 */
void decode_audio_prepare(Player *player) {
    AVCodecContext *code_ctx = player->input_codec_ctx[player->audio_stream_index];
    //resampling options-----------------------------------------------------start
    //input sample format
    enum AVSampleFormat in_sample_fmt = code_ctx->sample_fmt;
    //output sample format: 16-bit PCM
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    //input sample rate
    int in_sample_rate = code_ctx->sample_rate;
    //output sample rate
    int out_sample_rate = 44100;
    //input channel layout
    uint64_t in_ch_layout = code_ctx->channel_layout;
    //output channel layout
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    //frame -> 16-bit 44100 PCM: normalize the sample format and sample rate
    SwrContext *swr_ctx = swr_alloc();
    swr_alloc_set_opts(swr_ctx, out_ch_layout, out_sample_fmt, out_sample_rate, in_ch_layout, in_sample_fmt,
            in_sample_rate, 0, NULL);
    swr_init(swr_ctx);
    //resampling options-----------------------------------------------------end
    //number of output channels
    int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);

    player->in_sample_fmt = in_sample_fmt;
    player->out_sample_fmt = out_sample_fmt;
    player->in_sample_rate = in_sample_rate;
    player->out_sample_rate = out_sample_rate;
    player->out_channel_nb = out_channel_nb;
    player->swr_ctx = swr_ctx;
}

/**
 * JNI preparation: cache the AudioTrack object and its write method
 */
void jni_audio_prepare(JNIEnv *env, jobject jthiz, Player *player) {
    //JNI calls---------------------------------------------------------------start
    //XuemengPlayer
    jclass player_class = (*env)->GetObjectClass(env, jthiz);
    //AudioTrack object
    jmethodID create_audio_track_mid = (*env)->GetMethodID(env, player_class, "createAudioTrack",
            "(II)Landroid/media/AudioTrack;");
    jobject audio_track = (*env)->CallObjectMethod(env, jthiz, create_audio_track_mid, player->out_sample_rate,
            player->out_channel_nb);
    //call AudioTrack.play
    jclass audio_track_class = (*env)->GetObjectClass(env, audio_track);
    jmethodID audio_track_play_mid = (*env)->GetMethodID(env, audio_track_class, "play", "()V");
    (*env)->CallVoidMethod(env, audio_track, audio_track_play_mid);
    //AudioTrack.write
    jmethodID audio_track_write_mid = (*env)->GetMethodID(env, audio_track_class, "write", "([BII)I");
    /**
     * store player->audio_track as a global reference
     */
    player->audio_track = (*env)->NewGlobalRef(env, audio_track);
    player->audio_track_write_mid = audio_track_write_mid;
    //JNI calls---------------------------------------------------------------end
}
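/*
 * Note on the method descriptors above: "(II)Landroid/media/AudioTrack;"
 * describes a Java method taking (int, int) and returning
 * android.media.AudioTrack, and "([BII)I" matches
 * AudioTrack.write(byte[], int, int) returning int.
 */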


JNIEXPORT void JNICALL Java_com_xuemeng_mylive_utils_XuemengPlayer_play(JNIEnv *env, jobject jobj, jstring input_jstr,
        jobject surface) {
    const char* input_cstr = (*env)->GetStringUTFChars(env, input_jstr, NULL);
    Player *player = (Player *) malloc(sizeof(Player));
    //cache the JavaVM (needed to obtain a JNIEnv inside worker threads)
    (*env)->GetJavaVM(env, &(player->javaVM));
    //initialize the format context
    init_input_format_ctx(player, input_cstr);
    //find and open the audio and video decoders
    int video_stream_index = player->video_stream_index;
    int audio_stream_index = player->audio_stream_index;
    init_codec_context(player, video_stream_index);
    init_codec_context(player, audio_stream_index);

    decode_video_prepare(env, player, surface);
    decode_audio_prepare(player);

    jni_audio_prepare(env, jobj, player);

    player_alloc_queues(player);

    pthread_mutex_init(&player->mutex, NULL);
    pthread_cond_init(&player->cond, NULL);

    //producer thread
    pthread_create(&(player->thread_read_from_stream), NULL, player_read_from_stream, (void*) player);
    sleep(1);
    player->start_time = 0;
    /*
     * consumer threads
     */
    //child thread for video decoding
    DecoderData data1 = { player, video_stream_index }, *decoder_data1 = &data1;
    pthread_create(&(player->decode_threads[video_stream_index]), NULL, decode_data, (void*) decoder_data1);
    //child thread for audio decoding
    DecoderData data2 = { player, audio_stream_index }, *decoder_data2 = &data2;
    pthread_create(&(player->decode_threads[audio_stream_index]), NULL, decode_data, (void*) decoder_data2);

    pthread_join(player->thread_read_from_stream, NULL);
    pthread_join(player->decode_threads[video_stream_index], NULL);
    pthread_join(player->decode_threads[audio_stream_index], NULL);
}

Queue implementation

#include "queue.h"
/**
 * A queue, used here to hold AVPacket pointers.
 * The queue is driven in producer-consumer fashion; at least two instances
 * are needed, one for audio AVPackets and one for video AVPackets.
 *  1. Producer: the read_stream thread keeps reading AVPackets from the
 *     file and pushes them into the two queues.
 *  2. Consumers:
 *   1) video decoding: take an element from the video AVPacket queue, decode, draw
 *   2) audio decoding: take an element from the audio AVPacket queue, decode, play
 */
struct _Queue {
    //capacity
    int size;

    //array of untyped pointers; here every element is an AVPacket pointer
    void **tab;
    //push and pop proceed in order through these cursors
    int next_to_write;
    int next_to_read;
    int *ready;
};

/**
 * Initialize the queue
 */
Queue* queue_init(int size, queue_fill_func fill_func) {
    Queue* queue = (Queue*) malloc(sizeof(Queue));
    queue->size = size;
    queue->next_to_write = 0;
    queue->next_to_read = 0;
    //allocate the pointer array
    queue->tab = malloc(sizeof(*queue->tab) * size);
    int i;
    for (i = 0; i < size; i++) {
        queue->tab[i] = fill_func();
    }
    return queue;
}


/**
 * Destroy the queue
 */
void queue_free(Queue* queue, queue_free_func free_func) {
    int i;
    for (i = 0; i < queue->size; i++) {
        //free each element through the callback
        free_func((void*) queue->tab[i]);
    }
    free(queue->tab);
    free(queue);
}


/**
 * Get the next index position (wrapping around)
 */
int queue_get_next(Queue *queue, int current) {
    return (current + 1) % queue->size;
}
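/*
 * With this full/empty convention (full when advancing next_to_write would
 * reach next_to_read, empty when the two cursors are equal), the ring
 * buffer holds at most size - 1 elements at a time.
 */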


/**
 * Push an element into the queue
 */
void* queue_push(Queue *queue, pthread_mutex_t *mutex, pthread_cond_t *cond) {
    int current = queue->next_to_write;
    int next_to_write;
    for (;;) {
        //if advancing the write cursor would hit the read cursor, the queue
        //is full; wait until the consumer has read something
        next_to_write = queue_get_next(queue, current);
        if (next_to_write != queue->next_to_read) {
            break;
        }
        //block
        pthread_cond_wait(cond, mutex);
    }
    queue->next_to_write = next_to_write;
    //notify
    pthread_cond_broadcast(cond);
    return queue->tab[current];
}


/**
 * Pop an element from the queue
 */
void* queue_pop(Queue *queue, pthread_mutex_t *mutex, pthread_cond_t *cond) {
    int current = queue->next_to_read;
    for (;;) {
        //if the read cursor equals the write cursor, the queue is empty;
        //wait until the producer has written something
        if (queue->next_to_write != queue->next_to_read) {
            break;
        }
        //block
        pthread_cond_wait(cond, mutex);
    }
    queue->next_to_read = queue_get_next(queue, current);
    //notify
    pthread_cond_broadcast(cond);
    return queue->tab[current];
}
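For reference, here is a minimal sketch of how this queue is meant to be driven from two threads (hypothetical helper functions, assuming the declarations in queue.h match the definitions above):

//Hypothetical usage sketch, not part of the player itself.
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

//producer side: reserve a slot (blocking while the queue is full)
//and copy a packet into it
void push_example(Queue *q, AVPacket *src) {
    pthread_mutex_lock(&mutex);
    AVPacket *slot = queue_push(q, &mutex, &cond);
    *slot = *src;
    pthread_mutex_unlock(&mutex);
}

//consumer side: take the oldest slot (blocking while the queue is empty)
AVPacket *pop_example(Queue *q) {
    pthread_mutex_lock(&mutex);
    AVPacket *pkt = queue_pop(q, &mutex, &cond);
    pthread_mutex_unlock(&mutex);
    return pkt;
}

Note that queue_push and queue_pop call pthread_cond_wait internally, which is why the caller must hold the mutex around each call, exactly as player_read_from_stream and decode_data do above.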
