UE4 使用WASAPI获取系统音频

Posted Litmin

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了UE4 使用WASAPI获取系统音频相关的知识,希望对你有一定的参考价值。

多线程+WASAPI获取系统音频:

#pragma once

#include "CoreMinimal.h"
//MultiThread
#include "Runnable.h"
#include "RunnableThread.h"
#include "ThreadSafeCounter.h"
#include "ThreadSafeBool.h"

struct IAudioClient;
struct IAudioCaptureClient;
typedef LONGLONG REFERENCE_TIME;
typedef struct tWAVEFORMATEX WAVEFORMATEX;
/**
 * Captures the system (loopback) audio output on a dedicated worker thread
 * via WASAPI and buffers the captured PCM bytes in m_audioData for a
 * consumer (e.g. the Zego SDK) to drain with CopyData2Zego().
 *
 * FIX: the class was declared as `AudiostreamThread` (lower-case 's') while
 * the constructor and every member definition use `AudioStreamThread`, so
 * the original could not compile. Name corrected to match the definitions.
 */
class TESTJIGOU_API AudioStreamThread : public FRunnable
{
public:
    AudioStreamThread();
    ~AudioStreamThread();

    // Ask the capture loop to idle (the thread keeps running).
    void PauseThread();
    // Resume the capture loop after PauseThread().
    void ContinueThread();
    // Request shutdown and block until the worker thread has finished.
    void StopThread();

    bool IsThreadPaused();
    bool IsThreadKilled();

    // Copy dataLen buffered bytes into 'data' and drop them from the buffer.
    void CopyData2Zego(unsigned char* data,int dataLen);
public:
    // Mix format negotiated with the audio client (rewritten to 16-bit PCM).
    WAVEFORMATEX* m_Format;
    // Captured PCM bytes; guarded by m_mutex.
    TArray<unsigned char> m_audioData;
private:
    FRunnableThread* Thread;
    FThreadSafeCounter StopTaskCounter;
    FCriticalSection m_mutex;
private:

    int curIndex;
    // WASAPI state
    IAudioClient* m_Client;
    IAudioCaptureClient* m_Capture;
    REFERENCE_TIME m_WaitTime;   // device period, in 100-nanosecond units
    // WASAPI helpers
    void SetWaveFormat(WAVEFORMATEX& format);
    bool InitLoopBack();
    void AudioStreamLoop();
    void StopAudioStream();
private:
    // FRunnable interface
    virtual bool Init() override;
    virtual uint32 Run() override;
    virtual void Stop() override;
    virtual void Exit() override;
private:
    FThreadSafeBool m_Kill;
    FThreadSafeBool m_Pause;
};
#include "AudioStreamThread.h"
#include "WindowsPlatformProcess.h"
//WASAPI
#include <mmdeviceapi.h>
#include <AudioClient.h>


// Initialize all state, then spawn the worker thread last so every member is
// in a defined state before Init()/Run() can observe it.
AudioStreamThread::AudioStreamThread()
    : m_Format(nullptr)
    , Thread(nullptr)
    , curIndex(0)
    , m_Client(nullptr)
    , m_Capture(nullptr)
    , m_Kill(false)
    , m_Pause(false)
{
    Thread = FRunnableThread::Create(this, TEXT("AudioStreamThread"), 0, TPri_Normal);
}

// Destructor.
// FIX: the original deleted the FRunnableThread without stopping or joining
// the worker first; if Run() was still executing it would keep touching
// members of this (now destroyed) object — a use-after-free. Signal shutdown
// and wait for completion before deleting the thread wrapper.
AudioStreamThread::~AudioStreamThread()
{
    if (Thread)
    {
        Stop();
        Thread->WaitForCompletion();
        delete Thread;
        Thread = nullptr;
    }
}

// Ask the capture loop to idle; Run() checks m_Pause on each iteration.
void AudioStreamThread::PauseThread()
{
    m_Pause = true;
}

// Resume capturing after PauseThread().
void AudioStreamThread::ContinueThread()
{
    m_Pause = false;
}

// Signal shutdown, then block the caller until the worker thread has exited.
void AudioStreamThread::StopThread()
{
    Stop();
    if (Thread == nullptr)
    {
        return;
    }
    Thread->WaitForCompletion();
}

// Returns true while the capture loop is paused.
bool AudioStreamThread::IsThreadPaused()
{
    const bool bPaused = m_Pause;
    return bPaused;
}

// Returns true once shutdown has been requested via Stop()/StopThread().
bool AudioStreamThread::IsThreadKilled()
{
    const bool bKilled = m_Kill;
    return bKilled;
}

void AudioStreamThread::CopyData2Zego(unsigned char* data, int dataLen)
{
    m_mutex.Lock();
    if (m_audioData.Num() >= dataLen)
    {
        memcpy(data, m_audioData.GetData(), dataLen);
        //curIndex += dataLen;
        m_audioData.RemoveAt(0, dataLen);
    }
    m_mutex.Unlock();
}

// Rewrite the shared-mode mix format (typically 32-bit float) in place so the
// capture stream is initialized for 16-bit integer PCM instead.
// NOTE(review): the 16-bit fields at the bottom are forced unconditionally,
// even when an EXTENSIBLE SubFormat was neither float nor PCM — confirm the
// mix format on target devices is always float/PCM before relying on this.
void AudioStreamThread::SetWaveFormat(WAVEFORMATEX& format)
{
    if (format.wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
    {
        format.wFormatTag = WAVE_FORMAT_PCM;
    }
    else if (format.wFormatTag == WAVE_FORMAT_EXTENSIBLE)
    {
        // For WAVEFORMATEXTENSIBLE the actual sample type lives in SubFormat.
        PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(&format);
        if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat))
        {
            pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
            pEx->Samples.wValidBitsPerSample = 16;
        }
    }
    // Recompute the derived fields for 16 bits per sample.
    format.wBitsPerSample = 16;
    format.nBlockAlign = format.nChannels * format.wBitsPerSample / 8;
    format.nAvgBytesPerSec = format.nBlockAlign * format.nSamplesPerSec;
}

bool AudioStreamThread::InitLoopBack()
{
    IMMDeviceEnumerator* enumerator;
    HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), reinterpret_cast<void**>(&enumerator));
    IMMDevice* device;
    hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
    hr = device->Activate(__uuidof(IAudioClient), CLSCTX_SERVER, NULL, reinterpret_cast<void**>(&m_Client));

    hr = m_Client->GetMixFormat(&m_Format);
    SetWaveFormat(*m_Format);
    hr = m_Client->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, m_Format, NULL);

    hr = m_Client->GetService(__uuidof(IAudioCaptureClient), reinterpret_cast<void**>(&m_Capture));

    hr = m_Client->GetDevicePeriod(&m_WaitTime, NULL);
    hr = m_Client->Start();

    if (enumerator != nullptr)
    {
        enumerator->Release();
    }
    if (device != nullptr)
    {
        device->Release();
    }

    return SUCCEEDED(hr);
}

// Drain one capture packet from WASAPI into m_audioData.
// NOTE: does NOT take m_mutex itself — a caller holding the lock (as the
// inline copy in Run() does) must provide synchronization.
// FIX: IAudioCaptureClient::GetBuffer reports 'size' in FRAMES, not bytes;
// the original appended only 'size' bytes and truncated every packet. Use
// frames * nBlockAlign, matching the (correct) inline version in Run().
// Also removed the unused 'formatData' local.
void AudioStreamThread::AudioStreamLoop()
{
    BYTE* data = nullptr;
    UINT32 size = 0;          // frame count, not bytes
    DWORD flags = 0;
    UINT64 device = 0, performance = 0;
    HRESULT hr = m_Capture->GetNextPacketSize(&size);

    hr = m_Capture->GetBuffer(&data, &size, &flags, &device, &performance);
    const int byteWrite = size * m_Format->nBlockAlign;
    if (data != nullptr)
    {
        m_audioData.Append(data, byteWrite);
    }
    hr = m_Capture->ReleaseBuffer(size);
}

 // Intentionally empty stub: WASAPI teardown (m_Client->Stop(), Release of
 // m_Client/m_Capture, CoTaskMemFree of m_Format) is not implemented yet.
 void AudioStreamThread::StopAudioStream()
 {
 
 }

// FRunnable: runs on the worker thread before Run(); returning false aborts
// the thread. All WASAPI setup happens here.
bool AudioStreamThread::Init()
{
    return InitLoopBack();
}

// FRunnable: worker loop — repeatedly drains WASAPI capture packets into
// m_audioData until Stop() is requested.
// FIXES:
//  * The sleep expression `(m_WaitTime)/2/(10*1000)/1000` was all-integer
//    division; for any realistic device period it truncated to 0 seconds and
//    the loop busy-waited. The conversion is now done in floating point
//    (m_WaitTime is in 100-ns units, so seconds = value / 1e7).
//  * The paused branch also spun without yielding; it now sleeps briefly.
//  * Manual Lock()/Unlock() replaced with RAII FScopeLock.
//  * Removed the unused 'formatData' local.
uint32 AudioStreamThread::Run()
{
    // Brief initial wait so the stream started in Init() settles.
    FPlatformProcess::Sleep(0.03f);
    while (StopTaskCounter.GetValue() == 0 && !m_Kill)
    {
        if (m_Pause)
        {
            if (m_Kill)
            {
                return 0;
            }
            // Don't busy-spin while paused.
            FPlatformProcess::Sleep(0.01f);
        }
        else
        {
            {
                FScopeLock Lock(&m_mutex);

                // Pull the next capture packet. Kept inline (rather than
                // calling AudioStreamLoop) so the buffer append happens
                // under this lock.
                BYTE* data = nullptr;
                UINT32 size = 0;      // frames, not bytes
                DWORD flags = 0;
                UINT64 device = 0, performance = 0;
                HRESULT hr = m_Capture->GetNextPacketSize(&size);

                hr = m_Capture->GetBuffer(&data, &size, &flags, &device, &performance);
                // Frames -> bytes before appending to the byte buffer.
                const int byteWrite = size * m_Format->nBlockAlign;
                if (data != nullptr)
                {
                    m_audioData.Append(data, byteWrite);
                }
                hr = m_Capture->ReleaseBuffer(size);
            }

            // Sleep half the device period (100-ns units -> seconds).
            FPlatformProcess::Sleep(static_cast<float>(m_WaitTime) / 2.0f / 10000000.0f);
        }
    }
    return 0;
}

// FRunnable: request termination. Safe to call from any thread; Run() polls
// both the counter and m_Kill.
void AudioStreamThread::Stop()
{
    m_Kill = true;     // tell Run() to leave its loop
    m_Pause = false;   // wake a paused loop so it can observe m_Kill
    StopTaskCounter.Increment();
}

// FRunnable: called on the worker thread after Run() returns; no per-thread
// cleanup is performed here.
void AudioStreamThread::Exit()
{

}

音频数据存储在 m_audioData(与代码中的成员变量名一致)。因为是实时获取,CopyData2Zego 取出相应长度的数据后就会把它从缓冲区中删除;如果要实现录音功能,改为保留(不删除)已取出的数据即可。

以上是关于UE4 使用WASAPI获取系统音频的主要内容,如果未能解决你的问题,请参考以下文章

直接渲染到 WASAPI 时,两个流之一没有音频输出

使用 WASAPI 录制音频流

使用 wasapi 渲染音频的问题

使用 WASAPI 捕获蓝牙音频数据

如何在 Windows 中从任何格式转换为 PCM

尝试使用 Win32 WASAPI C++ 中的“捕获流”创建 wav 文件