@@ -0,0 +1,627 @@
+// libaudiorender.cpp : Defines the exported functions for the DLL application.
+//
+
+#include "stdafx.h"
+#define _CRT_SECURE_NO_WARNINGS
+
+#include "libaudiorender.h"
+#include "../rvcmediacommon/rvc_media_common.h"
+
+#ifndef RVC_AUDIO_FRAME_SIZE
+#define RVC_AUDIO_FRAME_SIZE 320
+#endif
+
+#ifndef RVC_AUDIO_FREQUENCY
+#define RVC_AUDIO_FREQUENCY 8000
+#endif
+
+#ifndef RVC_AUDIO_BUFFER_LEN
+#define RVC_AUDIO_BUFFER_LEN 512
+#endif
+
+#ifndef RVC_DEFAULT_BITPERSAMPLE
+#define RVC_DEFAULT_BITPERSAMPLE 16
+#endif
+
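+// Constructor: copies the caller-supplied callback table (if any) and resets all members.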
+AudioRenderImpl::AudioRenderImpl(audiorender_callback_t* pCallback)
+{
+    if (NULL != pCallback){
+        memcpy(&m_audiorender_cb, pCallback, sizeof(audiorender_callback_t));
+    }
+    else{
+        memset(&m_audiorender_cb, 0, sizeof(audiorender_callback_t));
+    }
+    m_pAudioCaptureClient = NULL;
+    m_pAudioClient = NULL;
+    m_pMMDevice = NULL;
+    m_hEventStop = NULL;
+    m_hTimerWakeUp = NULL;
+    m_hTask = NULL;
+    m_hThread = NULL;
+    m_pwfx = NULL;
+    m_iQueueNumber = 0;
+    m_audio_cap = NULL;
+    m_frame_format = NULL;
+    memset(m_strFilePathName, 0, MAX_PATH);
+    m_bRecordPCM = false;
+}
+
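+// Fills an audio_frame with the default destination format: 16-bit, mono, 8 kHz PCM, 320-byte frames.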
+bool AudioRenderImpl::InitAudioFrame(audio_frame* pframe)
+{
+    bool bret = false;
+    if (NULL != pframe){
+        pframe->bitspersample = RVC_DEFAULT_BITPERSAMPLE;
+        pframe->format = 1;
+        pframe->nchannels = 1;
+        pframe->samplespersec = RVC_AUDIO_FREQUENCY;
+        pframe->framesize = RVC_AUDIO_FRAME_SIZE;
+        pframe->data = NULL;
+
+        bret = true;
+    }
+
+    return bret;
+}
+
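+// Thread entry point: forwards to the instance's RenderFunc capture loop.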
+DWORD AudioRenderImpl::pfThreadFunc(LPVOID lpThreadParameter)
+{
+    AudioRenderImpl* pCapture = (AudioRenderImpl*)lpThreadParameter;
+    pCapture->RenderFunc();
+    return 0;
+}
+
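+// Logs the mix-format-to-destination conversion ratio and the speaker sample type.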
+void AudioRenderImpl::LogRenderInfo()
+{
+    uint32_t uConvertRatio = 1;
+    if (NULL != m_frame_format){
+        if (m_frame_format->samplespersec && m_frame_format->nchannels && m_frame_format->bitspersample){
+            uConvertRatio = (m_pwfx->nSamplesPerSec * m_pwfx->nChannels * m_pwfx->wBitsPerSample) /
+                (m_frame_format->samplespersec * m_frame_format->nchannels * m_frame_format->bitspersample);
+        }
+    }
+    RenderLog("current speaker render audio convert ratio is %d.", uConvertRatio);
+
+    eSpeakerSamplingDepthRate eSampleType = GetSpeakerSampleRate(m_pwfx->wBitsPerSample, m_pwfx->nSamplesPerSec);
+    RenderLog("Speaker Sample Type is %d.", eSampleType);
+}
+
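+// Forwards printf-style log messages to the debug callback supplied by the caller.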
+void AudioRenderImpl::RenderLog(const char *fmt, ...)
+{
+    if (NULL != m_audiorender_cb.debug){
+        va_list arg;
+        va_start(arg, fmt);
+        (*m_audiorender_cb.debug)(m_audiorender_cb.user_data, fmt, arg);
+        va_end(arg);
+    }
+}
+
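+// Notification hooks for render failure / exception handling; currently no-ops.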
+void AudioRenderImpl::OnRenderFailed()
+{
+
+}
+
+void AudioRenderImpl::OnAudioRenderExcption()
+{
+
+}
+
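+// Sets up the destination frame format, the optional PCM dump file and the shared-memory
+// queue, then opens a WASAPI loopback capture on the default render endpoint and starts
+// the capture thread. Returns 0 on success, -1 on failure.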
+int AudioRenderImpl::StartRender(int iQueue, void* pFrameformat, const char* pSwitchName)
+{
+    m_iQueueNumber = iQueue;
+
+    if (NULL != m_frame_format){
+        delete m_frame_format;
+        m_frame_format = NULL;
+    }
+
+    m_frame_format = new audio_frame();
+    if (NULL != m_frame_format){
+        if (NULL != pFrameformat){
+            m_frame_format->bitspersample = ((audio_frame*)pFrameformat)->bitspersample;
+            m_frame_format->nchannels = ((audio_frame*)pFrameformat)->nchannels;
+            m_frame_format->format = ((audio_frame*)pFrameformat)->format;
+            m_frame_format->samplespersec = ((audio_frame*)pFrameformat)->samplespersec;
+            m_frame_format->framesize = ((audio_frame*)pFrameformat)->framesize;
+        }
+        else{
+            InitAudioFrame(m_frame_format);
+        }
+    }
+
+    if (NULL != pSwitchName){
+        size_t ulen = strlen(pSwitchName);
+        if (ulen >= MAX_PATH){
+            ulen = MAX_PATH - 1;
+        }
+        memcpy(m_strFilePathName, pSwitchName, ulen);
+        m_strFilePathName[ulen] = '\0';
+        if (strlen(m_strFilePathName) > 0){
+            m_bRecordPCM = true;
+        }
+    }
+
+    if (0 != InitQueueInfo(iQueue)){
+        return -1;
+    }
+
+    CoInitialize(NULL);
+
+    IMMDeviceEnumerator *pMMDeviceEnumerator = NULL;
+    HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
+        __uuidof(IMMDeviceEnumerator), (void**)&pMMDeviceEnumerator);
+    if (FAILED(hr)){
+        CoUninitialize();
+        return -1;
+    }
+
+    // get the default render endpoint
+    hr = pMMDeviceEnumerator->GetDefaultAudioEndpoint(eRender, eConsole, &m_pMMDevice);
+    pMMDeviceEnumerator->Release();
+    if (FAILED(hr)){
+        CoUninitialize();
+        return -1;
+    }
+
+    m_hEventStop = CreateEvent(NULL, TRUE, FALSE, NULL);
+    if (m_hEventStop == NULL){
+        CoUninitialize();
+        return -1;
+    }
+
+    hr = m_pMMDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&m_pAudioClient);
+    if (FAILED(hr)){
+        goto error;
+    }
+
+    REFERENCE_TIME hnsDefaultDevicePeriod(0);
+
+    if (NULL != m_pAudioClient){
+        hr = m_pAudioClient->GetDevicePeriod(&hnsDefaultDevicePeriod, NULL);
+        if (FAILED(hr)){
+            goto error;
+        }
+    }
+    else{
+        goto error;
+    }
+
+    hr = m_pAudioClient->GetMixFormat(&m_pwfx);
+    if (FAILED(hr)){
+        goto error;
+    }
+    else{
+        RenderLog("speaker render format is 0x%x, channels is %d, samples rate is %d, average bytes per second is %d, block align is %d.", m_pwfx->wFormatTag, m_pwfx->nChannels, m_pwfx->nSamplesPerSec, m_pwfx->nAvgBytesPerSec, m_pwfx->nBlockAlign);
+        RenderLog("destination audio frame format is %d, samples rate is %d, bits per sample is %d, channels number is %d.", m_frame_format->format, m_frame_format->samplespersec, m_frame_format->bitspersample, m_frame_format->nchannels);
+    }
+
+    if (!AdjustFormatTo16Bits(m_pwfx)){
+        goto error;
+    }
+
+    m_hTimerWakeUp = CreateWaitableTimer(NULL, FALSE, NULL);
+    if (m_hTimerWakeUp == NULL){
+        goto error;
+    }
+
+    hr = m_pAudioClient->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_LOOPBACK, 0, 0, m_pwfx, 0);
+    if (FAILED(hr)){
+        goto error;
+    }
+
+    hr = m_pAudioClient->GetService(__uuidof(IAudioCaptureClient), (void**)&m_pAudioCaptureClient);
+    if (FAILED(hr)){
+        goto error;
+    }
+
+    DWORD nTaskIndex = 0;
+    m_hTask = AvSetMmThreadCharacteristics("Capture", &nTaskIndex);
+    if (NULL == m_hTask){
+        goto error;
+    }
+
+    LARGE_INTEGER liFirstFire;
+    liFirstFire.QuadPart = -hnsDefaultDevicePeriod / 2; // negative means relative time
+    LONG lTimeBetweenFires = (LONG)hnsDefaultDevicePeriod / 2 / (10 * 1000); // convert 100-ns units to milliseconds
+
+    BOOL bOK = SetWaitableTimer(m_hTimerWakeUp, &liFirstFire, lTimeBetweenFires, NULL, NULL, FALSE);
+    if (!bOK){
+        goto error;
+    }
+
+    hr = m_pAudioClient->Start();
+    if (FAILED(hr)){
+        goto error;
+    }
+
+    m_hThread = CreateThread(NULL, 0, pfThreadFunc, this, 0, 0);
+    if (m_hThread == NULL){
+        goto error;
+    }
+
+    CoUninitialize();
+
+    return 0;
+
+error:
+    Close();
+    CoUninitialize();
+
+    return -1;
+}
+
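+// Extracts one channel from interleaved stereo data by copying the first (left) sample of
+// each frame. Assumes 16-bit samples, i.e. uBitDeepth == 2. Returns the number of bytes of
+// single-channel data produced, or 0 if the destination buffer was too small.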
+uint32_t AudioRenderImpl::ConvertDouble2SingleChannel(char* pDstBuf, const uint32_t uDstLen, const unsigned char* pSrcBuf, uint32_t uSrcLen, uint32_t uBitDeepth)
+{
+    uint32_t uRet = 0;
+    uint32_t uOneChannelLen = uSrcLen / 2;
+    uint32_t i = 0;
+
+    for (; i < uOneChannelLen / 2 && i < uDstLen / uBitDeepth; i++){
+        memcpy((uint16_t*)pDstBuf + i, ((uint32_t*)(pSrcBuf)) + i, uBitDeepth);
+    }
+
+    if (i == uOneChannelLen / 2){
+        uRet = uOneChannelLen;
+    }
+
+    return uRet;
+}
+
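+// Maps a bit depth / sample rate pair onto the eSpeakerSamplingDepthRate enum used for logging.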
+eSpeakerSamplingDepthRate AudioRenderImpl::GetSpeakerSampleRate(uint32_t udepth, uint32_t usamplerate)
+{
+    eSpeakerSamplingDepthRate eType = eSixteenBitsDVD;
+
+    if (16 == udepth){
+        switch (usamplerate)
+        {
+        case 44100:
+            eType = eSixteenBitsCD;
+            break;
+        case 48000:
+            eType = eSixteenBitsDVD;
+            break;
+        case 96000:
+            eType = eSixteenBitsStatdioLow;
+            break;
+        case 192000:
+            eType = eSixteenBitsStatdioHigh;
+            break;
+        }
+    }
+    else if (24 == udepth){
+        switch (usamplerate)
+        {
+        case 44100:
+            eType = eTwentyfourStatdioA;
+            break;
+        case 48000:
+            eType = eTwentyfourStatdioB;
+            break;
+        case 96000:
+            eType = eTwentyfourStatdioC;
+            break;
+        case 192000:
+            eType = eTwentyfourStatdioD;
+            break;
+        }
+    }
+
+    return eType;
+}
+
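+// Creates the shared-memory audio queue: queue 0 maps to REC_COMMON_REMOTEAUDIO_SHM_QUEUE,
+// any other queue number to REC_COMMON_AUDIO_SALES_SHM_QUEUE.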
+int AudioRenderImpl::InitQueueInfo(int iQueue)
+{
+    int iRet = -1;
+    char* pQueueName = NULL;
+
+    if (0 == iQueue){
+        pQueueName = REC_COMMON_REMOTEAUDIO_SHM_QUEUE;
+    }
+    else {
+        pQueueName = REC_COMMON_AUDIO_SALES_SHM_QUEUE;
+    }
+
+    if (NULL != pQueueName){
+        m_audio_cap = new Clibaudioqueue(pQueueName);
+        if (NULL != m_audio_cap){
+            iRet = 0;
+            RenderLog("audio render insert queue name is %s.", pQueueName);
+        }
+    }
+
+    return iRet;
+}
+
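+// Converts one captured WASAPI buffer to the destination frame format: reduces stereo to
+// mono by keeping a single channel when required, then resamples with the Speex resampler
+// into pOutAudio. Returns the resampled length (also stored in *pAudioLen), or 0 on failure.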
+uint32_t AudioRenderImpl::TranslateBuffer2DestFrameFormat(spx_int16_t* pOutAudio, spx_uint32_t* pAudioLen, spx_uint32_t uAudioBufferLen, unsigned char* pCbBuffer, const uint32_t uBufferLen, SpeexResamplerState *st, const audio_frame* pDestFrameFormat)
+{
+    uint32_t uRet = 0;
+
+    uint32_t uSingleChannelDataLen = uBufferLen;
+    uint32_t uSingleChannelBufferLen = 0;
+    char* pSingleChannelBuf = (char*)malloc(uBufferLen);
+    if (NULL == pSingleChannelBuf){
+        return 0;
+    }
+    memset(pSingleChannelBuf, 0, uBufferLen);
+    uSingleChannelBufferLen = uBufferLen;
+
+    if (eDoubleChannel == m_pwfx->nChannels){
+        if (eSingleChannel == pDestFrameFormat->nchannels){
+            uSingleChannelDataLen = ConvertDouble2SingleChannel(pSingleChannelBuf, uSingleChannelBufferLen, pCbBuffer, uBufferLen, m_pwfx->wBitsPerSample / 8);
+        }
+        else{
+            memcpy(pSingleChannelBuf, pCbBuffer, uBufferLen);
+        }
+    }
+    else{
+        if (eSingleChannel == pDestFrameFormat->nchannels){
+            memcpy(pSingleChannelBuf, pCbBuffer, uBufferLen);
+        }
+        else{
+            RenderLog("converting single channel to double channels is not supported.");
+        }
+    }
+
+    spx_uint32_t uInLen = uSingleChannelDataLen;
+
+    int iRet = speex_resampler_process_int(st, 0, (spx_int16_t*)pSingleChannelBuf, &uInLen, pOutAudio, &uAudioBufferLen);
+    if (RESAMPLER_ERR_SUCCESS == iRet){
+        *pAudioLen = uAudioBufferLen;
+        uRet = uAudioBufferLen;
+    }
+
+    free(pSingleChannelBuf);
+    pSingleChannelBuf = NULL;
+
+    return uRet;
+}
+
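+// Capture loop run on the worker thread: wakes on the waitable timer, reads loopback
+// packets from WASAPI, converts them to the destination format, accumulates full frames
+// and pushes them into the shared-memory queue (optionally dumping raw PCM to disk).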
+void AudioRenderImpl::RenderFunc()
+{
+    HANDLE waitArray[2] = { m_hEventStop, m_hTimerWakeUp };
+    DWORD dwWaitResult;
+    UINT32 nNextPacketSize(0);
+    BYTE *pData = NULL;
+    UINT32 nNumFramesToRead;
+    DWORD dwFlags;
+    CoInitialize(NULL);
+
+    SpeexResamplerState *st = NULL;
+    int err = 0;
+
+    spx_int16_t OutAudioBuffer[RVC_AUDIO_BUFFER_LEN] = {0};
+    spx_uint32_t uIndex = 0;
+    spx_uint32_t uValidAudioLen = 0;
+    spx_uint32_t uLeftBufferLen = RVC_AUDIO_BUFFER_LEN;
+    int iseriesnumber = 0;
+
+    FILE* pRecord = NULL;
+    if (m_bRecordPCM){
+        pRecord = fopen(m_strFilePathName, "wb+");
+    }
+
+    LogRenderInfo();
+
+    st = speex_resampler_init_frac(1, m_pwfx->nSamplesPerSec, m_frame_format->samplespersec, m_pwfx->nSamplesPerSec, m_frame_format->samplespersec, 0, &err);
+    if (NULL == st){
+        RenderLog("speex resampler init failed, err = %d.", err);
+    }
+
+    while (NULL != st){
+        dwWaitResult = WaitForMultipleObjects(sizeof(waitArray) / sizeof(waitArray[0]), waitArray, FALSE, INFINITE);
+        if (WAIT_OBJECT_0 == dwWaitResult) {
+            RenderLog("%s", "exit capture loop: stop event signaled.");
+            break;
+        }
+
+        if (WAIT_OBJECT_0 + 1 != dwWaitResult){
+            RenderLog("%s", "exit capture loop: unexpected wait result.");
+            break;
+        }
+
+        HRESULT hr = S_OK;
+        if (NULL != m_pAudioCaptureClient){
+            hr = m_pAudioCaptureClient->GetNextPacketSize(&nNextPacketSize);
+            if (FAILED(hr)){
+                break;
+            }
+        }
+        else{
+            break;
+        }
+
+        if (nNextPacketSize == 0) {
+            continue;
+        }
+
+        hr = m_pAudioCaptureClient->GetBuffer(&pData, &nNumFramesToRead, &dwFlags, NULL, NULL);
+        if (FAILED(hr)){
+            break;
+        }
+
+        if (0 != nNumFramesToRead){
+            uIndex = TranslateBuffer2DestFrameFormat((spx_int16_t*)((char*)OutAudioBuffer + uIndex), &uValidAudioLen, uLeftBufferLen, pData, nNumFramesToRead * m_pwfx->nBlockAlign, st, m_frame_format);
+            uLeftBufferLen -= uValidAudioLen;
+            //Debug("translate valid audio len is %d,left buffer len is %d.", uValidAudioLen, uLeftBufferLen);
+            if (uLeftBufferLen <= RVC_AUDIO_BUFFER_LEN - m_frame_format->framesize){
+                //Debug("audio len = %d.", RVC_AUDIO_BUFFER_LEN - uLeftBufferLen);
+                if (m_audio_cap){
+                    audio_frame framedata;
+                    framedata.bitspersample = m_frame_format->bitspersample;
+                    framedata.format = m_frame_format->format;
+                    framedata.nchannels = m_frame_format->nchannels;
+                    framedata.samplespersec = m_frame_format->samplespersec;
+                    framedata.framesize = m_frame_format->framesize;
+                    framedata.data = (char*)OutAudioBuffer;
+                    framedata.iseriesnumber = iseriesnumber++;
+                    BOOL bRet = m_audio_cap->InsertAudio(&framedata);
+                    if (bRet){
+                        if (0 == framedata.iseriesnumber % 300){
+                            RenderLog("speaker audio[%d] InsertAudio success.", framedata.iseriesnumber);
+                        }
+                    }
+                    else{
+                        RenderLog("%s", "speaker audio InsertAudio failed.");
+                    }
+
+                    if (m_bRecordPCM){
+                        if (NULL != pRecord){
+                            fwrite(framedata.data, framedata.framesize, 1, pRecord);
+                        }
+                    }
+                }
+                uLeftBufferLen = RVC_AUDIO_BUFFER_LEN;
+                memset(OutAudioBuffer, 0, sizeof(OutAudioBuffer));
+                uIndex = 0;
+            }
+            else{
+                uValidAudioLen = 0;
+                //Debug("continue next capture.");
+            }
+        }
+
+        if (NULL != m_pAudioCaptureClient){
+            m_pAudioCaptureClient->ReleaseBuffer(nNumFramesToRead);
+        }
+    }
+
+    if (NULL != pRecord){
+        fclose(pRecord);
+        pRecord = NULL;
+    }
+
+    if (NULL != st){
+        speex_resampler_destroy(st);
+        st = NULL;
+    }
+    CoUninitialize();
+}
+
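+// Rewrites a float mix format (plain or WAVEFORMATEXTENSIBLE) as 16-bit PCM so the captured
+// data can be handled as integer samples.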
+BOOL AudioRenderImpl::AdjustFormatTo16Bits(WAVEFORMATEX *pwfx)
+{
+    BOOL bRet = FALSE;
+
+    if (NULL != pwfx){
+        if (pwfx->wFormatTag == WAVE_FORMAT_IEEE_FLOAT){
+            pwfx->wFormatTag = WAVE_FORMAT_PCM;
+            pwfx->wBitsPerSample = 16;
+            pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
+            pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
+            bRet = TRUE;
+        }
+        else if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE){
+            PWAVEFORMATEXTENSIBLE pEx = reinterpret_cast<PWAVEFORMATEXTENSIBLE>(pwfx);
+            if (IsEqualGUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, pEx->SubFormat)){
+                pEx->SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
+                pEx->Samples.wValidBitsPerSample = 16;
+                pwfx->wBitsPerSample = 16;
+                pwfx->nBlockAlign = pwfx->nChannels * pwfx->wBitsPerSample / 8;
+                pwfx->nAvgBytesPerSec = pwfx->nBlockAlign * pwfx->nSamplesPerSec;
+                bRet = TRUE;
+            }
+        }
+    }
+
+    return bRet;
+}
+
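+// Stops the audio client, signals the capture thread to exit and waits for it, clears the
+// shared-memory queue and releases the WASAPI resources.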
+int AudioRenderImpl::StopRender()
+{
+    RenderLog("%s", "stop audio render.");
+
+    m_bRecordPCM = false;
+    memset(m_strFilePathName, 0, MAX_PATH);
+
+    if (m_pAudioClient){
+        m_pAudioClient->Stop();
+    }
+
+    SetEvent(m_hEventStop);
+    RenderLog("%s", "m_hEventStop SetEvent.");
+    if (m_hThread){
+        WaitForSingleObject(m_hThread, INFINITE);
+        CloseHandle(m_hThread);
+        m_hThread = NULL;
+    }
+
+    if (m_audio_cap){
+        Sleep(10);
+        m_audio_cap->ClearAudioQueue();
+    }
+
+    if (m_frame_format){
+        delete m_frame_format;
+        m_frame_format = NULL;
+    }
+
+    Close();
+
+    return 0;
+}
+
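+// Stops the current render session and starts a new one on the same queue.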
+int AudioRenderImpl::ReStartRender()
+{
+    if (0 == StopRender()){
+        StartRender(m_iQueueNumber, m_frame_format, m_strFilePathName);
+    }
+
+    return 0;
+}
+
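+// Releases every handle and COM interface acquired in StartRender.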
+void AudioRenderImpl::Close()
+{
+    if (m_hEventStop != NULL)
+    {
+        CloseHandle(m_hEventStop);
+        m_hEventStop = NULL;
+    }
+    if (m_pAudioClient)
+    {
+        m_pAudioClient->Release();
+        m_pAudioClient = NULL;
+    }
+    if (m_pwfx != NULL)
+    {
+        CoTaskMemFree(m_pwfx);
+        m_pwfx = NULL;
+    }
+    if (m_hTimerWakeUp != NULL)
+    {
+        CancelWaitableTimer(m_hTimerWakeUp);
+        CloseHandle(m_hTimerWakeUp);
+        m_hTimerWakeUp = NULL;
+    }
+    if (m_hTask != NULL)
+    {
+        AvRevertMmThreadCharacteristics(m_hTask);
+        m_hTask = NULL;
+    }
+    if (m_pAudioCaptureClient != NULL)
+    {
+        m_pAudioCaptureClient->Release();
+        m_pAudioCaptureClient = NULL;
+    }
+    if (m_pMMDevice != NULL)
+    {
+        m_pMMDevice->Release();
+        m_pMMDevice = NULL;
+    }
+}
+
+void AudioRenderImpl::Release()
+{
+    delete this;
+}
+
+AudioRenderImpl::~AudioRenderImpl()
+{
+
+}