Directshow的音频捕获和回放测试(2)

CMemPlayStream::CMemPlayStream(HRESULT *phr,
                         CMemPlay *pParent,
                         LPCWSTR pPinName) :
    CSourceStream(NAME("Memory Play"),phr, pParent, pPinName),
    m_iDefaultRepeatTime(20)
{
    ASSERT(phr);
    CAutoLock cAutoLock(&m_cSharedState);
 for(int i=0; i<1024; i++)
 {
  m_DataList[i] = NULL;
  m_SampleSize[i] = 0;
  m_StartTimeList[i] = 0;
  m_StopTimeList[i] = 0;
 }
 m_SampleCount = 0;
 m_SampleReaded = 0;
 FILE *fp = NULL;
 fp = fopen("c://audiodata", "rb");
 if(fp == NULL) return;
 BYTE *pBuffer = NULL;
 long size = 0;
 REFERENCE_TIME StartTime;
 REFERENCE_TIME StopTime;
 while(!feof(fp))
 {
  fread(&size, sizeof(long), 1, fp);
  if(size <=0) break;
  fread(&StartTime, sizeof(REFERENCE_TIME), 1, fp);
  fread(&StopTime, sizeof(REFERENCE_TIME), 1, fp);
  pBuffer = new BYTE[size];
  fread(pBuffer, sizeof(BYTE), size, fp);
  m_SampleSize[m_SampleCount] = size;
  m_DataList[m_SampleCount] = pBuffer;
  m_StartTimeList[m_SampleCount] = StartTime;
  m_StopTimeList[m_SampleCount] = StopTime;
  m_SampleCount ++;
  pBuffer = NULL;
  size = 0;
 }
 fclose(fp); 
} // (Constructor)
// Destructor: frees every frame buffer allocated by the constructor.
// NOTE(review): the original loop header was HTML-mangled ("for(int i=0; i {")
// — the "< 1024" condition had been eaten by angle-bracket escaping; it is
// reconstructed here to match the constructor's initialization loop.
CMemPlayStream::~CMemPlayStream()
{
    CAutoLock cAutoLock(&m_cSharedState);
    for(int i = 0; i < 1024; i++)
    {
        delete[] m_DataList[i];   // delete[] on NULL is a safe no-op
        m_DataList[i] = NULL;
        m_SampleSize[i] = 0;
        m_StartTimeList[i] = 0;
        m_StopTimeList[i] = 0;
    }
} // (Destructor)
// FillBuffer: copies the next stored audio frame into the media sample and
// stamps it with a running timestamp advanced by the frame's captured
// duration. Returns S_FALSE once all frames have been played, which tells
// the CSourceStream base class to deliver end-of-stream.
HRESULT CMemPlayStream::FillBuffer(IMediaSample *pms)
{
    CheckPointer(pms,E_POINTER);
    BYTE *pData;
    long lDataLen;
    pms->GetPointer(&pData);
    lDataLen = pms->GetSize();
    ZeroMemory(pData, lDataLen);
    {
        CAutoLock cAutoLockShared(&m_cSharedState);
        // End-of-stream test must come BEFORE any array access: the original
        // used "m_SampleReaded++ >= m_SampleCount" after indexing, which read
        // one slot past the last frame (NULL data pointer) before stopping.
        if(m_SampleReaded >= m_SampleCount)
            return S_FALSE;
        // Never copy more than the allocator's buffer can hold.
        long lBytesFilled = (m_SampleSize[m_SampleReaded] < lDataLen) ?
            m_SampleSize[m_SampleReaded] : lDataLen;
        memcpy(pData, m_DataList[m_SampleReaded], lBytesFilled);
        CRefTime rtStart = m_rtSampleTime;
        // Advance the running clock by this frame's original duration.
        m_rtSampleTime += (LONG)(m_StopTimeList[m_SampleReaded] - m_StartTimeList[m_SampleReaded]);
        m_SampleReaded++;
        pms->SetTime((REFERENCE_TIME *) &rtStart,(REFERENCE_TIME *) &m_rtSampleTime);
        // Report how many bytes are actually valid; without this the renderer
        // treats the whole (zeroed) buffer as audio data.
        pms->SetActualDataLength(lBytesFilled);
    }
    pms->SetSyncPoint(TRUE);
    return NOERROR;
} // FillBuffer
// GetMediaType: proposes the output format — 44.1 kHz, 16-bit, stereo PCM.
// If the capture side used a different format, edit the three constants
// below to match, or playback will be noise.
HRESULT CMemPlayStream::GetMediaType(CMediaType *pmt)
{
    CheckPointer(pmt,E_POINTER);
    CAutoLock cAutoLock(m_pFilter->pStateLock());
    WAVEFORMATEX *pwi = (WAVEFORMATEX *) pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
    if(NULL == pwi)
        return(E_OUTOFMEMORY);
    ZeroMemory(pwi, sizeof(WAVEFORMATEX));
    // Hard-coded capture format — see header comment.
    int nFrequency = 44100;
    int nChannels = 2;
    int nBytesPerSample = 2;
    long lBytesPerSecond = (long) (nBytesPerSample * nFrequency * nChannels);
    // Sample size = one second of audio (the redundant float round-trip in
    // the original has been dropped).
    long lBufferSize = lBytesPerSecond;
    pwi->wFormatTag = WAVE_FORMAT_PCM;
    pwi->nChannels = (unsigned short)nChannels;
    pwi->nSamplesPerSec = nFrequency;
    pwi->nAvgBytesPerSec = lBytesPerSecond;
    pwi->wBitsPerSample = (WORD) (nBytesPerSample * 8);
    pwi->nBlockAlign = (WORD) (nBytesPerSample * nChannels);
    pmt->SetType(&MEDIATYPE_Audio);
    // NOTE(review): DirectShow's canonical constant is FORMAT_WaveFormatEx;
    // WMFORMAT_WaveFormatEx (Windows Media SDK) carries the same GUID value,
    // so this works — confirm the intended header dependency.
    pmt->SetFormatType(&WMFORMAT_WaveFormatEx);
    pmt->SetTemporalCompression(FALSE);
    pmt->SetSubtype(&MEDIASUBTYPE_PCM);
    pmt->SetSampleSize(lBufferSize);
    return NOERROR;
} // GetMediaType
HRESULT CMemPlayStream::CheckMediaType(const CMediaType *pMediaType)
{
    CheckPointer(pMediaType,E_POINTER);
 if((*(pMediaType->Type()) != MEDIATYPE_Audio) ||   // we only output video
    !(pMediaType->IsFixedSize()))                   // in fixed size samples
 {                                                 
  return E_INVALIDARG;
 }
 const GUID *SubType = pMediaType->Subtype();
 if (SubType == NULL)
  return E_INVALIDARG;
 if(*SubType != MEDIASUBTYPE_PCM)
    {
        return E_INVALIDARG;
    }
 WAVEFORMATEX *pwi = (WAVEFORMATEX *) pMediaType->Format();
 if((pwi->nChannels != 1) ||
  (pwi->nSamplesPerSec != 44100) ||
  (pwi->wBitsPerSample != 16))
 {
  return E_INVALIDARG;
 }
    return S_OK;  // This format is acceptable.
} // CheckMediaType
HRESULT CMemPlayStream::DecideBufferSize(IMemAllocator *pAlloc,
                                      ALLOCATOR_PROPERTIES *pProperties)
{
    CheckPointer(pAlloc,E_POINTER);
    CheckPointer(pProperties,E_POINTER);
    CAutoLock cAutoLock(m_pFilter->pStateLock());
    HRESULT hr = NOERROR;
    WAVEFORMATEX *pwi = (WAVEFORMATEX *) m_mt.Format();
    pProperties->cBuffers = 1;
    pProperties->cbBuffer = pwi->nSamplesPerSec;
  pProperties->cbAlign = pwi->nBlockAlign;
    ASSERT(pProperties->cbBuffer);
    ALLOCATOR_PROPERTIES Actual;
    hr = pAlloc->SetProperties(pProperties,&Actual);
    if(FAILED(hr))
    {
        return hr;
    }
    if(Actual.cbBuffer < pProperties->cbBuffer)
    {
        return E_FAIL;
    }
    ASSERT(Actual.cBuffers == 1);
    return NOERROR;
} // DecideBufferSize
// SetMediaType: records the agreed media type by delegating to the
// CSourceStream base class; failures are passed straight back to the caller.
HRESULT CMemPlayStream::SetMediaType(const CMediaType *pMediaType)
{
    CAutoLock stateLock(m_pFilter->pStateLock());
    const HRESULT hrBase = CSourceStream::SetMediaType(pMediaType);
    if(FAILED(hrBase))
    {
        return hrBase;
    }
    return NOERROR;
} // SetMediaType
// OnThreadCreate: called when the streaming thread starts; resets the
// running sample timestamp so playback always begins at time zero.
HRESULT CMemPlayStream::OnThreadCreate()
{
    CAutoLock sharedLock(&m_cSharedState);
    m_rtSampleTime = 0;
    return NOERROR;
} // OnThreadCreate
贴了一堆代码,把注释基本上都去掉了,没想到还是这么长。其实做的工作并不是很多,主要涉及到其中的三个函数,CMemPlayStream的初始化函数、fillbuffer函数和GetMediaType函数。
初始化函数把所有的音频帧的信息都按照原来的存储格式读出来,存储在四个数组当中,m_SampleSize[m_SampleCount]存放buffer的大小,m_DataList存放buffer的实际内容,m_StartTimeList和m_StopTimeList存放buffer的开始和结束时间。两个变量m_SampleCount对应于buffer的总数,m_SampleReaded对应于已经播放的buffer的个数。
fillbuffer将对应的音频帧的内容copy到IMediaSample当中去,同时设置这个sample的开始和结束时间。然后m_SampleReaded加1,再次请求fillbuffer时就播放下一个buffer的内容。当m_SampleReaded大于总的buffer个数m_SampleCount后,fillbuffer返回S_FALSE结束音频的播放。
GetMediaType函数设置正确的音频配置信息,此处设置为44.1K的采样频率,双声道播放,每单位的数据为2个字节(16位)。如果捕获时不是按照这种格式设置的,可以进行相应的修改(好不方便奥,确实),否则播放出来就是一堆杂音,什么也听不清楚。
还有很多需要完善的地方,以后再说吧:
 一下子把所有的数据都存储到内存中去了,如果文件很大行不通。
 需要动态调整音频的配置信息。
4、音频帧的回放
编译这个程序,需要注意的是修改ball.def中的LIBRARY为MemPlay.ax代替原来Ball.ax,同时修改project-setting-link中的output file name为./Debug/MemPlay.ax替代原来的Ball.ax,注意编译成debug版本。在开始-运行中注册该filter,然后到graphedit中graph-insert filters-directshow filters中找到MemPlay,双击即可加入到graphedit中。右键单击MemPlay的output pin,选择render pin,或者自行加入一个音频播放的filter(在graph-insert filters-Audio Renders中选择default directsound device)。运行这个graph,就可以听到原来录制的声音啦。
(完)

 

你可能感兴趣的:(Directshow的音频捕获和回放测试(2))