hxaudply.cpp
    if ( m_eState == E_PLAYING )
    {
        return HXR_OK;
    }

    m_bIsDonePlayback = FALSE;
    m_eState = E_PLAYING;
    m_bCanBeRewound = TRUE;

    /* Use Audio Session Object ONLY if there are any audio streams
     * in the presentation
     */
    if (m_bHasStreams)
    {
        CHXAudioStream* s = 0;
        CHXSimpleList::Iterator lIter = m_pStreamList->Begin();
        for (; lIter != m_pStreamList->End(); ++lIter)
        {
            s = (CHXAudioStream*) (*lIter);
            if ( s )
                s->Resume(TRUE);
        }

        // This is the audio device playback time that corresponds to
        // when this audio player resumed.
        m_ulADresumeTime = m_Owner->GetCurrentPlayBackTime();

        // This is this player's start time within its timeline. This is
        // modified when the player is seeked or resumed.
        m_ulAPstartTime = m_ulAPplaybackTime;

        UpdateStreamLastWriteTime();

        // Resume the audio device playback
        if ( !theErr )
            theErr = m_Owner->Resume(this);
    }
    else
    {
        theErr = ResumeFakeTimeline();

        /* Send time 0 at first Resume */
        if (!theErr && m_bIsFirstResume)
        {
            m_bIsFirstResume = FALSE;
            OnTimeSync(m_ulIncreasingTimer);
        }
    }

    m_bIsResumed = TRUE;

    return ( !theErr ) ? HXR_OK : HXR_FAILED;
}

/************************************************************************
 *  Method:
 *      CHXAudioPlayer::Pause
 *  Purpose:
 *      The player object calls this function to pause audio playback.
 */
HX_RESULT CHXAudioPlayer::Pause()
{
    if (m_eState == E_PAUSED)
    {
        return HXR_OK;
    }

    m_eState = E_PAUSED;

    if (m_bHasStreams)
    {
        CHXAudioStream* s = 0;
        CHXSimpleList::Iterator lIter = m_pStreamList->Begin();
        for (; lIter != m_pStreamList->End(); ++lIter)
        {
            s = (CHXAudioStream*) (*lIter);
            if ( s )
                s->Pause(TRUE);
        }

        m_Owner->Pause(this);
    }
    else
    {
        StopFakeTimeline();
    }

    m_bCanBeRewound = FALSE;

    return HXR_OK;
}

/************************************************************************
 *  Method:
 *      CHXAudioPlayer::Stop
 *  Purpose:
 *      The player object calls this function to stop audio playback.
 *      If bFlush is TRUE, flush any data in the audio device.
 */
HX_RESULT CHXAudioPlayer::Stop( const BOOL bFlush )
{
    m_eState = E_STOPPED;
    m_ulAPstartTime = 0;

    if (m_bHasStreams)
    {
        CHXAudioStream* s = 0;
        CHXSimpleList::Iterator lIter = m_pStreamList->Begin();
        for (; lIter != m_pStreamList->End(); ++lIter)
        {
            s = (CHXAudioStream*) (*lIter);
            if ( s )
                s->Stop();
        }

        m_Owner->Stop(this, bFlush);
    }
    else
    {
        StopFakeTimeline();
    }

    ResetPlayer();

    return HXR_OK;
}

/************************************************************************
 *  Method:
 *      CHXAudioPlayer::Seek
 *  Purpose:
 *      The player object calls this function to seek audio playback to
 *      the time (in milliseconds) given.
 */
HX_RESULT CHXAudioPlayer::Seek(const UINT32 ulSeekTime)
{
    /* Always remember this seek time, even though there may not be any streams
     * yet for this player. This is because the streams may be created later and
     * we need to correctly apply the seek time to get the accurate time.
     */
    m_ulAPstartTime = m_ulAPplaybackTime = ulSeekTime; // current start time for this player
    m_llLastWriteTime = (INT64) ulSeekTime;

    if (m_bHasStreams)
    {
        // Make each stream seek, too, since they own the resampling buffers.
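        // (Illustrative note, not from the original source: after a seek to,
        // say, 30000 ms, m_ulAPstartTime becomes 30000; on the next Resume the
        // device clock is re-sampled into m_ulADresumeTime, so OnTimeSync
        // reports (device time - m_ulADresumeTime) + 30000.)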
        CHXAudioStream* s = 0;
        CHXSimpleList::Iterator lIter = m_pStreamList->Begin();
        for (; lIter != m_pStreamList->End(); ++lIter)
        {
            s = (CHXAudioStream*) (*lIter);
            if ( s )
                s->Seek(ulSeekTime);
        }

        m_Owner->Seek( this, ulSeekTime );
        m_ulADresumeTime = m_Owner->GetCurrentPlayBackTime();
    }
    else
    {
        StopFakeTimeline();
        m_bIsFirstResume = TRUE;
    }

    m_ulCurrentTime = ulSeekTime;
    m_ulLastCurrentTimeReturned = m_ulCurrentTime;
    m_bTimeReturned = FALSE;
    m_bHasDataInAudioDevice = FALSE;

    return HXR_OK;
}

void CHXAudioPlayer::ResetPlayer(void)
{
    m_bInited = FALSE;
    m_bHasStreams = FALSE;
    m_bIsFirstResume = TRUE;
    m_ulAPstartTime = 0;
    m_ulAPplaybackTime = 0;
    m_ulADresumeTime = 0;
    m_ulCurrentTime = 0;
    m_ulLastCurrentTimeReturned = 0;
    m_ulLastDeviceTimeAdjusted = 0;
    m_bTimeReturned = FALSE;
    m_bIsLive = FALSE;
    m_bIsResumed = FALSE;
    m_bIsDonePlayback = TRUE;
    m_llLastWriteTime = 0;
    m_bCanBeRewound = FALSE;
    m_bHasDataInAudioDevice = FALSE;

    // Delete all streams. Remove all list items.
    if ( m_pStreamList )
    {
        CHXAudioStream* pAudioStream = 0;
        while (!m_pStreamList->IsEmpty())
        {
            pAudioStream = (CHXAudioStream*) m_pStreamList->RemoveHead();
            pAudioStream->ResetStream();
            pAudioStream->Release();
        }
    }

    /* We do not remove post mix hooks any more */
    /* We do not remove Stream Response Objects any more */

    /* Default value of Player format */
    m_PlayerFmt.uChannels       = 2;
    m_PlayerFmt.uBitsPerSample  = 16;
    m_PlayerFmt.ulSamplesPerSec = 16000;
    m_PlayerFmt.uMaxBlockSize   = 64000;

    m_ulLastFakeCallbackTime = 0;
    StopFakeTimeline();
}

/************************************************************************
 *  Method:
 *      CHXAudioPlayer::SetupStreams
 *  Purpose:
 *      Tell each stream about the audio device format so
 *      they can setup their resamplers and buffer.
 */
void CHXAudioPlayer::SetupStreams(void)
{
    // Get audio device format
    m_Owner->GetFormat(&m_DeviceFmt);

    // Calculate bytes per gran
    m_ulBytesPerGran = (ULONG32) (((m_DeviceFmt.uChannels *
                        ((m_DeviceFmt.uBitsPerSample == 8) ? 1 : 2) *
                        m_DeviceFmt.ulSamplesPerSec) / 1000.0) * m_ulGranularity);

    // Make sure that number of bytes per granularity is an even number.
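    // (Illustrative sizing, not from the original source: 16-bit stereo at
    // 44100 Hz with a 100 ms granularity gives (2 * 2 * 44100 / 1000.0) * 100
    // = 17640 bytes per granule, which is already even; an odd result is
    // bumped up by one byte below.)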
    if ( (m_ulBytesPerGran % 2) != 0 )
        m_ulBytesPerGran++;

    /* Don't we have to calculate granularity again if we adjust
     * for even byte boundary - XXX Rahul 06/15/97
     */

    // Notify each stream
    CHXAudioStream* s = 0;
    CHXSimpleList::Iterator lIter = m_pStreamList->Begin();
    for (; lIter != m_pStreamList->End(); ++lIter)
    {
        s = (CHXAudioStream*) (*lIter);
        if ( s )
        {
            s->Setup( &m_DeviceFmt, m_ulGranularity );
        }
    }
}

/************************************************************************
 *  Method:
 *      CHXAudioPlayer::OnTimeSync
 *  Purpose:
 */
HX_RESULT CHXAudioPlayer::OnTimeSync(ULONG32 ulCurrentTime)
{
    HX_RESULT theErr = HXR_OK;

#ifdef _MACINTOSH
    if (InterlockedIncrement(&gTIMELINE_MUTEX) > 1)
    {
        InterlockedDecrement(&gTIMELINE_MUTEX);
        return HXR_OK;
    }
    InterlockedDecrement(&gTIMELINE_MUTEX);
#endif

    if (m_bHasStreams)
    {
        ULONG32 ulADplaybackTime;
        ulADplaybackTime = m_Owner->GetCurrentPlayBackTime();
        m_ulAPplaybackTime = (ulADplaybackTime - m_ulADresumeTime) + m_ulAPstartTime;
    }
    else
    {
        m_ulAPplaybackTime = ulCurrentTime;
    }

    m_ulCurrentTime = m_ulAPplaybackTime;

    AdjustForRealAudio();

    // Here we need to fudge the actual time for this player.
    // For now we support only one player/timeline.
    if (m_pPlayerResponse)
    {
        // The current playback time of any player is the difference
        // of the current audio device playback time minus the audio
        // device time when this player started (resumed) playback
        // plus the initial start time of playback within this player's
        // timeline (usually 0 but can be something else esp. after a
        // seek).
        theErr = m_pPlayerResponse->OnTimeSync(m_ulCurrentTime);
    }

    return theErr;
}

/************************************************************************
 *  Method:
 *      CHXAudioPlayer::Setup
 *  Purpose:
 *      This is called after AS receives format and stream info
 *      from the renderers AND before packets are received from
 *      the renderer.
 */
HX_RESULT CHXAudioPlayer::Setup( ULONG32 ulGranularity )
{
    HX_RESULT theErr = HXR_OK;

    if (m_bInited)
        return HXR_OK;

    /* Always write 100ms audio blocks for now. */
    m_ulGranularity = MAXIMUM_AUDIO_GRANULARITY; //ulGranularity;

    /* We do not go below MINIMUM_AUDIO_GRANULARITY. This will not affect
     * sending timesyncs at this lower granularity since the HXPlayer object
     * uses the scheduler to send individual timesyncs anyway.
     */
    if (m_ulGranularity < MINIMUM_AUDIO_GRANULARITY)
    {
        m_ulGranularity = MINIMUM_AUDIO_GRANULARITY;
    }
    else if (m_ulGranularity > MAXIMUM_AUDIO_GRANULARITY)
    {
        m_ulGranularity = MAXIMUM_AUDIO_GRANULARITY;
    }

    if (!m_bHasStreams)
    {
        m_bInited = TRUE;
        return HXR_OK;
    }

    /* If this is the second player, the session object may override
     * the granularity value.
     */
    m_ulGranularity = m_Owner->SetGranularity(m_ulGranularity);

    // Determine this player's audio format parameters based on
    // the mixer channels attributes supplied in RegisterRenderer.
    //
    // 1. Spin thru the list of registered streams and
    //    determine the desired audio device parameters.
    // 2. Check the audio format with the audio device.
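    // (Summary comment, not from the original source: the loop below keeps the
    // widest channel count, sample width, and block size found across the
    // registered streams; for the sample rate, if m_uPrefAudioQuality is above
    // 2 the highest stream rate is kept, otherwise the lowest.)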
    //
    CHXAudioStream* pAudioStream = 0;
    ULONG32 maxSamplesPerSec  = 8000;
    ULONG32 minSamplesPerSec  = 44100;
    BOOL    bFirst            = TRUE;
    UINT16  maxChannels       = 1;
    UINT16  maxBlocksize      = 0;
    UINT16  maxBitsPerSample  = 0;
    HXAudioFormat audioFmt;

    theErr = GetAudioPrefs();

    if (!theErr && m_pStreamList->GetCount() > 0)
    {
        CHXSimpleList::Iterator lIter = m_pStreamList->Begin();
        for (; lIter != m_pStreamList->End(); ++lIter)
        {
            pAudioStream = (CHXAudioStream*) (*lIter); //m_pStreamList->GetNext(lp);
            if (!pAudioStream->IsAudioFormatKnown())
            {
                continue;
            }

            pAudioStream->GetFormat( &audioFmt );
            if (bFirst)
            {
                bFirst            = FALSE;
                maxSamplesPerSec  = audioFmt.ulSamplesPerSec;
                minSamplesPerSec  = audioFmt.ulSamplesPerSec;
                maxChannels       = audioFmt.uChannels;
                maxBlocksize      = audioFmt.uMaxBlockSize;
                maxBitsPerSample  = audioFmt.uBitsPerSample;
            }
            else
            {
                //
                // NOTE: upsampling typically costs more CPU than downsampling.
                if ( audioFmt.ulSamplesPerSec > maxSamplesPerSec )
                    maxSamplesPerSec = audioFmt.ulSamplesPerSec;
                if ( audioFmt.ulSamplesPerSec < minSamplesPerSec )
                    minSamplesPerSec = audioFmt.ulSamplesPerSec;

                //
                // NOTE: converting mono to stereo and vice versa cost about the
                // same in CPU usage.
                if ( audioFmt.uChannels > maxChannels )
                    maxChannels = audioFmt.uChannels;

                // Get max block size.
                if ( audioFmt.uMaxBlockSize > maxBlocksize )
                    maxBlocksize = audioFmt.uMaxBlockSize;

                // Get max sample width.
                if ( audioFmt.uBitsPerSample > maxBitsPerSample )
                    maxBitsPerSample = audioFmt.uBitsPerSample;
            }
        }

        // Set the audio format for this Player.
        m_PlayerFmt.uMaxBlockSize  = maxBlocksize;
        m_PlayerFmt.uChannels      = maxChannels;
        m_PlayerFmt.uBitsPerSample = maxBitsPerSample;

        // If the user wants upsampling
        if ( m_uPrefAudioQuality > 2 )
            m_PlayerFmt.ulSamplesPerSec = maxSamplesPerSec;
        else
            m_PlayerFmt.ulSamplesPerSec = minSamplesPerSec;
    }

    if (m_bPrefUse11khz)
    {
        m_PlayerFmt.ulSamplesPerSec = 11025;
    }

    // Do audio session setup (e.g., determine the device audio format, etc.).
    if ( !theErr )
        theErr = m_Owner->Setup( m_bHasStreams );

    // Now let all streams know the final audio format so they
    // can resample to this format.
    if ( !theErr )
    {
        SetupStreams();
    }
    // If the audio device failed to initialize, keep the video playing
    // as long as this is not an audio-only source.
    else if (!IsAudioOnlyTrue())
    {
        m_bHasStreams = FALSE;
        m_bInited = TRUE;
        return HXR_OK;
    }

    // Let all stream response objects know about the streams.
    if (!theErr && m_pStreamRespList)
    {
        IHXAudioStreamInfoResponse* pAudioStreamInfoResponse = 0;
        CHXSimpleList::Iterator lIter = m_pStreamRespList->Begin();
        for (; lIter != m_pStreamRespList->End(); ++lIter)
        {
            pAudioStreamInfoResponse = (IHXAudioStreamInfoResponse*) (*lIter);

            CHXSimpleList::Iterator lIter2 = m_pStreamList->Begin();
            for (; lIter2 != m_pStreamList->End(); ++lIter2)
            {
                CHXAudioStream* pStream = (CHXAudioStream*) (*lIter2);

                /* Only if a stream is initialized, send it to the
                 * Response object. If not, we will send it when it
                 * gets initialized (in the StreamInitialized() call).
                 */
                if (pStream->IsInitialized())
                {
                    pAudioStreamInfoResponse->OnStream(pStream);
                }
            }
        }
    }

    // All renderers should have checked in by now!
    // Call post mix process hooks in list and provide the audio format.
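    // (Descriptive note, not from the original source: each hook receives the
    // post-mix device format via OnInit(); only hooks that ignore audio data,
    // or that pass the ACTION_CHECK query in ProcessAudioHook(), are
    // initialized here.)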
    if (!theErr && m_pPMixHookList)
    {
        HXAudioFormat audioFmt;
        m_Owner->GetFormat( &audioFmt );

        HXAudioHookInfo* pPMixHookInfo = 0;
        CHXSimpleList::Iterator lIter = m_pPMixHookList->Begin();
        for (; lIter != m_pPMixHookList->End(); ++lIter)
        {
            pPMixHookInfo = (HXAudioHookInfo*) (*lIter);
            if (pPMixHookInfo->bIgnoreAudioData ||
                HXR_OK == ProcessAudioHook(ACTION_CHECK, pPMixHookInfo->pHook))
            {
                pPMixHookInfo->pHook->OnInit( &audioFmt );
            }
        }
    }

    if (!theErr)
    {
        m_bInited = TRUE;

        /* Only change the state to initialized if we were in a stopped
         * state earlier. It is possible to be in Playing state and be still
         * in this function. This will happen if we have started the
         * timeline as a fake timeline and later an audio stream joins the
         * presentation thereby converting fake to audio timeline
         * (delayed audio source in SMIL playback).
         */
        if (m_eState == E_STOPPED)
        {
            m_eState = E_INITIALIZED;
        }
    }

    return theErr;
}

ULONG32 CHXAudioPlayer::GetCurrentPlayBackTime(void)
{
    if (m_eState != E_PLAYING)
    {
        return m_ulCurrentTime;
    }

    // The current playback time of any player is the difference
    // of the current audio device playback time minus the audio
    // device time when this player started (resumed) playback
    // plus the initial start time of playback within this player's
    // timeline (usually 0 but can be something else esp. after a
    // seek).
    if (!m_bHasStreams)
    {
        ULONG32 ulCurrentTime = HX_GET_TICKCOUNT();
        m_ulIncreasingTimer += CALCULATE_ELAPSED_TICKS(m_ulLastFakeCallbackTime, ulCurrentTime);
        m_ulLastFakeCallbackTime = ulCurrentTime;
        m_ulCurrentTime = m_ulIncreasingTimer;
    }
    else
    {
        m_ulCurrentTime = (m_Owner->GetCurrentPlayBackTime() - m_ulADresumeTime) + m_ulAPstartTime;
    }

    m_ulAPplaybackTime = m_ulCurrentTime;

    AdjustForRealAudio();

    return m_ulCurrentTime;
}
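// (Worked example, not from the original source: if the audio device clock
// reads 45500 ms, the device read 40000 ms when this player resumed
// (m_ulADresumeTime), and the player's timeline started at 30000 ms after a
// seek (m_ulAPstartTime), GetCurrentPlayBackTime() reports
// (45500 - 40000) + 30000 = 35500 ms.)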