queue.c
                available on the queue.

                As detailed above we do not require mutual exclusion on the event
                list as nothing else can modify it or the ready lists while we
                have the scheduler suspended and queue locked.

                It is possible that an ISR has removed data from the queue since we
                checked if any was available. If this is the case then the data
                will have been copied from the queue, and the queue variables
                updated, but the event list will not yet have been checked to see if
                anything is waiting as the queue is locked. */
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

                /* Force a context switch now as we are blocked. We can do
                this from within a critical section as the task we are
                switching to has its own context. When we return here (i.e. we
                unblock) we will leave the critical section as normal.

                It is possible that an ISR has caused an event on an unrelated and
                unlocked queue. If this was the case then the event list for that
                queue will have been updated but the ready lists left unchanged -
                instead the readied task will have been added to the pending ready
                list. */
                taskENTER_CRITICAL();
                {
                    /* We can safely unlock the queue and scheduler here as
                    interrupts are disabled. We must not yield with anything
                    locked, but we can yield from within a critical section.

                    Tasks that have been placed on the pending ready list cannot
                    be tasks that are waiting for events on this queue. See the
                    comment in xTaskRemoveFromEventList(). */
                    prvUnlockQueue( pxQueue );

                    /* Resuming the scheduler may cause a yield. If so then there
                    is no point yielding again here. */
                    if( !xTaskResumeAll() )
                    {
                        taskYIELD();
                    }

                    /* Before leaving the critical section we have to ensure
                    exclusive access again. */
                    vTaskSuspendAll();
                    prvLockQueue( pxQueue );
                }
                taskEXIT_CRITICAL();
            }
        }
        /* When we are here it is possible that we unblocked as space became
        available on the queue. It is also possible that an ISR posted to the
        queue since we left the critical section, so it may be that again there
        is no space. This would only happen if a task and ISR post onto the
        same queue. */
        taskENTER_CRITICAL();
        {
            if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
            {
                /* There is room in the queue, copy the data into the queue. */
                prvCopyQueueData( pxQueue, pvItemToQueue );
                xReturn = pdPASS;

                /* Update the TxLock count so prvUnlockQueue knows to check for
                tasks waiting for data to become available in the queue. */
                ++( pxQueue->xTxLock );
            }
            else
            {
                xReturn = errQUEUE_FULL;
            }
        }
        taskEXIT_CRITICAL();

        if( xReturn == errQUEUE_FULL )
        {
            if( xTicksToWait > 0 )
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    xReturn = queueERRONEOUS_UNBLOCK;
                }
            }
        }
    }
    while( xReturn == queueERRONEOUS_UNBLOCK );

    prvUnlockQueue( pxQueue );
    xTaskResumeAll();

    return xReturn;
}
/*-----------------------------------------------------------*/
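/* Usage sketch (illustrative only, not part of queue.c): a task posting to a
queue with the blocking xQueueSend() shown above. The task, queue handle and
item type below are hypothetical; it is assumed the queue was created
elsewhere with xQueueCreate() and its handle passed in as the task parameter,
and that FreeRTOS.h, task.h and queue.h are already included by this file. */
static void vExampleSenderTask( void *pvParameters )
{
xQueueHandle xExampleQueue = ( xQueueHandle ) pvParameters;
unsigned portBASE_TYPE uxValueToSend = 0;

    for( ;; )
    {
        /* Block for up to 100 ticks waiting for space to become available.
        xQueueSend() returns errQUEUE_FULL if the timeout expires first. */
        if( xQueueSend( xExampleQueue, &uxValueToSend, ( portTickType ) 100 ) == pdPASS )
        {
            uxValueToSend++;
        }
    }
}
/*-----------------------------------------------------------*/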
signed portBASE_TYPE xQueueSendFromISR( xQueueHandle pxQueue, const void *pvItemToQueue, signed portBASE_TYPE xTaskPreviouslyWoken )
{
    /* Similar to xQueueSend, except we don't block if there is no room in the
    queue. Also we don't directly wake a task that was blocked on a queue
    read, instead we return a flag to say whether a context switch is required
    or not (i.e. has a task with a higher priority than us been woken by this
    post). */
    if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
    {
        prvCopyQueueData( pxQueue, pvItemToQueue );

        /* If the queue is locked we do not alter the event list. This will
        be done when the queue is unlocked later. */
        if( pxQueue->xTxLock == queueUNLOCKED )
        {
            /* We only want to wake one task per ISR, so check that a task has
            not already been woken. */
            if( !xTaskPreviouslyWoken )
            {
                if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority so record that a
                        context switch is required. */
                        return pdTRUE;
                    }
                }
            }
        }
        else
        {
            /* Increment the lock count so the task that unlocks the queue
            knows that data was posted while it was locked. */
            ++( pxQueue->xTxLock );
        }
    }

    return xTaskPreviouslyWoken;
}
/*-----------------------------------------------------------*/
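/* Usage sketch (illustrative only, not part of queue.c): an interrupt handler
posting to two hypothetical queues with xQueueSendFromISR(). The return value
of each call is fed back in as xTaskPreviouslyWoken so that at most one task
is woken per interrupt. How the handler is installed and how the final context
switch is requested are port specific, so they are only indicated by comments. */
static xQueueHandle xRxQueueA, xRxQueueB;    /* Hypothetical queue handles. */

void vExampleRxISR( void )
{
signed portBASE_TYPE xTaskWoken = pdFALSE;
unsigned portBASE_TYPE uxReceivedValue = 0;  /* Value read from hardware. */

    /* Post the same value to both queues; xTaskWoken accumulates whether a
    higher priority task has been unblocked by either post. */
    xTaskWoken = xQueueSendFromISR( xRxQueueA, &uxReceivedValue, xTaskWoken );
    xTaskWoken = xQueueSendFromISR( xRxQueueB, &uxReceivedValue, xTaskWoken );

    if( xTaskWoken != pdFALSE )
    {
        /* A higher priority task was woken - request a context switch here
        using the mechanism provided by the port being used. */
    }
}
/*-----------------------------------------------------------*/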
signed portBASE_TYPE xQueueReceive( xQueueHandle pxQueue, void *pvBuffer, portTickType xTicksToWait )
{
signed portBASE_TYPE xReturn;
xTimeOutType xTimeOut;
    /* This function is very similar to xQueueSend(). See comments within
    xQueueSend() for a more detailed explanation.

    Make sure other tasks do not access the queue. */
    vTaskSuspendAll();

    /* Capture the current time status for future reference. */
    vTaskSetTimeOutState( &xTimeOut );

    /* Make sure interrupts do not access the queue. */
    prvLockQueue( pxQueue );

    do
    {
        /* If there are no messages in the queue we may have to block. */
        if( prvIsQueueEmpty( pxQueue ) )
        {
            /* There are no messages in the queue, do we want to block or just
            leave with nothing? */
            if( xTicksToWait > ( portTickType ) 0 )
            {
                vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
                taskENTER_CRITICAL();
                {
                    prvUnlockQueue( pxQueue );
                    if( !xTaskResumeAll() )
                    {
                        taskYIELD();
                    }
                    vTaskSuspendAll();
                    prvLockQueue( pxQueue );
                }
                taskEXIT_CRITICAL();
            }
        }

        taskENTER_CRITICAL();
        {
            if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
            {
                /* Data is available. pcReadFrom points to the last item read,
                so move it on to the next item (wrapping back to the start of
                the storage area if necessary) before copying the data out. */
                pxQueue->pcReadFrom += pxQueue->uxItemSize;
                if( pxQueue->pcReadFrom >= pxQueue->pcTail )
                {
                    pxQueue->pcReadFrom = pxQueue->pcHead;
                }
                --( pxQueue->uxMessagesWaiting );
                memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

                /* Increment the lock count so prvUnlockQueue knows to check for
                tasks waiting for space to become available on the queue. */
                ++( pxQueue->xRxLock );
                xReturn = pdPASS;
            }
            else
            {
                xReturn = errQUEUE_EMPTY;
            }
        }
        taskEXIT_CRITICAL();

        if( xReturn == errQUEUE_EMPTY )
        {
            if( xTicksToWait > 0 )
            {
                if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
                {
                    xReturn = queueERRONEOUS_UNBLOCK;
                }
            }
        }
    } while( xReturn == queueERRONEOUS_UNBLOCK );

    /* We no longer require exclusive access to the queue. */
    prvUnlockQueue( pxQueue );
    xTaskResumeAll();

    return xReturn;
}
/*-----------------------------------------------------------*/
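/* Usage sketch (illustrative only, not part of queue.c): a task draining a
queue with the blocking xQueueReceive() shown above. The task, handle and item
type are hypothetical; the queue is assumed to have been created elsewhere
with xQueueCreate() using an item size of sizeof( unsigned portBASE_TYPE ). */
static void vExampleReceiverTask( void *pvParameters )
{
xQueueHandle xExampleQueue = ( xQueueHandle ) pvParameters;
unsigned portBASE_TYPE uxReceivedValue;

    for( ;; )
    {
        /* Block for up to 200 ticks waiting for data. pdPASS means an item
        was copied into uxReceivedValue; errQUEUE_EMPTY means we timed out. */
        if( xQueueReceive( xExampleQueue, &uxReceivedValue, ( portTickType ) 200 ) == pdPASS )
        {
            /* Process uxReceivedValue here. */
        }
    }
}
/*-----------------------------------------------------------*/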
signed portBASE_TYPE xQueueReceiveFromISR( xQueueHandle pxQueue, void *pvBuffer, signed portBASE_TYPE *pxTaskWoken )
{
signed portBASE_TYPE xReturn;
    /* We cannot block from an ISR, so check there is data available. */
    if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
    {
        /* Copy the data from the queue. */
        pxQueue->pcReadFrom += pxQueue->uxItemSize;
        if( pxQueue->pcReadFrom >= pxQueue->pcTail )
        {
            pxQueue->pcReadFrom = pxQueue->pcHead;
        }
        --( pxQueue->uxMessagesWaiting );
        memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

        /* If the queue is locked we will not modify the event list. Instead
        we update the lock count so the task that unlocks the queue will know
        that an ISR has removed data while the queue was locked. */
        if( pxQueue->xRxLock == queueUNLOCKED )
        {
            /* We only want to wake one task per ISR, so check that a task has
            not already been woken. */
            if( !( *pxTaskWoken ) )
            {
                if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) )
                {
                    if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
                    {
                        /* The task waiting has a higher priority than us so
                        force a context switch. */
                        *pxTaskWoken = pdTRUE;
                    }
                }
            }
        }
        else
        {
            /* Increment the lock count so the task that unlocks the queue
            knows that data was removed while it was locked. */
            ++( pxQueue->xRxLock );
        }

        xReturn = pdPASS;
    }
    else
    {
        xReturn = pdFAIL;
    }

    return xReturn;
}
/*-----------------------------------------------------------*/
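/* Usage sketch (illustrative only, not part of queue.c): an interrupt handler
draining a hypothetical transmit queue with xQueueReceiveFromISR(). Unlike
xQueueSendFromISR(), the "task woken" flag is passed by pointer and is only
ever set to pdTRUE, so it can safely accumulate across several calls. */
static xQueueHandle xTxQueue;                /* Hypothetical queue handle. */

void vExampleTxISR( void )
{
signed portBASE_TYPE xTaskWoken = pdFALSE;
unsigned portBASE_TYPE uxValueToTransmit;

    /* Empty the queue, writing each item to the (hypothetical) peripheral. */
    while( xQueueReceiveFromISR( xTxQueue, &uxValueToTransmit, &xTaskWoken ) == pdPASS )
    {
        /* Write uxValueToTransmit to the hardware here. */
    }

    if( xTaskWoken != pdFALSE )
    {
        /* A task waiting for queue space was woken and has a higher priority -
        request a context switch using the port specific mechanism. */
    }
}
/*-----------------------------------------------------------*/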
unsigned portBASE_TYPE uxQueueMessagesWaiting( xQueueHandle pxQueue )
{
unsigned portBASE_TYPE uxReturn;
    taskENTER_CRITICAL();
        uxReturn = pxQueue->uxMessagesWaiting;
    taskEXIT_CRITICAL();

    return uxReturn;
}
/*-----------------------------------------------------------*/
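/* Usage sketch (illustrative only, not part of queue.c): polling the number of
items held in a hypothetical queue, for example to decide whether a batch of
data is ready to be processed. */
static void vExamplePollingSketch( xQueueHandle xExampleQueue )
{
    if( uxQueueMessagesWaiting( xExampleQueue ) >= ( unsigned portBASE_TYPE ) 8 )
    {
        /* At least 8 items are queued - process them as a batch. The count is
        only a snapshot; tasks and ISRs may change it immediately afterwards. */
    }
}
/*-----------------------------------------------------------*/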
void vQueueDelete( xQueueHandle pxQueue )
{
    /* Free the queue storage area, then the queue structure itself. Both
    were allocated by xQueueCreate(). */
    vPortFree( pxQueue->pcHead );
    vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/
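/* Usage sketch (illustrative only, not part of queue.c): deleting a queue once
it is no longer required. There is no reference counting in this version, so it
is assumed that no task or ISR will use the handle, and that no task is blocked
on the queue, by the time vQueueDelete() is called. The drain step and item
type below are purely illustrative. */
static void vExampleShutdownSketch( xQueueHandle xExampleQueue )
{
unsigned portBASE_TYPE uxDiscarded;

    /* Drain anything still queued so no data is silently discarded. */
    while( uxQueueMessagesWaiting( xExampleQueue ) > ( unsigned portBASE_TYPE ) 0 )
    {
        xQueueReceive( xExampleQueue, &uxDiscarded, ( portTickType ) 0 );
    }

    vQueueDelete( xExampleQueue );
}
/*-----------------------------------------------------------*/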