Prusa MINI Firmware overview
queue.c File Reference
#include <stdlib.h>
#include <string.h>
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

Classes

struct  QueueDefinition
 

Macros

#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
 
#define queueUNLOCKED   ( ( int8_t ) -1 )
 
#define queueLOCKED_UNMODIFIED   ( ( int8_t ) 0 )
 
#define pxMutexHolder   pcTail
 
#define uxQueueType   pcHead
 
#define queueQUEUE_IS_MUTEX   NULL
 
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH   ( ( UBaseType_t ) 0 )
 
#define queueMUTEX_GIVE_BLOCK_TIME   ( ( TickType_t ) 0U )
 
#define queueYIELD_IF_USING_PREEMPTION()
 
#define prvLockQueue(pxQueue)
 

Typedefs

typedef struct QueueDefinition xQUEUE
 
typedef xQUEUE Queue_t
 

Functions

static PRIVILEGED_FUNCTION void prvUnlockQueue (Queue_t *const pxQueue)
 
static PRIVILEGED_FUNCTION BaseType_t prvIsQueueEmpty (const Queue_t *pxQueue)
 
static PRIVILEGED_FUNCTION BaseType_t prvIsQueueFull (const Queue_t *pxQueue)
 
static PRIVILEGED_FUNCTION BaseType_t prvCopyDataToQueue (Queue_t *const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition)
 
static PRIVILEGED_FUNCTION void prvCopyDataFromQueue (Queue_t *const pxQueue, void *const pvBuffer)
 
static PRIVILEGED_FUNCTION void prvInitialiseNewQueue (const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue)
 
BaseType_t xQueueGenericReset (QueueHandle_t xQueue, BaseType_t xNewQueue)
 
BaseType_t xQueueGenericSend (QueueHandle_t xQueue, const void *const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition)
 
BaseType_t xQueueGenericSendFromISR (QueueHandle_t xQueue, const void *const pvItemToQueue, BaseType_t *const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition)
 
BaseType_t xQueueGiveFromISR (QueueHandle_t xQueue, BaseType_t *const pxHigherPriorityTaskWoken)
 
BaseType_t xQueueGenericReceive (QueueHandle_t xQueue, void *const pvBuffer, TickType_t xTicksToWait, const BaseType_t xJustPeeking)
 
BaseType_t xQueueReceiveFromISR (QueueHandle_t xQueue, void *const pvBuffer, BaseType_t *const pxHigherPriorityTaskWoken)
 
BaseType_t xQueuePeekFromISR (QueueHandle_t xQueue, void *const pvBuffer)
 
UBaseType_t uxQueueMessagesWaiting (const QueueHandle_t xQueue)
 
UBaseType_t uxQueueSpacesAvailable (const QueueHandle_t xQueue)
 
UBaseType_t uxQueueMessagesWaitingFromISR (const QueueHandle_t xQueue)
 
void vQueueDelete (QueueHandle_t xQueue)
 
BaseType_t xQueueIsQueueEmptyFromISR (const QueueHandle_t xQueue)
 
BaseType_t xQueueIsQueueFullFromISR (const QueueHandle_t xQueue)
 

Macro Definition Documentation

◆ MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

◆ queueUNLOCKED

#define queueUNLOCKED   ( ( int8_t ) -1 )

◆ queueLOCKED_UNMODIFIED

#define queueLOCKED_UNMODIFIED   ( ( int8_t ) 0 )

◆ pxMutexHolder

#define pxMutexHolder   pcTail

◆ uxQueueType

#define uxQueueType   pcHead

◆ queueQUEUE_IS_MUTEX

#define queueQUEUE_IS_MUTEX   NULL

◆ queueSEMAPHORE_QUEUE_ITEM_LENGTH

#define queueSEMAPHORE_QUEUE_ITEM_LENGTH   ( ( UBaseType_t ) 0 )

◆ queueMUTEX_GIVE_BLOCK_TIME

#define queueMUTEX_GIVE_BLOCK_TIME   ( ( TickType_t ) 0U )

◆ queueYIELD_IF_USING_PREEMPTION

#define queueYIELD_IF_USING_PREEMPTION()

◆ prvLockQueue

#define prvLockQueue (   pxQueue)
Value:
taskENTER_CRITICAL(); \
{ \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
{ \
( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
} \
if( ( pxQueue )->cTxLock == queueUNLOCKED ) \
{ \
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
} \
} \
taskEXIT_CRITICAL()

Typedef Documentation

◆ xQUEUE

typedef struct QueueDefinition xQUEUE

◆ Queue_t

typedef xQUEUE Queue_t

Function Documentation

◆ prvUnlockQueue()

static void prvUnlockQueue ( Queue_t *const  pxQueue)
static
1795 {
1796  /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
1797 
1798  /* The lock counts contains the number of extra data items placed or
1799  removed from the queue while the queue was locked. When a queue is
1800  locked items can be added or removed, but the event lists cannot be
1801  updated. */
1803  {
1804  int8_t cTxLock = pxQueue->cTxLock;
1805 
1806  /* See if data was added to the queue while it was locked. */
1807  while( cTxLock > queueLOCKED_UNMODIFIED )
1808  {
1809  /* Data was posted while the queue was locked. Are any tasks
1810  blocked waiting for data to become available? */
1811  #if ( configUSE_QUEUE_SETS == 1 )
1812  {
1813  if( pxQueue->pxQueueSetContainer != NULL )
1814  {
1815  if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
1816  {
1817  /* The queue is a member of a queue set, and posting to
1818  the queue set caused a higher priority task to unblock.
1819  A context switch is required. */
1820  vTaskMissedYield();
1821  }
1822  else
1823  {
1825  }
1826  }
1827  else
1828  {
1829  /* Tasks that are removed from the event list will get
1830  added to the pending ready list as the scheduler is still
1831  suspended. */
1832  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1833  {
1834  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1835  {
1836  /* The task waiting has a higher priority so record that a
1837  context switch is required. */
1838  vTaskMissedYield();
1839  }
1840  else
1841  {
1843  }
1844  }
1845  else
1846  {
1847  break;
1848  }
1849  }
1850  }
1851  #else /* configUSE_QUEUE_SETS */
1852  {
1853  /* Tasks that are removed from the event list will get added to
1854  the pending ready list as the scheduler is still suspended. */
1855  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1856  {
1857  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1858  {
1859  /* The task waiting has a higher priority so record that
1860  a context switch is required. */
1861  vTaskMissedYield();
1862  }
1863  else
1864  {
1866  }
1867  }
1868  else
1869  {
1870  break;
1871  }
1872  }
1873  #endif /* configUSE_QUEUE_SETS */
1874 
1875  --cTxLock;
1876  }
1877 
1878  pxQueue->cTxLock = queueUNLOCKED;
1879  }
1881 
1882  /* Do the same for the Rx lock. */
1884  {
1885  int8_t cRxLock = pxQueue->cRxLock;
1886 
1887  while( cRxLock > queueLOCKED_UNMODIFIED )
1888  {
1889  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1890  {
1891  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1892  {
1893  vTaskMissedYield();
1894  }
1895  else
1896  {
1898  }
1899 
1900  --cRxLock;
1901  }
1902  else
1903  {
1904  break;
1905  }
1906  }
1907 
1908  pxQueue->cRxLock = queueUNLOCKED;
1909  }
1911 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ prvIsQueueEmpty()

static BaseType_t prvIsQueueEmpty ( const Queue_t *pxQueue)
static
1915 {
1916 BaseType_t xReturn;
1917 
1919  {
1920  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
1921  {
1922  xReturn = pdTRUE;
1923  }
1924  else
1925  {
1926  xReturn = pdFALSE;
1927  }
1928  }
1930 
1931  return xReturn;
1932 }
Here is the caller graph for this function:

◆ prvIsQueueFull()

static BaseType_t prvIsQueueFull ( const Queue_t *pxQueue)
static
1954 {
1955 BaseType_t xReturn;
1956 
1958  {
1959  if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
1960  {
1961  xReturn = pdTRUE;
1962  }
1963  else
1964  {
1965  xReturn = pdFALSE;
1966  }
1967  }
1969 
1970  return xReturn;
1971 }
Here is the caller graph for this function:

◆ prvCopyDataToQueue()

static BaseType_t prvCopyDataToQueue ( Queue_t *const  pxQueue,
const void *pvItemToQueue,
const BaseType_t  xPosition 
)
static
1698 {
1699 BaseType_t xReturn = pdFALSE;
1700 UBaseType_t uxMessagesWaiting;
1701 
1702  /* This function is called from a critical section. */
1703 
1704  uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1705 
1706  if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
1707  {
1708  #if ( configUSE_MUTEXES == 1 )
1709  {
1710  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1711  {
1712  /* The mutex is no longer being held. */
1713  xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
1714  pxQueue->pxMutexHolder = NULL;
1715  }
1716  else
1717  {
1719  }
1720  }
1721  #endif /* configUSE_MUTEXES */
1722  }
1723  else if( xPosition == queueSEND_TO_BACK )
1724  {
1725  ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
1726  pxQueue->pcWriteTo += pxQueue->uxItemSize;
1727  if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
1728  {
1729  pxQueue->pcWriteTo = pxQueue->pcHead;
1730  }
1731  else
1732  {
1734  }
1735  }
1736  else
1737  {
1738  ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1739  pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
1740  if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
1741  {
1742  pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
1743  }
1744  else
1745  {
1747  }
1748 
1749  if( xPosition == queueOVERWRITE )
1750  {
1751  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1752  {
1753  /* An item is not being added but overwritten, so subtract
1754  one from the recorded number of items in the queue so when
1755  one is added again below the number of recorded items remains
1756  correct. */
1757  --uxMessagesWaiting;
1758  }
1759  else
1760  {
1762  }
1763  }
1764  else
1765  {
1767  }
1768  }
1769 
1770  pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;
1771 
1772  return xReturn;
1773 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ prvCopyDataFromQueue()

static void prvCopyDataFromQueue ( Queue_t *const  pxQueue,
void *const  pvBuffer 
)
static
1777 {
1778  if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
1779  {
1780  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
1781  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
1782  {
1783  pxQueue->u.pcReadFrom = pxQueue->pcHead;
1784  }
1785  else
1786  {
1788  }
1789  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
1790  }
1791 }
Here is the caller graph for this function:

◆ prvInitialiseNewQueue()

static void prvInitialiseNewQueue ( const UBaseType_t  uxQueueLength,
const UBaseType_t  uxItemSize,
uint8_t *pucQueueStorage,
const uint8_t  ucQueueType,
Queue_t *pxNewQueue 
)
static
433 {
434  /* Remove compiler warnings about unused parameters should
435  configUSE_TRACE_FACILITY not be set to 1. */
436  ( void ) ucQueueType;
437 
438  if( uxItemSize == ( UBaseType_t ) 0 )
439  {
440  /* No RAM was allocated for the queue storage area, but PC head cannot
441  be set to NULL because NULL is used as a key to say the queue is used as
442  a mutex. Therefore just set pcHead to point to the queue as a benign
443  value that is known to be within the memory map. */
444  pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
445  }
446  else
447  {
448  /* Set the head to the start of the queue storage area. */
449  pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
450  }
451 
452  /* Initialise the queue members as described where the queue type is
453  defined. */
454  pxNewQueue->uxLength = uxQueueLength;
455  pxNewQueue->uxItemSize = uxItemSize;
456  ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
457 
458  #if ( configUSE_TRACE_FACILITY == 1 )
459  {
460  pxNewQueue->ucQueueType = ucQueueType;
461  }
462  #endif /* configUSE_TRACE_FACILITY */
463 
464  #if( configUSE_QUEUE_SETS == 1 )
465  {
466  pxNewQueue->pxQueueSetContainer = NULL;
467  }
468  #endif /* configUSE_QUEUE_SETS */
469 
470  traceQUEUE_CREATE( pxNewQueue );
471 }
Here is the call graph for this function:

◆ xQueueGenericReset()

BaseType_t xQueueGenericReset ( QueueHandle_t  xQueue,
BaseType_t  xNewQueue 
)
280 {
281 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
282 
283  configASSERT( pxQueue );
284 
286  {
287  pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
288  pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
289  pxQueue->pcWriteTo = pxQueue->pcHead;
290  pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
291  pxQueue->cRxLock = queueUNLOCKED;
292  pxQueue->cTxLock = queueUNLOCKED;
293 
294  if( xNewQueue == pdFALSE )
295  {
296  /* If there are tasks blocked waiting to read from the queue, then
297  the tasks will remain blocked as after this function exits the queue
298  will still be empty. If there are tasks blocked waiting to write to
299  the queue, then one should be unblocked as after this function exits
300  it will be possible to write to it. */
301  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
302  {
303  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
304  {
306  }
307  else
308  {
310  }
311  }
312  else
313  {
315  }
316  }
317  else
318  {
319  /* Ensure the event queues start in the correct state. */
320  vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
321  vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
322  }
323  }
325 
326  /* A value is returned for calling semantic consistency with previous
327  versions. */
328  return pdPASS;
329 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ xQueueGenericSend()

BaseType_t xQueueGenericSend ( QueueHandle_t  xQueue,
const void *const  pvItemToQueue,
TickType_t  xTicksToWait,
const BaseType_t  xCopyPosition 
)
724 {
725 BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
726 TimeOut_t xTimeOut;
727 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
728 
729  configASSERT( pxQueue );
730  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
731  configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
732  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
733  {
734  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
735  }
736  #endif
737 
738 
739  /* This function relaxes the coding standard somewhat to allow return
740  statements within the function itself. This is done in the interest
741  of execution time efficiency. */
742  for( ;; )
743  {
745  {
746  /* Is there room on the queue now? The running task must be the
747  highest priority task wanting to access the queue. If the head item
748  in the queue is to be overwritten then it does not matter if the
749  queue is full. */
750  if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
751  {
752  traceQUEUE_SEND( pxQueue );
753  xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
754 
755  #if ( configUSE_QUEUE_SETS == 1 )
756  {
757  if( pxQueue->pxQueueSetContainer != NULL )
758  {
759  if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
760  {
761  /* The queue is a member of a queue set, and posting
762  to the queue set caused a higher priority task to
763  unblock. A context switch is required. */
765  }
766  else
767  {
769  }
770  }
771  else
772  {
773  /* If there was a task waiting for data to arrive on the
774  queue then unblock it now. */
775  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
776  {
778  {
779  /* The unblocked task has a priority higher than
780  our own so yield immediately. Yes it is ok to
781  do this from within the critical section - the
782  kernel takes care of that. */
784  }
785  else
786  {
788  }
789  }
790  else if( xYieldRequired != pdFALSE )
791  {
792  /* This path is a special case that will only get
793  executed if the task was holding multiple mutexes
794  and the mutexes were given back in an order that is
795  different to that in which they were taken. */
797  }
798  else
799  {
801  }
802  }
803  }
804  #else /* configUSE_QUEUE_SETS */
805  {
806  /* If there was a task waiting for data to arrive on the
807  queue then unblock it now. */
808  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
809  {
811  {
812  /* The unblocked task has a priority higher than
813  our own so yield immediately. Yes it is ok to do
814  this from within the critical section - the kernel
815  takes care of that. */
817  }
818  else
819  {
821  }
822  }
823  else if( xYieldRequired != pdFALSE )
824  {
825  /* This path is a special case that will only get
826  executed if the task was holding multiple mutexes and
827  the mutexes were given back in an order that is
828  different to that in which they were taken. */
830  }
831  else
832  {
834  }
835  }
836  #endif /* configUSE_QUEUE_SETS */
837 
839  return pdPASS;
840  }
841  else
842  {
843  if( xTicksToWait == ( TickType_t ) 0 )
844  {
845  /* The queue was full and no block time is specified (or
846  the block time has expired) so leave now. */
848 
849  /* Return to the original privilege level before exiting
850  the function. */
851  traceQUEUE_SEND_FAILED( pxQueue );
852  return errQUEUE_FULL;
853  }
854  else if( xEntryTimeSet == pdFALSE )
855  {
856  /* The queue was full and a block time was specified so
857  configure the timeout structure. */
858  vTaskSetTimeOutState( &xTimeOut );
859  xEntryTimeSet = pdTRUE;
860  }
861  else
862  {
863  /* Entry time was already set. */
865  }
866  }
867  }
869 
870  /* Interrupts and other tasks can send to and receive from the queue
871  now the critical section has been exited. */
872 
873  vTaskSuspendAll();
874  prvLockQueue( pxQueue );
875 
876  /* Update the timeout state to see if it has expired yet. */
877  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
878  {
879  if( prvIsQueueFull( pxQueue ) != pdFALSE )
880  {
881  traceBLOCKING_ON_QUEUE_SEND( pxQueue );
882  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
883 
884  /* Unlocking the queue means queue events can effect the
885  event list. It is possible that interrupts occurring now
886  remove this task from the event list again - but as the
887  scheduler is suspended the task will go onto the pending
888  ready last instead of the actual ready list. */
889  prvUnlockQueue( pxQueue );
890 
891  /* Resuming the scheduler will move tasks from the pending
892  ready list into the ready list - so it is feasible that this
893  task is already in a ready list before it yields - in which
894  case the yield will not cause a context switch unless there
895  is also a higher priority task in the pending ready list. */
896  if( xTaskResumeAll() == pdFALSE )
897  {
899  }
900  }
901  else
902  {
903  /* Try again. */
904  prvUnlockQueue( pxQueue );
905  ( void ) xTaskResumeAll();
906  }
907  }
908  else
909  {
910  /* The timeout has expired. */
911  prvUnlockQueue( pxQueue );
912  ( void ) xTaskResumeAll();
913 
914  traceQUEUE_SEND_FAILED( pxQueue );
915  return errQUEUE_FULL;
916  }
917  }
918 }
Here is the call graph for this function:

◆ xQueueGenericSendFromISR()

BaseType_t xQueueGenericSendFromISR ( QueueHandle_t  xQueue,
const void *const  pvItemToQueue,
BaseType_t *const  pxHigherPriorityTaskWoken,
const BaseType_t  xCopyPosition 
)
922 {
923 BaseType_t xReturn;
924 UBaseType_t uxSavedInterruptStatus;
925 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
926 
927  configASSERT( pxQueue );
928  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
929  configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
930 
931  /* RTOS ports that support interrupt nesting have the concept of a maximum
932  system call (or maximum API call) interrupt priority. Interrupts that are
933  above the maximum system call priority are kept permanently enabled, even
934  when the RTOS kernel is in a critical section, but cannot make any calls to
935  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
936  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
937  failure if a FreeRTOS API function is called from an interrupt that has been
938  assigned a priority above the configured maximum system call priority.
939  Only FreeRTOS functions that end in FromISR can be called from interrupts
940  that have been assigned a priority at or (logically) below the maximum
941  system call interrupt priority. FreeRTOS maintains a separate interrupt
942  safe API to ensure interrupt entry is as fast and as simple as possible.
943  More information (albeit Cortex-M specific) is provided on the following
944  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
946 
947  /* Similar to xQueueGenericSend, except without blocking if there is no room
948  in the queue. Also don't directly wake a task that was blocked on a queue
949  read, instead return a flag to say whether a context switch is required or
950  not (i.e. has a task with a higher priority than us been woken by this
951  post). */
952  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
953  {
954  if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
955  {
956  const int8_t cTxLock = pxQueue->cTxLock;
957 
958  traceQUEUE_SEND_FROM_ISR( pxQueue );
959 
960  /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
961  semaphore or mutex. That means prvCopyDataToQueue() cannot result
962  in a task disinheriting a priority and prvCopyDataToQueue() can be
963  called here even though the disinherit function does not check if
964  the scheduler is suspended before accessing the ready lists. */
965  ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
966 
967  /* The event list is not altered if the queue is locked. This will
968  be done when the queue is unlocked later. */
969  if( cTxLock == queueUNLOCKED )
970  {
971  #if ( configUSE_QUEUE_SETS == 1 )
972  {
973  if( pxQueue->pxQueueSetContainer != NULL )
974  {
975  if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
976  {
977  /* The queue is a member of a queue set, and posting
978  to the queue set caused a higher priority task to
979  unblock. A context switch is required. */
980  if( pxHigherPriorityTaskWoken != NULL )
981  {
982  *pxHigherPriorityTaskWoken = pdTRUE;
983  }
984  else
985  {
987  }
988  }
989  else
990  {
992  }
993  }
994  else
995  {
996  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
997  {
999  {
1000  /* The task waiting has a higher priority so
1001  record that a context switch is required. */
1002  if( pxHigherPriorityTaskWoken != NULL )
1003  {
1004  *pxHigherPriorityTaskWoken = pdTRUE;
1005  }
1006  else
1007  {
1009  }
1010  }
1011  else
1012  {
1014  }
1015  }
1016  else
1017  {
1019  }
1020  }
1021  }
1022  #else /* configUSE_QUEUE_SETS */
1023  {
1024  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1025  {
1026  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1027  {
1028  /* The task waiting has a higher priority so record that a
1029  context switch is required. */
1030  if( pxHigherPriorityTaskWoken != NULL )
1031  {
1032  *pxHigherPriorityTaskWoken = pdTRUE;
1033  }
1034  else
1035  {
1037  }
1038  }
1039  else
1040  {
1042  }
1043  }
1044  else
1045  {
1047  }
1048  }
1049  #endif /* configUSE_QUEUE_SETS */
1050  }
1051  else
1052  {
1053  /* Increment the lock count so the task that unlocks the queue
1054  knows that data was posted while it was locked. */
1055  pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
1056  }
1057 
1058  xReturn = pdPASS;
1059  }
1060  else
1061  {
1063  xReturn = errQUEUE_FULL;
1064  }
1065  }
1066  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1067 
1068  return xReturn;
1069 }
Here is the call graph for this function:

◆ xQueueGiveFromISR()

BaseType_t xQueueGiveFromISR ( QueueHandle_t  xQueue,
BaseType_t *const  pxHigherPriorityTaskWoken 
)
1073 {
1074 BaseType_t xReturn;
1075 UBaseType_t uxSavedInterruptStatus;
1076 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1077 
1078  /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1079  item size is 0. Don't directly wake a task that was blocked on a queue
1080  read, instead return a flag to say whether a context switch is required or
1081  not (i.e. has a task with a higher priority than us been woken by this
1082  post). */
1083 
1084  configASSERT( pxQueue );
1085 
1086  /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
1087  if the item size is not 0. */
1088  configASSERT( pxQueue->uxItemSize == 0 );
1089 
1090  /* Normally a mutex would not be given from an interrupt, especially if
1091  there is a mutex holder, as priority inheritance makes no sense for an
1092  interrupts, only tasks. */
1093  configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );
1094 
1095  /* RTOS ports that support interrupt nesting have the concept of a maximum
1096  system call (or maximum API call) interrupt priority. Interrupts that are
1097  above the maximum system call priority are kept permanently enabled, even
1098  when the RTOS kernel is in a critical section, but cannot make any calls to
1099  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1100  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1101  failure if a FreeRTOS API function is called from an interrupt that has been
1102  assigned a priority above the configured maximum system call priority.
1103  Only FreeRTOS functions that end in FromISR can be called from interrupts
1104  that have been assigned a priority at or (logically) below the maximum
1105  system call interrupt priority. FreeRTOS maintains a separate interrupt
1106  safe API to ensure interrupt entry is as fast and as simple as possible.
1107  More information (albeit Cortex-M specific) is provided on the following
1108  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1110 
1111  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1112  {
1113  const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1114 
1115  /* When the queue is used to implement a semaphore no data is ever
1116  moved through the queue but it is still valid to see if the queue 'has
1117  space'. */
1118  if( uxMessagesWaiting < pxQueue->uxLength )
1119  {
1120  const int8_t cTxLock = pxQueue->cTxLock;
1121 
1122  traceQUEUE_SEND_FROM_ISR( pxQueue );
1123 
1124  /* A task can only have an inherited priority if it is a mutex
1125  holder - and if there is a mutex holder then the mutex cannot be
1126  given from an ISR. As this is the ISR version of the function it
1127  can be assumed there is no mutex holder and no need to determine if
1128  priority disinheritance is needed. Simply increase the count of
1129  messages (semaphores) available. */
1130  pxQueue->uxMessagesWaiting = uxMessagesWaiting + 1;
1131 
1132  /* The event list is not altered if the queue is locked. This will
1133  be done when the queue is unlocked later. */
1134  if( cTxLock == queueUNLOCKED )
1135  {
1136  #if ( configUSE_QUEUE_SETS == 1 )
1137  {
1138  if( pxQueue->pxQueueSetContainer != NULL )
1139  {
1140  if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
1141  {
1142  /* The semaphore is a member of a queue set, and
1143  posting to the queue set caused a higher priority
1144  task to unblock. A context switch is required. */
1145  if( pxHigherPriorityTaskWoken != NULL )
1146  {
1147  *pxHigherPriorityTaskWoken = pdTRUE;
1148  }
1149  else
1150  {
1152  }
1153  }
1154  else
1155  {
1157  }
1158  }
1159  else
1160  {
1161  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1162  {
1163  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1164  {
1165  /* The task waiting has a higher priority so
1166  record that a context switch is required. */
1167  if( pxHigherPriorityTaskWoken != NULL )
1168  {
1169  *pxHigherPriorityTaskWoken = pdTRUE;
1170  }
1171  else
1172  {
1174  }
1175  }
1176  else
1177  {
1179  }
1180  }
1181  else
1182  {
1184  }
1185  }
1186  }
1187  #else /* configUSE_QUEUE_SETS */
1188  {
1189  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1190  {
1191  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1192  {
1193  /* The task waiting has a higher priority so record that a
1194  context switch is required. */
1195  if( pxHigherPriorityTaskWoken != NULL )
1196  {
1197  *pxHigherPriorityTaskWoken = pdTRUE;
1198  }
1199  else
1200  {
1202  }
1203  }
1204  else
1205  {
1207  }
1208  }
1209  else
1210  {
1212  }
1213  }
1214  #endif /* configUSE_QUEUE_SETS */
1215  }
1216  else
1217  {
1218  /* Increment the lock count so the task that unlocks the queue
1219  knows that data was posted while it was locked. */
1220  pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
1221  }
1222 
1223  xReturn = pdPASS;
1224  }
1225  else
1226  {
1228  xReturn = errQUEUE_FULL;
1229  }
1230  }
1231  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1232 
1233  return xReturn;
1234 }
Here is the call graph for this function:

◆ xQueueGenericReceive()

BaseType_t xQueueGenericReceive ( QueueHandle_t  xQueue,
void *const  pvBuffer,
TickType_t  xTicksToWait,
const BaseType_t  xJustPeeking 
)
/* Receive (or, when xJustPeeking != pdFALSE, peek without removing) one item
   from the queue, copying it into pvBuffer.  Blocks the calling task for up
   to xTicksToWait ticks while the queue is empty.  Returns pdPASS when an
   item was obtained, errQUEUE_EMPTY on timeout or when the queue is empty
   and no block time was given.
   NOTE(review): this listing was produced by a documentation extractor and
   several original source lines appear to have been dropped (the numbering
   skips, e.g. 1258, 1384/1388, 1401/1405) — presumably taskENTER_CRITICAL()/
   taskEXIT_CRITICAL(), a mutex-holder NULL check, and
   mtCOVERAGE_TEST_MARKER() calls; that is why some braces enclose empty
   bodies.  Verify against the real queue.c before relying on the exact
   text below. */
1238 {
1239 BaseType_t xEntryTimeSet = pdFALSE;
1240 TimeOut_t xTimeOut;
1241 int8_t *pcOriginalReadPosition;
1242 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1243 
1244  configASSERT( pxQueue );
1245  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1246  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1247  {
1248  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1249  }
1250  #endif
1251 
1252  /* This function relaxes the coding standard somewhat to allow return
1253  statements within the function itself. This is done in the interest
1254  of execution time efficiency. */
1255 
1256  for( ;; )
1257  {
     /* NOTE(review): the brace below opens a critical section; the
        taskENTER_CRITICAL() call on the missing original line 1258 was
        apparently stripped by the extractor. */
1259  {
1260  const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1261 
1262  /* Is there data in the queue now? To be running the calling task
1263  must be the highest priority task wanting to access the queue. */
1264  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1265  {
1266  /* Remember the read position in case the queue is only being
1267  peeked. */
1268  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1269 
1270  prvCopyDataFromQueue( pxQueue, pvBuffer );
1271 
1272  if( xJustPeeking == pdFALSE )
1273  {
1274  traceQUEUE_RECEIVE( pxQueue );
1275 
1276  /* Actually removing data, not just peeking. */
1277  pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;
1278 
1279  #if ( configUSE_MUTEXES == 1 )
1280  {
1281  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1282  {
1283  /* Record the information required to implement
1284  priority inheritance should it become necessary. */
1285  pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
1286  }
1287  else
1288  {
1290  }
1291  }
1292  #endif /* configUSE_MUTEXES */
1293 
     /* An item was removed, so a task blocked waiting to send may now
        be able to proceed — unblock the highest-priority sender. */
1294  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1295  {
1296  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1297  {
1299  }
1300  else
1301  {
1303  }
1304  }
1305  else
1306  {
1308  }
1309  }
1310  else
1311  {
1312  traceQUEUE_PEEK( pxQueue );
1313 
1314  /* The data is not being removed, so reset the read
1315  pointer. */
1316  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1317 
1318  /* The data is being left in the queue, so see if there are
1319  any other tasks waiting for the data. */
1320  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1321  {
1322  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1323  {
1324  /* The task waiting has a higher priority than this task. */
1326  }
1327  else
1328  {
1330  }
1331  }
1332  else
1333  {
1335  }
1336  }
1337 
1339  return pdPASS;
1340  }
1341  else
1342  {
1343  if( xTicksToWait == ( TickType_t ) 0 )
1344  {
1345  /* The queue was empty and no block time is specified (or
1346  the block time has expired) so leave now. */
1348  traceQUEUE_RECEIVE_FAILED( pxQueue );
1349  return errQUEUE_EMPTY;
1350  }
1351  else if( xEntryTimeSet == pdFALSE )
1352  {
1353  /* The queue was empty and a block time was specified so
1354  configure the timeout structure. */
1355  vTaskSetTimeOutState( &xTimeOut );
1356  xEntryTimeSet = pdTRUE;
1357  }
1358  else
1359  {
1360  /* Entry time was already set. */
1362  }
1363  }
1364  }
1366 
1367  /* Interrupts and other tasks can send to and receive from the queue
1368  now the critical section has been exited. */
1369 
1370  vTaskSuspendAll();
1371  prvLockQueue( pxQueue );
1372 
1373  /* Update the timeout state to see if it has expired yet. */
1374  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1375  {
1376  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1377  {
1378  traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1379 
1380  #if ( configUSE_MUTEXES == 1 )
1381  {
1382  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1383  {
     /* NOTE(review): the condition guarding this block (original line
        1384) was stripped by the extractor — presumably a critical
        section or a NULL check on pxMutexHolder; confirm against the
        real source. */
1385  {
1386  vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
1387  }
1389  }
1390  else
1391  {
1393  }
1394  }
1395  #endif
1396 
     /* Block this task on the queue's receive event list until data
        arrives or the timeout expires. */
1397  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1398  prvUnlockQueue( pxQueue );
1399  if( xTaskResumeAll() == pdFALSE )
1400  {
1402  }
1403  else
1404  {
1406  }
1407  }
1408  else
1409  {
1410  /* Try again. */
1411  prvUnlockQueue( pxQueue );
1412  ( void ) xTaskResumeAll();
1413  }
1414  }
1415  else
1416  {
     /* Timeout expired: one final empty check decides between failure
        and looping round to collect data that arrived just in time. */
1417  prvUnlockQueue( pxQueue );
1418  ( void ) xTaskResumeAll();
1419 
1420  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1421  {
1422  traceQUEUE_RECEIVE_FAILED( pxQueue );
1423  return errQUEUE_EMPTY;
1424  }
1425  else
1426  {
1428  }
1429  }
1430  }
1431 }
Here is the call graph for this function:

◆ xQueueReceiveFromISR()

BaseType_t xQueueReceiveFromISR ( QueueHandle_t  xQueue,
void *const  pvBuffer,
BaseType_t *const  pxHigherPriorityTaskWoken 
)
/* Interrupt-safe receive: copy one item from the queue into pvBuffer
   without ever blocking.  Returns pdPASS when an item was removed, pdFAIL
   when the queue was empty.  Sets *pxHigherPriorityTaskWoken to pdTRUE
   (when non-NULL) if removing the item unblocked a sender of higher
   priority than the interrupted task.
   NOTE(review): doc-extractor listing — gaps in the embedded numbering
   (e.g. 1457, 1491, 1496, 1501, 1516) indicate stripped lines, presumably
   portASSERT_IF_INTERRUPT_PRIORITY_INVALID() and mtCOVERAGE_TEST_MARKER()
   calls; hence the empty else bodies below. */
1435 {
1436 BaseType_t xReturn;
1437 UBaseType_t uxSavedInterruptStatus;
1438 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1439 
1440  configASSERT( pxQueue );
1441  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1442 
1443  /* RTOS ports that support interrupt nesting have the concept of a maximum
1444  system call (or maximum API call) interrupt priority. Interrupts that are
1445  above the maximum system call priority are kept permanently enabled, even
1446  when the RTOS kernel is in a critical section, but cannot make any calls to
1447  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1448  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1449  failure if a FreeRTOS API function is called from an interrupt that has been
1450  assigned a priority above the configured maximum system call priority.
1451  Only FreeRTOS functions that end in FromISR can be called from interrupts
1452  that have been assigned a priority at or (logically) below the maximum
1453  system call interrupt priority. FreeRTOS maintains a separate interrupt
1454  safe API to ensure interrupt entry is as fast and as simple as possible.
1455  More information (albeit Cortex-M specific) is provided on the following
1456  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1458 
1459  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1460  {
1461  const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1462 
1463  /* Cannot block in an ISR, so check there is data available. */
1464  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1465  {
     /* Snapshot the Rx lock state before mutating the queue. */
1466  const int8_t cRxLock = pxQueue->cRxLock;
1467 
1468  traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
1469 
1470  prvCopyDataFromQueue( pxQueue, pvBuffer );
1471  pxQueue->uxMessagesWaiting = uxMessagesWaiting - 1;
1472 
1473  /* If the queue is locked the event list will not be modified.
1474  Instead update the lock count so the task that unlocks the queue
1475  will know that an ISR has removed data while the queue was
1476  locked. */
1477  if( cRxLock == queueUNLOCKED )
1478  {
1479  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1480  {
1481  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1482  {
1483  /* The task waiting has a higher priority than us so
1484  force a context switch. */
1485  if( pxHigherPriorityTaskWoken != NULL )
1486  {
1487  *pxHigherPriorityTaskWoken = pdTRUE;
1488  }
1489  else
1490  {
1492  }
1493  }
1494  else
1495  {
1497  }
1498  }
1499  else
1500  {
1502  }
1503  }
1504  else
1505  {
1506  /* Increment the lock count so the task that unlocks the queue
1507  knows that data was removed while it was locked. */
1508  pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
1509  }
1510 
1511  xReturn = pdPASS;
1512  }
1513  else
1514  {
1515  xReturn = pdFAIL;
1517  }
1518  }
1519  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1520 
1521  return xReturn;
1522 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ xQueuePeekFromISR()

BaseType_t xQueuePeekFromISR ( QueueHandle_t  xQueue,
void *const  pvBuffer 
)
/* Interrupt-safe peek: copy the next item into pvBuffer WITHOUT removing it
   from the queue (the read pointer is saved and restored around the copy).
   Never blocks.  Returns pdPASS when an item was copied, pdFAIL when the
   queue was empty.  Peeking a zero-item-size queue (a semaphore) is
   asserted against.
   NOTE(review): doc-extractor listing — numbering gaps (1550, 1570)
   indicate stripped lines, presumably
   portASSERT_IF_INTERRUPT_PRIORITY_INVALID() and a failure trace macro. */
1526 {
1527 BaseType_t xReturn;
1528 UBaseType_t uxSavedInterruptStatus;
1529 int8_t *pcOriginalReadPosition;
1530 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1531 
1532  configASSERT( pxQueue );
1533  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1534  configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
1535 
1536  /* RTOS ports that support interrupt nesting have the concept of a maximum
1537  system call (or maximum API call) interrupt priority. Interrupts that are
1538  above the maximum system call priority are kept permanently enabled, even
1539  when the RTOS kernel is in a critical section, but cannot make any calls to
1540  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1541  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1542  failure if a FreeRTOS API function is called from an interrupt that has been
1543  assigned a priority above the configured maximum system call priority.
1544  Only FreeRTOS functions that end in FromISR can be called from interrupts
1545  that have been assigned a priority at or (logically) below the maximum
1546  system call interrupt priority. FreeRTOS maintains a separate interrupt
1547  safe API to ensure interrupt entry is as fast and as simple as possible.
1548  More information (albeit Cortex-M specific) is provided on the following
1549  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1551 
1552  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1553  {
1554  /* Cannot block in an ISR, so check there is data available. */
1555  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1556  {
1557  traceQUEUE_PEEK_FROM_ISR( pxQueue );
1558 
1559  /* Remember the read position so it can be reset as nothing is
1560  actually being removed from the queue. */
1561  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1562  prvCopyDataFromQueue( pxQueue, pvBuffer );
1563  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1564 
1565  xReturn = pdPASS;
1566  }
1567  else
1568  {
1569  xReturn = pdFAIL;
1571  }
1572  }
1573  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1574 
1575  return xReturn;
1576 }
Here is the call graph for this function:

◆ uxQueueMessagesWaiting()

UBaseType_t uxQueueMessagesWaiting ( const QueueHandle_t  xQueue)
/* Return the number of items currently held in the queue (task context).
   NOTE(review): the numbering gaps (1585, 1589) suggest the extractor
   dropped taskENTER_CRITICAL()/taskEXIT_CRITICAL() lines around the read —
   confirm against the real queue.c. */
1580 {
1581 UBaseType_t uxReturn;
1582 
1583  configASSERT( xQueue );
1584 
1586  {
1587  uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1588  }
1590 
1591  return uxReturn;
1592 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
Here is the caller graph for this function:

◆ uxQueueSpacesAvailable()

UBaseType_t uxQueueSpacesAvailable ( const QueueHandle_t  xQueue)
/* Return the number of free item slots remaining in the queue, computed as
   uxLength - uxMessagesWaiting (task context).
   NOTE(review): numbering gaps (1603, 1607) suggest stripped
   taskENTER_CRITICAL()/taskEXIT_CRITICAL() lines around the computation. */
1596 {
1597 UBaseType_t uxReturn;
1598 Queue_t *pxQueue;
1599 
1600  pxQueue = ( Queue_t * ) xQueue;
1601  configASSERT( pxQueue );
1602 
1604  {
1605  uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
1606  }
1608 
1609  return uxReturn;
1610 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
Here is the caller graph for this function:

◆ uxQueueMessagesWaitingFromISR()

UBaseType_t uxQueueMessagesWaitingFromISR ( const QueueHandle_t  xQueue)
/* Interrupt-safe variant of uxQueueMessagesWaiting(): read the current item
   count without entering a critical section (a single read of the volatile
   uxMessagesWaiting member). */
1614 {
1615 UBaseType_t uxReturn;
1616 
1617  configASSERT( xQueue );
1618 
1619  uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1620 
1621  return uxReturn;
1622 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
Here is the caller graph for this function:

◆ vQueueDelete()

void vQueueDelete ( QueueHandle_t  xQueue)
/* Delete a queue: unregister it from the queue registry (when the registry
   is enabled) and free its storage when — and only when — it was allocated
   dynamically.  Statically allocated queues are left untouched.  The caller
   must ensure no task is blocked on the queue. */
1626 {
1627 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1628 
1629  configASSERT( pxQueue );
1630  traceQUEUE_DELETE( pxQueue );
1631 
1632  #if ( configQUEUE_REGISTRY_SIZE > 0 )
1633  {
1634  vQueueUnregisterQueue( pxQueue );
1635  }
1636  #endif
1637 
1638  #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
1639  {
1640  /* The queue can only have been allocated dynamically - free it
1641  again. */
1642  vPortFree( pxQueue );
1643  }
1644  #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
1645  {
1646  /* The queue could have been allocated statically or dynamically, so
1647  check before attempting to free the memory. */
1648  if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
1649  {
1650  vPortFree( pxQueue );
1651  }
1652  else
1653  {
     /* Statically allocated: nothing to free.  (The mtCOVERAGE_TEST_MARKER()
        call presumably on the stripped line 1654 is absent from this
        listing.) */
1655  }
1656  }
1657  #else
1658  {
1659  /* The queue must have been statically allocated, so is not going to be
1660  deleted. Avoid compiler warnings about the unused parameter. */
1661  ( void ) pxQueue;
1662  }
1663  #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
1664 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ xQueueIsQueueEmptyFromISR()

BaseType_t xQueueIsQueueEmptyFromISR ( const QueueHandle_t  xQueue)
/* Interrupt-safe emptiness test: returns pdTRUE when the queue holds no
   items, pdFALSE otherwise.  A single read of the volatile message count —
   no critical section is taken. */
1936 {
1937 BaseType_t xReturn;
1938 
1939  configASSERT( xQueue );
1940  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
1941  {
1942  xReturn = pdTRUE;
1943  }
1944  else
1945  {
1946  xReturn = pdFALSE;
1947  }
1948 
1949  return xReturn;
1950 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

◆ xQueueIsQueueFullFromISR()

BaseType_t xQueueIsQueueFullFromISR ( const QueueHandle_t  xQueue)
/* Interrupt-safe fullness test: returns pdTRUE when the number of queued
   items equals the queue's capacity (uxLength), pdFALSE otherwise.  No
   critical section is taken. */
1975 {
1976 BaseType_t xReturn;
1977 
1978  configASSERT( xQueue );
1979  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
1980  {
1981  xReturn = pdTRUE;
1982  }
1983  else
1984  {
1985  xReturn = pdFALSE;
1986  }
1987 
1988  return xReturn;
1989 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
vQueueUnregisterQueue
#define vQueueUnregisterQueue(xQueue)
Definition: FreeRTOS.h:325
prvCopyDataFromQueue
static PRIVILEGED_FUNCTION void prvCopyDataFromQueue(Queue_t *const pxQueue, void *const pvBuffer)
Definition: queue.c:1776
traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:498
errQUEUE_FULL
#define errQUEUE_FULL
Definition: projdefs.h:92
QueueDefinition::xTasksWaitingToReceive
List_t xTasksWaitingToReceive
Definition: queue.c:143
queueOVERWRITE
#define queueOVERWRITE
Definition: queue.h:107
portSET_INTERRUPT_MASK_FROM_ISR
#define portSET_INTERRUPT_MASK_FROM_ISR()
Definition: FreeRTOS.h:300
xTaskGetSchedulerState
PRIVILEGED_FUNCTION BaseType_t xTaskGetSchedulerState(void)
configASSERT
#define configASSERT(x)
Definition: FreeRTOSConfig.h:162
QueueDefinition::uxItemSize
UBaseType_t uxItemSize
Definition: queue.c:147
xTaskPriorityDisinherit
PRIVILEGED_FUNCTION BaseType_t xTaskPriorityDisinherit(TaskHandle_t const pxMutexHolder)
queueYIELD_IF_USING_PREEMPTION
#define queueYIELD_IF_USING_PREEMPTION()
Definition: queue.c:120
QueueDefinition::pcReadFrom
int8_t * pcReadFrom
Definition: queue.c:138
pdFAIL
#define pdFAIL
Definition: projdefs.h:90
vTaskPriorityInherit
PRIVILEGED_FUNCTION void vTaskPriorityInherit(TaskHandle_t const pxMutexHolder)
QueueDefinition::cTxLock
volatile int8_t cTxLock
Definition: queue.c:150
traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE(pxQueue)
Definition: FreeRTOS.h:470
queueLOCKED_UNMODIFIED
#define queueLOCKED_UNMODIFIED
Definition: queue.c:95
vTaskMissedYield
PRIVILEGED_FUNCTION void vTaskMissedYield(void)
Definition: tasks.c:3076
vListInitialise
PRIVILEGED_FUNCTION void vListInitialise(List_t *const pxList)
Definition: list.c:79
QueueDefinition::pcWriteTo
int8_t * pcWriteTo
Definition: queue.c:134
QueueDefinition::xTasksWaitingToSend
List_t xTasksWaitingToSend
Definition: queue.c:142
prvIsQueueFull
static PRIVILEGED_FUNCTION BaseType_t prvIsQueueFull(const Queue_t *pxQueue)
Definition: queue.c:1953
NULL
#define NULL
Definition: usbd_def.h:53
xQueueGenericReset
BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
Definition: queue.c:279
pdPASS
#define pdPASS
Definition: projdefs.h:89
TickType_t
uint32_t TickType_t
Definition: portmacro.h:105
queueUNLOCKED
#define queueUNLOCKED
Definition: queue.c:94
traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:490
traceQUEUE_PEEK_FROM_ISR
#define traceQUEUE_PEEK_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:478
traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED(pxQueue)
Definition: FreeRTOS.h:466
xTaskRemoveFromEventList
PRIVILEGED_FUNCTION BaseType_t xTaskRemoveFromEventList(const List_t *const pxEventList)
Definition: tasks.c:2894
taskENTER_CRITICAL
#define taskENTER_CRITICAL()
Definition: task.h:217
vTaskPlaceOnEventList
PRIVILEGED_FUNCTION void vTaskPlaceOnEventList(List_t *const pxEventList, const TickType_t xTicksToWait)
Definition: tasks.c:2820
pvTaskIncrementMutexHeldCount
PRIVILEGED_FUNCTION void * pvTaskIncrementMutexHeldCount(void)
pdFALSE
#define pdFALSE
Definition: projdefs.h:86
UBaseType_t
unsigned long UBaseType_t
Definition: portmacro.h:99
QueueDefinition::u
union QueueDefinition::@57 u
prvLockQueue
#define prvLockQueue(pxQueue)
Definition: queue.c:264
xTIME_OUT
Definition: task.h:135
traceQUEUE_PEEK
#define traceQUEUE_PEEK(pxQueue)
Definition: FreeRTOS.h:474
void
void
Definition: png.h:1083
traceQUEUE_PEEK_FROM_ISR_FAILED
#define traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:502
QueueDefinition::cRxLock
volatile int8_t cRxLock
Definition: queue.c:149
traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND(pxQueue)
Definition: FreeRTOS.h:404
prvIsQueueEmpty
static PRIVILEGED_FUNCTION BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue)
Definition: queue.c:1914
listLIST_IS_EMPTY
#define listLIST_IS_EMPTY(pxList)
Definition: list.h:291
traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:494
QueueDefinition
Definition: queue.c:130
taskSCHEDULER_SUSPENDED
#define taskSCHEDULER_SUSPENDED
Definition: task.h:257
uint8_t
const uint8_t[]
Definition: 404_html.c:3
prvUnlockQueue
static PRIVILEGED_FUNCTION void prvUnlockQueue(Queue_t *const pxQueue)
Definition: queue.c:1794
traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:486
errQUEUE_EMPTY
#define errQUEUE_EMPTY
Definition: projdefs.h:91
QueueDefinition::uxMessagesWaiting
volatile UBaseType_t uxMessagesWaiting
Definition: queue.c:145
queueSEND_TO_BACK
#define queueSEND_TO_BACK
Definition: queue.h:105
portASSERT_IF_INTERRUPT_PRIORITY_INVALID
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
Definition: FreeRTOS.h:740
traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue)
Definition: FreeRTOS.h:396
QueueDefinition::uxLength
UBaseType_t uxLength
Definition: queue.c:146
portYIELD_WITHIN_API
#define portYIELD_WITHIN_API
Definition: FreeRTOS.h:692
traceQUEUE_SEND
#define traceQUEUE_SEND(pxQueue)
Definition: FreeRTOS.h:462
BaseType_t
long BaseType_t
Definition: portmacro.h:98
pdTRUE
#define pdTRUE
Definition: projdefs.h:87
QueueDefinition::pcHead
int8_t * pcHead
Definition: queue.c:132
xTaskResumeAll
PRIVILEGED_FUNCTION BaseType_t xTaskResumeAll(void)
Definition: tasks.c:2017
traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED(pxQueue)
Definition: FreeRTOS.h:482
vPortFree
PRIVILEGED_FUNCTION void vPortFree(void *pv)
Definition: heap_4.c:305
traceQUEUE_DELETE
#define traceQUEUE_DELETE(pxQueue)
Definition: FreeRTOS.h:506
taskEXIT_CRITICAL
#define taskEXIT_CRITICAL()
Definition: task.h:232
portCLEAR_INTERRUPT_MASK_FROM_ISR
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedStatusValue)
Definition: FreeRTOS.h:304
traceQUEUE_CREATE
#define traceQUEUE_CREATE(pxNewQueue)
Definition: FreeRTOS.h:422
QueueDefinition::pcTail
int8_t * pcTail
Definition: queue.c:133
vTaskSuspendAll
PRIVILEGED_FUNCTION void vTaskSuspendAll(void)
Definition: tasks.c:1944
xTaskCheckForTimeOut
PRIVILEGED_FUNCTION BaseType_t xTaskCheckForTimeOut(TimeOut_t *const pxTimeOut, TickType_t *const pxTicksToWait)
Definition: tasks.c:3015
prvCopyDataToQueue
static PRIVILEGED_FUNCTION BaseType_t prvCopyDataToQueue(Queue_t *const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition)
Definition: queue.c:1697
queueQUEUE_IS_MUTEX
#define queueQUEUE_IS_MUTEX
Definition: queue.c:110
mtCOVERAGE_TEST_MARKER
#define mtCOVERAGE_TEST_MARKER()
Definition: FreeRTOS.h:748
vTaskSetTimeOutState
PRIVILEGED_FUNCTION void vTaskSetTimeOutState(TimeOut_t *const pxTimeOut)
Definition: tasks.c:3007