/*
 * FreeRTOS Kernel V10.0.0
 * Copyright (C) 2017 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.  If you wish to use our Amazon
 * FreeRTOS name, please do so in a fair use way that does not cause confusion.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

28 
29 #include <stdlib.h>
30 #include <string.h>
31 
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33 all the API functions to use the MPU wrappers. That should only be done when
34 task.h is included from an application file. */
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36 
37 #include "FreeRTOS.h"
38 #include "task.h"
39 #include "queue.h"
40 
41 #if ( configUSE_CO_ROUTINES == 1 )
42  #include "croutine.h"
43 #endif
44 
45 /* Lint e961 and e750 are suppressed as a MISRA exception justified because the
46 MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined for the
47 header files above, but not in this file, in order to generate the correct
48 privileged Vs unprivileged linkage and placement. */
49 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750. */
50 
51 
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED					( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
pcTail pointer actually points to the mutex holder (if any).  Map alternative
names to the pcHead and pcTail structure members to ensure the readability of
the code is maintained despite this dual use of two structure members.  An
alternative implementation would be to use a union, but use of a union is
against the coding standard (although an exception to the standard has been
permitted where the dual use also significantly changes the type of the
structure member). */
#define pxMutexHolder					pcTail
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif
83 
84 /*
85  * Definition of the queue used by the scheduler.
86  * Items are queued by copy, not reference. See the following link for the
87  * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
88  */
89 typedef struct QueueDefinition
90 {
91  int8_t *pcHead; /*< Points to the beginning of the queue storage area. */
92  int8_t *pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
93  int8_t *pcWriteTo; /*< Points to the free next place in the storage area. */
94 
95  union /* Use of a union is an exception to the coding standard to ensure two mutually exclusive structure members don't appear simultaneously (wasting RAM). */
96  {
97  int8_t *pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */
98  UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
99  } u;
100 
101  List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */
102  List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */
103 
104  volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
105  UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
106  UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
107 
108  volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
109  volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
110 
111  #if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
112  uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
113  #endif
114 
115  #if ( configUSE_QUEUE_SETS == 1 )
116  struct QueueDefinition *pxQueueSetContainer;
117  #endif
118 
119  #if ( configUSE_TRACE_FACILITY == 1 )
120  UBaseType_t uxQueueNumber;
121  uint8_t ucQueueType;
122  #endif
123 
124 } xQUEUE;
125 
126 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
127 name below to enable the use of older kernel aware debuggers. */
128 typedef xQUEUE Queue_t;
129 
130 /*-----------------------------------------------------------*/
131 
/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to the
	new xQueueRegistryItem name below to enable the use of older kernel aware
	debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */
158 
159 /*
160  * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
161  * prevent an ISR from adding or removing items to the queue, but does prevent
162  * an ISR from removing tasks from the queue event lists. If an ISR finds a
163  * queue is locked it will instead increment the appropriate queue lock count
164  * to indicate that a task may require unblocking. When the queue in unlocked
165  * these lock counts are inspected, and the appropriate action taken.
166  */
167 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
168 
169 /*
170  * Uses a critical section to determine if there is any data in a queue.
171  *
172  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
173  */
174 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
175 
176 /*
177  * Uses a critical section to determine if there is any space in a queue.
178  *
179  * @return pdTRUE if there is no space, otherwise pdFALSE;
180  */
181 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
182 
183 /*
184  * Copies an item into the queue, either at the front of the queue or the
185  * back of the queue.
186  */
187 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
188 
189 /*
190  * Copies an item out of a queue.
191  */
192 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
193 
194 #if ( configUSE_QUEUE_SETS == 1 )
195  /*
196  * Checks to see if a queue is a member of a queue set, and if so, notifies
197  * the queue set that the queue contains data.
198  */
199  static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
200 #endif
201 
202 /*
203  * Called after a Queue_t structure has been allocated either statically or
204  * dynamically to fill in the structure's members.
205  */
206 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
207 
208 /*
209  * Mutexes are a special type of queue. When a mutex is created, first the
210  * queue is created, then prvInitialiseMutex() is called to configure the queue
211  * as a mutex.
212  */
213 #if( configUSE_MUTEXES == 1 )
214  static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
215 #endif
216 
217 #if( configUSE_MUTEXES == 1 )
218  /*
219  * If a task waiting for a mutex causes the mutex holder to inherit a
220  * priority, but the waiting task times out, then the holder should
221  * disinherit the priority - but only down to the highest priority of any
222  * other tasks that are waiting for the same mutex. This function returns
223  * that priority.
224  */
225  static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
226 #endif
227 /*-----------------------------------------------------------*/
228 
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL();									\
	{														\
		if( ( pxQueue )->cRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->cTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL()
/*-----------------------------------------------------------*/
247 
249 {
250 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
251 
252  configASSERT( pxQueue );
253 
255  {
256  pxQueue->pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
257  pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
258  pxQueue->pcWriteTo = pxQueue->pcHead;
259  pxQueue->u.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - ( UBaseType_t ) 1U ) * pxQueue->uxItemSize );
260  pxQueue->cRxLock = queueUNLOCKED;
261  pxQueue->cTxLock = queueUNLOCKED;
262 
263  if( xNewQueue == pdFALSE )
264  {
265  /* If there are tasks blocked waiting to read from the queue, then
266  the tasks will remain blocked as after this function exits the queue
267  will still be empty. If there are tasks blocked waiting to write to
268  the queue, then one should be unblocked as after this function exits
269  it will be possible to write to it. */
270  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
271  {
272  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
273  {
275  }
276  else
277  {
279  }
280  }
281  else
282  {
284  }
285  }
286  else
287  {
288  /* Ensure the event queues start in the correct state. */
289  vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
290  vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
291  }
292  }
294 
295  /* A value is returned for calling semantic consistency with previous
296  versions. */
297  return pdPASS;
298 }
299 /*-----------------------------------------------------------*/
300 
#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

		/* The StaticQueue_t structure and the queue storage area must be
		supplied. */
		configASSERT( pxStaticQueue != NULL );

		/* A queue storage area should be provided if the item size is not 0, and
		should not be provided if the item size is 0. */
		configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
		configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

		#if( configASSERT_DEFINED == 1 )
		{
			/* Sanity check that the size of the structure used to declare a
			variable of type StaticQueue_t or StaticSemaphore_t equals the size of
			the real queue and semaphore structures. */
			volatile size_t xSize = sizeof( StaticQueue_t );
			configASSERT( xSize == sizeof( Queue_t ) );
		}
		#endif /* configASSERT_DEFINED */

		/* The address of a statically allocated queue was passed in, use it.
		The address of a statically allocated storage area was also passed in
		but is already set. */
		pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

		if( pxNewQueue != NULL )
		{
			#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
			{
				/* Queues can be allocated either statically or dynamically, so
				note this queue was allocated statically in case the queue is
				later deleted. */
				pxNewQueue->ucStaticallyAllocated = pdTRUE;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
		}
		else
		{
			traceQUEUE_CREATE_FAILED( ucQueueType );
		}

		return pxNewQueue;
	}

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
356 
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

	QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;
	size_t xQueueSizeInBytes;
	uint8_t *pucQueueStorage;

		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* There is not going to be a queue storage area. */
			xQueueSizeInBytes = ( size_t ) 0;
		}
		else
		{
			/* Allocate enough space to hold the maximum number of items that
			can be in the queue at any time. */
			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		}

		/* The queue structure and the storage area are obtained with a single
		allocation; the storage area immediately follows the structure. */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );

		if( pxNewQueue != NULL )
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			pucQueueStorage = ( ( uint8_t * ) pxNewQueue ) + sizeof( Queue_t );

			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
			{
				/* Queues can be created either statically or dynamically, so
				note this task was created dynamically in case it is later
				deleted. */
				pxNewQueue->ucStaticallyAllocated = pdFALSE;
			}
			#endif /* configSUPPORT_STATIC_ALLOCATION */

			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
		}
		else
		{
			traceQUEUE_CREATE_FAILED( ucQueueType );
		}

		return pxNewQueue;
	}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
408 
409 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
410 {
411  /* Remove compiler warnings about unused parameters should
412  configUSE_TRACE_FACILITY not be set to 1. */
413  ( void ) ucQueueType;
414 
415  if( uxItemSize == ( UBaseType_t ) 0 )
416  {
417  /* No RAM was allocated for the queue storage area, but PC head cannot
418  be set to NULL because NULL is used as a key to say the queue is used as
419  a mutex. Therefore just set pcHead to point to the queue as a benign
420  value that is known to be within the memory map. */
421  pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
422  }
423  else
424  {
425  /* Set the head to the start of the queue storage area. */
426  pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
427  }
428 
429  /* Initialise the queue members as described where the queue type is
430  defined. */
431  pxNewQueue->uxLength = uxQueueLength;
432  pxNewQueue->uxItemSize = uxItemSize;
433  ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
434 
435  #if ( configUSE_TRACE_FACILITY == 1 )
436  {
437  pxNewQueue->ucQueueType = ucQueueType;
438  }
439  #endif /* configUSE_TRACE_FACILITY */
440 
441  #if( configUSE_QUEUE_SETS == 1 )
442  {
443  pxNewQueue->pxQueueSetContainer = NULL;
444  }
445  #endif /* configUSE_QUEUE_SETS */
446 
447  traceQUEUE_CREATE( pxNewQueue );
448 }
449 /*-----------------------------------------------------------*/
450 
#if( configUSE_MUTEXES == 1 )

	static void prvInitialiseMutex( Queue_t *pxNewQueue )
	{
		if( pxNewQueue != NULL )
		{
			/* The queue create function will set all the queue structure members
			correctly for a generic queue, but this function is creating a
			mutex.  Overwrite those members that need to be set differently -
			in particular the information required for priority inheritance. */
			pxNewQueue->pxMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* In case this is a recursive mutex. */
			pxNewQueue->u.uxRecursiveCallCount = 0;

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state - available. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			/* Restored: the scraped listing dropped this trace call. */
			traceCREATE_MUTEX_FAILED();
		}
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
480 
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		/* A mutex is a queue of length one with zero-sized items - the holder
		information lives in the queue structure itself. */
		pxNewQueue = ( Queue_t * ) xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
		prvInitialiseMutex( pxNewQueue );

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
496 
#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
	{
	Queue_t *pxNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		pxNewQueue = ( Queue_t * ) xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
		prvInitialiseMutex( pxNewQueue );

		return pxNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
516 
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL();

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
546 
#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	void* xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
	{
	void *pxReturn;

		configASSERT( xSemaphore );

		/* Mutexes cannot be used in interrupt service routines, so the mutex
		holder should not change in an ISR, and therefore a critical section is
		not required here. */
		if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
		{
			pxReturn = ( void * ) ( ( Queue_t * ) xSemaphore )->pxMutexHolder;
		}
		else
		{
			pxReturn = NULL;
		}

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/
572 
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/* The scraped listing dropped the signature, the give-back call and the
	trace/coverage macros - all restored below. */
	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then pxMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Not a redundant cast as TaskHandle_t is a typedef. */
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if pxMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.uxRecursiveCallCount )--;

			/* Has the recursive call count unwound to 0? */
			if( pxMutex->u.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
627 
#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	/* The scraped listing dropped the signature and the failure trace macro -
	both restored below. */
	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->pxMutexHolder == ( void * ) xTaskGetCurrentTaskHandle() ) /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
		{
			/* The calling task already holds the mutex - just bump the
			recursion count. */
			( pxMutex->u.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn != pdFAIL )
			{
				( pxMutex->u.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
669 
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		/* A counting semaphore is a queue whose length is the maximum count and
		whose items occupy no storage. */
		xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			/* The semaphore's count is held as the number of messages waiting. */
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		return xHandle;
	}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
697 
#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		/* A counting semaphore is a queue whose length is the maximum count and
		whose items occupy no storage. */
		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			/* The semaphore's count is held as the number of messages waiting. */
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		return xHandle;
	}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
725 
726 BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
727 {
728 BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
729 TimeOut_t xTimeOut;
730 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
731 
732  configASSERT( pxQueue );
733  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
734  configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
735  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
736  {
737  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
738  }
739  #endif
740 
741 
742  /* This function relaxes the coding standard somewhat to allow return
743  statements within the function itself. This is done in the interest
744  of execution time efficiency. */
745  for( ;; )
746  {
748  {
749  /* Is there room on the queue now? The running task must be the
750  highest priority task wanting to access the queue. If the head item
751  in the queue is to be overwritten then it does not matter if the
752  queue is full. */
753  if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
754  {
755  traceQUEUE_SEND( pxQueue );
756  xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
757 
758  #if ( configUSE_QUEUE_SETS == 1 )
759  {
760  if( pxQueue->pxQueueSetContainer != NULL )
761  {
762  if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
763  {
764  /* The queue is a member of a queue set, and posting
765  to the queue set caused a higher priority task to
766  unblock. A context switch is required. */
768  }
769  else
770  {
772  }
773  }
774  else
775  {
776  /* If there was a task waiting for data to arrive on the
777  queue then unblock it now. */
778  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
779  {
781  {
782  /* The unblocked task has a priority higher than
783  our own so yield immediately. Yes it is ok to
784  do this from within the critical section - the
785  kernel takes care of that. */
787  }
788  else
789  {
791  }
792  }
793  else if( xYieldRequired != pdFALSE )
794  {
795  /* This path is a special case that will only get
796  executed if the task was holding multiple mutexes
797  and the mutexes were given back in an order that is
798  different to that in which they were taken. */
800  }
801  else
802  {
804  }
805  }
806  }
807  #else /* configUSE_QUEUE_SETS */
808  {
809  /* If there was a task waiting for data to arrive on the
810  queue then unblock it now. */
811  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
812  {
814  {
815  /* The unblocked task has a priority higher than
816  our own so yield immediately. Yes it is ok to do
817  this from within the critical section - the kernel
818  takes care of that. */
820  }
821  else
822  {
824  }
825  }
826  else if( xYieldRequired != pdFALSE )
827  {
828  /* This path is a special case that will only get
829  executed if the task was holding multiple mutexes and
830  the mutexes were given back in an order that is
831  different to that in which they were taken. */
833  }
834  else
835  {
837  }
838  }
839  #endif /* configUSE_QUEUE_SETS */
840 
842  return pdPASS;
843  }
844  else
845  {
846  if( xTicksToWait == ( TickType_t ) 0 )
847  {
848  /* The queue was full and no block time is specified (or
849  the block time has expired) so leave now. */
851 
852  /* Return to the original privilege level before exiting
853  the function. */
854  traceQUEUE_SEND_FAILED( pxQueue );
855  return errQUEUE_FULL;
856  }
857  else if( xEntryTimeSet == pdFALSE )
858  {
859  /* The queue was full and a block time was specified so
860  configure the timeout structure. */
861  vTaskInternalSetTimeOutState( &xTimeOut );
862  xEntryTimeSet = pdTRUE;
863  }
864  else
865  {
866  /* Entry time was already set. */
868  }
869  }
870  }
872 
873  /* Interrupts and other tasks can send to and receive from the queue
874  now the critical section has been exited. */
875 
876  vTaskSuspendAll();
877  prvLockQueue( pxQueue );
878 
879  /* Update the timeout state to see if it has expired yet. */
880  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
881  {
882  if( prvIsQueueFull( pxQueue ) != pdFALSE )
883  {
884  traceBLOCKING_ON_QUEUE_SEND( pxQueue );
885  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
886 
887  /* Unlocking the queue means queue events can effect the
888  event list. It is possible that interrupts occurring now
889  remove this task from the event list again - but as the
890  scheduler is suspended the task will go onto the pending
891  ready last instead of the actual ready list. */
892  prvUnlockQueue( pxQueue );
893 
894  /* Resuming the scheduler will move tasks from the pending
895  ready list into the ready list - so it is feasible that this
896  task is already in a ready list before it yields - in which
897  case the yield will not cause a context switch unless there
898  is also a higher priority task in the pending ready list. */
899  if( xTaskResumeAll() == pdFALSE )
900  {
902  }
903  }
904  else
905  {
906  /* Try again. */
907  prvUnlockQueue( pxQueue );
908  ( void ) xTaskResumeAll();
909  }
910  }
911  else
912  {
913  /* The timeout has expired. */
914  prvUnlockQueue( pxQueue );
915  ( void ) xTaskResumeAll();
916 
917  traceQUEUE_SEND_FAILED( pxQueue );
918  return errQUEUE_FULL;
919  }
920  }
921 }
922 /*-----------------------------------------------------------*/
923 
924 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
925 {
926 BaseType_t xReturn;
927 UBaseType_t uxSavedInterruptStatus;
928 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
929 
930  configASSERT( pxQueue );
931  configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
932  configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
933 
934  /* RTOS ports that support interrupt nesting have the concept of a maximum
935  system call (or maximum API call) interrupt priority. Interrupts that are
936  above the maximum system call priority are kept permanently enabled, even
937  when the RTOS kernel is in a critical section, but cannot make any calls to
938  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
939  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
940  failure if a FreeRTOS API function is called from an interrupt that has been
941  assigned a priority above the configured maximum system call priority.
942  Only FreeRTOS functions that end in FromISR can be called from interrupts
943  that have been assigned a priority at or (logically) below the maximum
944  system call interrupt priority. FreeRTOS maintains a separate interrupt
945  safe API to ensure interrupt entry is as fast and as simple as possible.
946  More information (albeit Cortex-M specific) is provided on the following
947  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
949 
950  /* Similar to xQueueGenericSend, except without blocking if there is no room
951  in the queue. Also don't directly wake a task that was blocked on a queue
952  read, instead return a flag to say whether a context switch is required or
953  not (i.e. has a task with a higher priority than us been woken by this
954  post). */
955  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
956  {
957  if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
958  {
959  const int8_t cTxLock = pxQueue->cTxLock;
960 
961  traceQUEUE_SEND_FROM_ISR( pxQueue );
962 
963  /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
964  semaphore or mutex. That means prvCopyDataToQueue() cannot result
965  in a task disinheriting a priority and prvCopyDataToQueue() can be
966  called here even though the disinherit function does not check if
967  the scheduler is suspended before accessing the ready lists. */
968  ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
969 
970  /* The event list is not altered if the queue is locked. This will
971  be done when the queue is unlocked later. */
972  if( cTxLock == queueUNLOCKED )
973  {
974  #if ( configUSE_QUEUE_SETS == 1 )
975  {
976  if( pxQueue->pxQueueSetContainer != NULL )
977  {
978  if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
979  {
980  /* The queue is a member of a queue set, and posting
981  to the queue set caused a higher priority task to
982  unblock. A context switch is required. */
983  if( pxHigherPriorityTaskWoken != NULL )
984  {
985  *pxHigherPriorityTaskWoken = pdTRUE;
986  }
987  else
988  {
990  }
991  }
992  else
993  {
995  }
996  }
997  else
998  {
999  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1000  {
1001  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1002  {
1003  /* The task waiting has a higher priority so
1004  record that a context switch is required. */
1005  if( pxHigherPriorityTaskWoken != NULL )
1006  {
1007  *pxHigherPriorityTaskWoken = pdTRUE;
1008  }
1009  else
1010  {
1012  }
1013  }
1014  else
1015  {
1017  }
1018  }
1019  else
1020  {
1022  }
1023  }
1024  }
1025  #else /* configUSE_QUEUE_SETS */
1026  {
1027  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1028  {
1029  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1030  {
1031  /* The task waiting has a higher priority so record that a
1032  context switch is required. */
1033  if( pxHigherPriorityTaskWoken != NULL )
1034  {
1035  *pxHigherPriorityTaskWoken = pdTRUE;
1036  }
1037  else
1038  {
1040  }
1041  }
1042  else
1043  {
1045  }
1046  }
1047  else
1048  {
1050  }
1051  }
1052  #endif /* configUSE_QUEUE_SETS */
1053  }
1054  else
1055  {
1056  /* Increment the lock count so the task that unlocks the queue
1057  knows that data was posted while it was locked. */
1058  pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
1059  }
1060 
1061  xReturn = pdPASS;
1062  }
1063  else
1064  {
1066  xReturn = errQUEUE_FULL;
1067  }
1068  }
1069  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1070 
1071  return xReturn;
1072 }
1073 /*-----------------------------------------------------------*/
1074 
1075 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
1076 {
1077 BaseType_t xReturn;
1078 UBaseType_t uxSavedInterruptStatus;
1079 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1080 
1081  /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1082  item size is 0. Don't directly wake a task that was blocked on a queue
1083  read, instead return a flag to say whether a context switch is required or
1084  not (i.e. has a task with a higher priority than us been woken by this
1085  post). */
1086 
1087  configASSERT( pxQueue );
1088 
1089  /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
1090  if the item size is not 0. */
1091  configASSERT( pxQueue->uxItemSize == 0 );
1092 
1093  /* Normally a mutex would not be given from an interrupt, especially if
1094  there is a mutex holder, as priority inheritance makes no sense for an
1095  interrupts, only tasks. */
1096  configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );
1097 
1098  /* RTOS ports that support interrupt nesting have the concept of a maximum
1099  system call (or maximum API call) interrupt priority. Interrupts that are
1100  above the maximum system call priority are kept permanently enabled, even
1101  when the RTOS kernel is in a critical section, but cannot make any calls to
1102  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1103  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1104  failure if a FreeRTOS API function is called from an interrupt that has been
1105  assigned a priority above the configured maximum system call priority.
1106  Only FreeRTOS functions that end in FromISR can be called from interrupts
1107  that have been assigned a priority at or (logically) below the maximum
1108  system call interrupt priority. FreeRTOS maintains a separate interrupt
1109  safe API to ensure interrupt entry is as fast and as simple as possible.
1110  More information (albeit Cortex-M specific) is provided on the following
1111  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1113 
1114  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1115  {
1117 
1118  /* When the queue is used to implement a semaphore no data is ever
1119  moved through the queue but it is still valid to see if the queue 'has
1120  space'. */
1121  if( uxMessagesWaiting < pxQueue->uxLength )
1122  {
1123  const int8_t cTxLock = pxQueue->cTxLock;
1124 
1125  traceQUEUE_SEND_FROM_ISR( pxQueue );
1126 
1127  /* A task can only have an inherited priority if it is a mutex
1128  holder - and if there is a mutex holder then the mutex cannot be
1129  given from an ISR. As this is the ISR version of the function it
1130  can be assumed there is no mutex holder and no need to determine if
1131  priority disinheritance is needed. Simply increase the count of
1132  messages (semaphores) available. */
1133  pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
1134 
1135  /* The event list is not altered if the queue is locked. This will
1136  be done when the queue is unlocked later. */
1137  if( cTxLock == queueUNLOCKED )
1138  {
1139  #if ( configUSE_QUEUE_SETS == 1 )
1140  {
1141  if( pxQueue->pxQueueSetContainer != NULL )
1142  {
1143  if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
1144  {
1145  /* The semaphore is a member of a queue set, and
1146  posting to the queue set caused a higher priority
1147  task to unblock. A context switch is required. */
1148  if( pxHigherPriorityTaskWoken != NULL )
1149  {
1150  *pxHigherPriorityTaskWoken = pdTRUE;
1151  }
1152  else
1153  {
1155  }
1156  }
1157  else
1158  {
1160  }
1161  }
1162  else
1163  {
1164  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1165  {
1166  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1167  {
1168  /* The task waiting has a higher priority so
1169  record that a context switch is required. */
1170  if( pxHigherPriorityTaskWoken != NULL )
1171  {
1172  *pxHigherPriorityTaskWoken = pdTRUE;
1173  }
1174  else
1175  {
1177  }
1178  }
1179  else
1180  {
1182  }
1183  }
1184  else
1185  {
1187  }
1188  }
1189  }
1190  #else /* configUSE_QUEUE_SETS */
1191  {
1192  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1193  {
1194  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1195  {
1196  /* The task waiting has a higher priority so record that a
1197  context switch is required. */
1198  if( pxHigherPriorityTaskWoken != NULL )
1199  {
1200  *pxHigherPriorityTaskWoken = pdTRUE;
1201  }
1202  else
1203  {
1205  }
1206  }
1207  else
1208  {
1210  }
1211  }
1212  else
1213  {
1215  }
1216  }
1217  #endif /* configUSE_QUEUE_SETS */
1218  }
1219  else
1220  {
1221  /* Increment the lock count so the task that unlocks the queue
1222  knows that data was posted while it was locked. */
1223  pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
1224  }
1225 
1226  xReturn = pdPASS;
1227  }
1228  else
1229  {
1231  xReturn = errQUEUE_FULL;
1232  }
1233  }
1234  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1235 
1236  return xReturn;
1237 }
1238 /*-----------------------------------------------------------*/
1239 
1240 BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
1241 {
1242 BaseType_t xEntryTimeSet = pdFALSE;
1243 TimeOut_t xTimeOut;
1244 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1245 
1246  /* Check the pointer is not NULL. */
1247  configASSERT( ( pxQueue ) );
1248 
1249  /* The buffer into which data is received can only be NULL if the data size
1250  is zero (so no data is copied into the buffer. */
1251  configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1252 
1253  /* Cannot block if the scheduler is suspended. */
1254  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1255  {
1256  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1257  }
1258  #endif
1259 
1260 
1261  /* This function relaxes the coding standard somewhat to allow return
1262  statements within the function itself. This is done in the interest
1263  of execution time efficiency. */
1264 
1265  for( ;; )
1266  {
1268  {
1270 
1271  /* Is there data in the queue now? To be running the calling task
1272  must be the highest priority task wanting to access the queue. */
1273  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1274  {
1275  /* Data available, remove one item. */
1276  prvCopyDataFromQueue( pxQueue, pvBuffer );
1277  traceQUEUE_RECEIVE( pxQueue );
1278  pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
1279 
1280  /* There is now space in the queue, were any tasks waiting to
1281  post to the queue? If so, unblock the highest priority waiting
1282  task. */
1283  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1284  {
1285  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1286  {
1288  }
1289  else
1290  {
1292  }
1293  }
1294  else
1295  {
1297  }
1298 
1300  return pdPASS;
1301  }
1302  else
1303  {
1304  if( xTicksToWait == ( TickType_t ) 0 )
1305  {
1306  /* The queue was empty and no block time is specified (or
1307  the block time has expired) so leave now. */
1309  traceQUEUE_RECEIVE_FAILED( pxQueue );
1310  return errQUEUE_EMPTY;
1311  }
1312  else if( xEntryTimeSet == pdFALSE )
1313  {
1314  /* The queue was empty and a block time was specified so
1315  configure the timeout structure. */
1316  vTaskInternalSetTimeOutState( &xTimeOut );
1317  xEntryTimeSet = pdTRUE;
1318  }
1319  else
1320  {
1321  /* Entry time was already set. */
1323  }
1324  }
1325  }
1327 
1328  /* Interrupts and other tasks can send to and receive from the queue
1329  now the critical section has been exited. */
1330 
1331  vTaskSuspendAll();
1332  prvLockQueue( pxQueue );
1333 
1334  /* Update the timeout state to see if it has expired yet. */
1335  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1336  {
1337  /* The timeout has not expired. If the queue is still empty place
1338  the task on the list of tasks waiting to receive from the queue. */
1339  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1340  {
1341  traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1342  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1343  prvUnlockQueue( pxQueue );
1344  if( xTaskResumeAll() == pdFALSE )
1345  {
1347  }
1348  else
1349  {
1351  }
1352  }
1353  else
1354  {
1355  /* The queue contains data again. Loop back to try and read the
1356  data. */
1357  prvUnlockQueue( pxQueue );
1358  ( void ) xTaskResumeAll();
1359  }
1360  }
1361  else
1362  {
1363  /* Timed out. If there is no data in the queue exit, otherwise loop
1364  back and attempt to read the data. */
1365  prvUnlockQueue( pxQueue );
1366  ( void ) xTaskResumeAll();
1367 
1368  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1369  {
1370  traceQUEUE_RECEIVE_FAILED( pxQueue );
1371  return errQUEUE_EMPTY;
1372  }
1373  else
1374  {
1376  }
1377  }
1378  }
1379 }
1380 /*-----------------------------------------------------------*/
1381 
1383 {
1384 BaseType_t xEntryTimeSet = pdFALSE;
1385 TimeOut_t xTimeOut;
1386 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1387 
1388 #if( configUSE_MUTEXES == 1 )
1389  BaseType_t xInheritanceOccurred = pdFALSE;
1390 #endif
1391 
1392  /* Check the queue pointer is not NULL. */
1393  configASSERT( ( pxQueue ) );
1394 
1395  /* Check this really is a semaphore, in which case the item size will be
1396  0. */
1397  configASSERT( pxQueue->uxItemSize == 0 );
1398 
1399  /* Cannot block if the scheduler is suspended. */
1400  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1401  {
1402  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1403  }
1404  #endif
1405 
1406 
1407  /* This function relaxes the coding standard somewhat to allow return
1408  statements within the function itself. This is done in the interest
1409  of execution time efficiency. */
1410 
1411  for( ;; )
1412  {
1414  {
1415  /* Semaphores are queues with an item size of 0, and where the
1416  number of messages in the queue is the semaphore's count value. */
1417  const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1418 
1419  /* Is there data in the queue now? To be running the calling task
1420  must be the highest priority task wanting to access the queue. */
1421  if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1422  {
1423  traceQUEUE_RECEIVE( pxQueue );
1424 
1425  /* Semaphores are queues with a data size of zero and where the
1426  messages waiting is the semaphore's count. Reduce the count. */
1427  pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;
1428 
1429  #if ( configUSE_MUTEXES == 1 )
1430  {
1431  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1432  {
1433  /* Record the information required to implement
1434  priority inheritance should it become necessary. */
1435  pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
1436  }
1437  else
1438  {
1440  }
1441  }
1442  #endif /* configUSE_MUTEXES */
1443 
1444  /* Check to see if other tasks are blocked waiting to give the
1445  semaphore, and if so, unblock the highest priority such task. */
1446  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1447  {
1448  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1449  {
1451  }
1452  else
1453  {
1455  }
1456  }
1457  else
1458  {
1460  }
1461 
1463  return pdPASS;
1464  }
1465  else
1466  {
1467  if( xTicksToWait == ( TickType_t ) 0 )
1468  {
1469  /* For inheritance to have occurred there must have been an
1470  initial timeout, and an adjusted timeout cannot become 0, as
1471  if it were 0 the function would have exited. */
1472  #if( configUSE_MUTEXES == 1 )
1473  {
1474  configASSERT( xInheritanceOccurred == pdFALSE );
1475  }
1476  #endif /* configUSE_MUTEXES */
1477 
1478  /* The semaphore count was 0 and no block time is specified
1479  (or the block time has expired) so exit now. */
1481  traceQUEUE_RECEIVE_FAILED( pxQueue );
1482  return errQUEUE_EMPTY;
1483  }
1484  else if( xEntryTimeSet == pdFALSE )
1485  {
1486  /* The semaphore count was 0 and a block time was specified
1487  so configure the timeout structure ready to block. */
1488  vTaskInternalSetTimeOutState( &xTimeOut );
1489  xEntryTimeSet = pdTRUE;
1490  }
1491  else
1492  {
1493  /* Entry time was already set. */
1495  }
1496  }
1497  }
1499 
1500  /* Interrupts and other tasks can give to and take from the semaphore
1501  now the critical section has been exited. */
1502 
1503  vTaskSuspendAll();
1504  prvLockQueue( pxQueue );
1505 
1506  /* Update the timeout state to see if it has expired yet. */
1507  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1508  {
1509  /* A block time is specified and not expired. If the semaphore
1510  count is 0 then enter the Blocked state to wait for a semaphore to
1511  become available. As semaphores are implemented with queues the
1512  queue being empty is equivalent to the semaphore count being 0. */
1513  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1514  {
1515  traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1516 
1517  #if ( configUSE_MUTEXES == 1 )
1518  {
1519  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1520  {
1522  {
1523  xInheritanceOccurred = xTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
1524  }
1526  }
1527  else
1528  {
1530  }
1531  }
1532  #endif
1533 
1534  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1535  prvUnlockQueue( pxQueue );
1536  if( xTaskResumeAll() == pdFALSE )
1537  {
1539  }
1540  else
1541  {
1543  }
1544  }
1545  else
1546  {
1547  /* There was no timeout and the semaphore count was not 0, so
1548  attempt to take the semaphore again. */
1549  prvUnlockQueue( pxQueue );
1550  ( void ) xTaskResumeAll();
1551  }
1552  }
1553  else
1554  {
1555  /* Timed out. */
1556  prvUnlockQueue( pxQueue );
1557  ( void ) xTaskResumeAll();
1558 
1559  /* If the semaphore count is 0 exit now as the timeout has
1560  expired. Otherwise return to attempt to take the semaphore that is
1561  known to be available. As semaphores are implemented by queues the
1562  queue being empty is equivalent to the semaphore count being 0. */
1563  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1564  {
1565  #if ( configUSE_MUTEXES == 1 )
1566  {
1567  /* xInheritanceOccurred could only have be set if
1568  pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1569  test the mutex type again to check it is actually a mutex. */
1570  if( xInheritanceOccurred != pdFALSE )
1571  {
1573  {
1574  UBaseType_t uxHighestWaitingPriority;
1575 
1576  /* This task blocking on the mutex caused another
1577  task to inherit this task's priority. Now this task
1578  has timed out the priority should be disinherited
1579  again, but only as low as the next highest priority
1580  task that is waiting for the same mutex. */
1581  uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1582  vTaskPriorityDisinheritAfterTimeout( ( void * ) pxQueue->pxMutexHolder, uxHighestWaitingPriority );
1583  }
1585  }
1586  }
1587  #endif /* configUSE_MUTEXES */
1588 
1589  traceQUEUE_RECEIVE_FAILED( pxQueue );
1590  return errQUEUE_EMPTY;
1591  }
1592  else
1593  {
1595  }
1596  }
1597  }
1598 }
1599 /*-----------------------------------------------------------*/
1600 
1601 BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
1602 {
1603 BaseType_t xEntryTimeSet = pdFALSE;
1604 TimeOut_t xTimeOut;
1605 int8_t *pcOriginalReadPosition;
1606 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1607 
1608  /* Check the pointer is not NULL. */
1609  configASSERT( ( pxQueue ) );
1610 
1611  /* The buffer into which data is received can only be NULL if the data size
1612  is zero (so no data is copied into the buffer. */
1613  configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1614 
1615  /* Cannot block if the scheduler is suspended. */
1616  #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1617  {
1618  configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1619  }
1620  #endif
1621 
1622 
1623  /* This function relaxes the coding standard somewhat to allow return
1624  statements within the function itself. This is done in the interest
1625  of execution time efficiency. */
1626 
1627  for( ;; )
1628  {
1630  {
1632 
1633  /* Is there data in the queue now? To be running the calling task
1634  must be the highest priority task wanting to access the queue. */
1635  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1636  {
1637  /* Remember the read position so it can be reset after the data
1638  is read from the queue as this function is only peeking the
1639  data, not removing it. */
1640  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1641 
1642  prvCopyDataFromQueue( pxQueue, pvBuffer );
1643  traceQUEUE_PEEK( pxQueue );
1644 
1645  /* The data is not being removed, so reset the read pointer. */
1646  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1647 
1648  /* The data is being left in the queue, so see if there are
1649  any other tasks waiting for the data. */
1650  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1651  {
1652  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1653  {
1654  /* The task waiting has a higher priority than this task. */
1656  }
1657  else
1658  {
1660  }
1661  }
1662  else
1663  {
1665  }
1666 
1668  return pdPASS;
1669  }
1670  else
1671  {
1672  if( xTicksToWait == ( TickType_t ) 0 )
1673  {
1674  /* The queue was empty and no block time is specified (or
1675  the block time has expired) so leave now. */
1677  traceQUEUE_PEEK_FAILED( pxQueue );
1678  return errQUEUE_EMPTY;
1679  }
1680  else if( xEntryTimeSet == pdFALSE )
1681  {
1682  /* The queue was empty and a block time was specified so
1683  configure the timeout structure ready to enter the blocked
1684  state. */
1685  vTaskInternalSetTimeOutState( &xTimeOut );
1686  xEntryTimeSet = pdTRUE;
1687  }
1688  else
1689  {
1690  /* Entry time was already set. */
1692  }
1693  }
1694  }
1696 
1697  /* Interrupts and other tasks can send to and receive from the queue
1698  now the critical section has been exited. */
1699 
1700  vTaskSuspendAll();
1701  prvLockQueue( pxQueue );
1702 
1703  /* Update the timeout state to see if it has expired yet. */
1704  if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1705  {
1706  /* Timeout has not expired yet, check to see if there is data in the
1707  queue now, and if not enter the Blocked state to wait for data. */
1708  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1709  {
1710  traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1711  vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1712  prvUnlockQueue( pxQueue );
1713  if( xTaskResumeAll() == pdFALSE )
1714  {
1716  }
1717  else
1718  {
1720  }
1721  }
1722  else
1723  {
1724  /* There is data in the queue now, so don't enter the blocked
1725  state, instead return to try and obtain the data. */
1726  prvUnlockQueue( pxQueue );
1727  ( void ) xTaskResumeAll();
1728  }
1729  }
1730  else
1731  {
1732  /* The timeout has expired. If there is still no data in the queue
1733  exit, otherwise go back and try to read the data again. */
1734  prvUnlockQueue( pxQueue );
1735  ( void ) xTaskResumeAll();
1736 
1737  if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1738  {
1739  traceQUEUE_PEEK_FAILED( pxQueue );
1740  return errQUEUE_EMPTY;
1741  }
1742  else
1743  {
1745  }
1746  }
1747  }
1748 }
1749 /*-----------------------------------------------------------*/
1750 
1751 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
1752 {
1753 BaseType_t xReturn;
1754 UBaseType_t uxSavedInterruptStatus;
1755 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1756 
1757  configASSERT( pxQueue );
1758  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1759 
1760  /* RTOS ports that support interrupt nesting have the concept of a maximum
1761  system call (or maximum API call) interrupt priority. Interrupts that are
1762  above the maximum system call priority are kept permanently enabled, even
1763  when the RTOS kernel is in a critical section, but cannot make any calls to
1764  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1765  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1766  failure if a FreeRTOS API function is called from an interrupt that has been
1767  assigned a priority above the configured maximum system call priority.
1768  Only FreeRTOS functions that end in FromISR can be called from interrupts
1769  that have been assigned a priority at or (logically) below the maximum
1770  system call interrupt priority. FreeRTOS maintains a separate interrupt
1771  safe API to ensure interrupt entry is as fast and as simple as possible.
1772  More information (albeit Cortex-M specific) is provided on the following
1773  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1775 
1776  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1777  {
1779 
1780  /* Cannot block in an ISR, so check there is data available. */
1781  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1782  {
1783  const int8_t cRxLock = pxQueue->cRxLock;
1784 
1785  traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
1786 
1787  prvCopyDataFromQueue( pxQueue, pvBuffer );
1788  pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
1789 
1790  /* If the queue is locked the event list will not be modified.
1791  Instead update the lock count so the task that unlocks the queue
1792  will know that an ISR has removed data while the queue was
1793  locked. */
1794  if( cRxLock == queueUNLOCKED )
1795  {
1796  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1797  {
1798  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1799  {
1800  /* The task waiting has a higher priority than us so
1801  force a context switch. */
1802  if( pxHigherPriorityTaskWoken != NULL )
1803  {
1804  *pxHigherPriorityTaskWoken = pdTRUE;
1805  }
1806  else
1807  {
1809  }
1810  }
1811  else
1812  {
1814  }
1815  }
1816  else
1817  {
1819  }
1820  }
1821  else
1822  {
1823  /* Increment the lock count so the task that unlocks the queue
1824  knows that data was removed while it was locked. */
1825  pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
1826  }
1827 
1828  xReturn = pdPASS;
1829  }
1830  else
1831  {
1832  xReturn = pdFAIL;
1834  }
1835  }
1836  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1837 
1838  return xReturn;
1839 }
1840 /*-----------------------------------------------------------*/
1841 
1842 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
1843 {
1844 BaseType_t xReturn;
1845 UBaseType_t uxSavedInterruptStatus;
1846 int8_t *pcOriginalReadPosition;
1847 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1848 
1849  configASSERT( pxQueue );
1850  configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1851  configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
1852 
1853  /* RTOS ports that support interrupt nesting have the concept of a maximum
1854  system call (or maximum API call) interrupt priority. Interrupts that are
1855  above the maximum system call priority are kept permanently enabled, even
1856  when the RTOS kernel is in a critical section, but cannot make any calls to
1857  FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
1858  then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1859  failure if a FreeRTOS API function is called from an interrupt that has been
1860  assigned a priority above the configured maximum system call priority.
1861  Only FreeRTOS functions that end in FromISR can be called from interrupts
1862  that have been assigned a priority at or (logically) below the maximum
1863  system call interrupt priority. FreeRTOS maintains a separate interrupt
1864  safe API to ensure interrupt entry is as fast and as simple as possible.
1865  More information (albeit Cortex-M specific) is provided on the following
1866  link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1868 
1869  uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1870  {
1871  /* Cannot block in an ISR, so check there is data available. */
1872  if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1873  {
1874  traceQUEUE_PEEK_FROM_ISR( pxQueue );
1875 
1876  /* Remember the read position so it can be reset as nothing is
1877  actually being removed from the queue. */
1878  pcOriginalReadPosition = pxQueue->u.pcReadFrom;
1879  prvCopyDataFromQueue( pxQueue, pvBuffer );
1880  pxQueue->u.pcReadFrom = pcOriginalReadPosition;
1881 
1882  xReturn = pdPASS;
1883  }
1884  else
1885  {
1886  xReturn = pdFAIL;
1888  }
1889  }
1890  portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1891 
1892  return xReturn;
1893 }
1894 /*-----------------------------------------------------------*/
1895 
1897 {
1898 UBaseType_t uxReturn;
1899 
1900  configASSERT( xQueue );
1901 
1903  {
1904  uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1905  }
1907 
1908  return uxReturn;
1909 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1910 /*-----------------------------------------------------------*/
1911 
1913 {
1914 UBaseType_t uxReturn;
1915 Queue_t *pxQueue;
1916 
1917  pxQueue = ( Queue_t * ) xQueue;
1918  configASSERT( pxQueue );
1919 
1921  {
1922  uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
1923  }
1925 
1926  return uxReturn;
1927 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1928 /*-----------------------------------------------------------*/
1929 
1931 {
1932 UBaseType_t uxReturn;
1933 
1934  configASSERT( xQueue );
1935 
1936  uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1937 
1938  return uxReturn;
1939 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1940 /*-----------------------------------------------------------*/
1941 
1943 {
1944 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1945 
1946  configASSERT( pxQueue );
1947  traceQUEUE_DELETE( pxQueue );
1948 
1949  #if ( configQUEUE_REGISTRY_SIZE > 0 )
1950  {
1951  vQueueUnregisterQueue( pxQueue );
1952  }
1953  #endif
1954 
1955  #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
1956  {
1957  /* The queue can only have been allocated dynamically - free it
1958  again. */
1959  vPortFree( pxQueue );
1960  }
1961  #elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
1962  {
1963  /* The queue could have been allocated statically or dynamically, so
1964  check before attempting to free the memory. */
1965  if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
1966  {
1967  vPortFree( pxQueue );
1968  }
1969  else
1970  {
1972  }
1973  }
1974  #else
1975  {
1976  /* The queue must have been statically allocated, so is not going to be
1977  deleted. Avoid compiler warnings about the unused parameter. */
1978  ( void ) pxQueue;
1979  }
1980  #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
1981 }
1982 /*-----------------------------------------------------------*/
1983 
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the trace-facility number previously assigned to the queue. */
	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
1992 /*-----------------------------------------------------------*/
1993 
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Assign a trace-facility number to the queue (used by trace tools to
	identify the queue). */
	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
	{
		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
	}

#endif /* configUSE_TRACE_FACILITY */
2002 /*-----------------------------------------------------------*/
2003 
#if ( configUSE_TRACE_FACILITY == 1 )

	/* Return the type of the queue (queue, mutex, semaphore, set, ...) as
	recorded when the queue was created. */
	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
	{
		return ( ( Queue_t * ) xQueue )->ucQueueType;
	}

#endif /* configUSE_TRACE_FACILITY */
2012 /*-----------------------------------------------------------*/
2013 
#if( configUSE_MUTEXES == 1 )

	/* Return the priority to which a mutex holder should disinherit after a
	waiting task times out: the priority of the highest priority task still
	waiting on the mutex, or the idle priority if no task is waiting. */
	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
	{
	UBaseType_t uxHighestPriorityOfWaitingTasks;

		/* If a task waiting for a mutex causes the mutex holder to inherit a
		priority, but the waiting task times out, then the holder should
		disinherit the priority - but only down to the highest priority of any
		other tasks that are waiting for the same mutex.  For this purpose,
		return the priority of the highest priority task that is waiting for the
		mutex. */
		if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0 )
		{
			/* Event lists are ordered by inverted priority, so the head item
			value is configMAX_PRIORITIES minus the task priority. */
			uxHighestPriorityOfWaitingTasks = configMAX_PRIORITIES - listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );
		}
		else
		{
			uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
		}

		return uxHighestPriorityOfWaitingTasks;
	}

#endif /* configUSE_MUTEXES */
2039 /*-----------------------------------------------------------*/
2040 
2041 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
2042 {
2043 BaseType_t xReturn = pdFALSE;
2045 
2046  /* This function is called from a critical section. */
2047 
2048  uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2049 
2050  if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2051  {
2052  #if ( configUSE_MUTEXES == 1 )
2053  {
2054  if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2055  {
2056  /* The mutex is no longer being held. */
2057  xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
2058  pxQueue->pxMutexHolder = NULL;
2059  }
2060  else
2061  {
2063  }
2064  }
2065  #endif /* configUSE_MUTEXES */
2066  }
2067  else if( xPosition == queueSEND_TO_BACK )
2068  {
2069  ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0. */
2070  pxQueue->pcWriteTo += pxQueue->uxItemSize;
2071  if( pxQueue->pcWriteTo >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2072  {
2073  pxQueue->pcWriteTo = pxQueue->pcHead;
2074  }
2075  else
2076  {
2078  }
2079  }
2080  else
2081  {
2082  ( void ) memcpy( ( void * ) pxQueue->u.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2083  pxQueue->u.pcReadFrom -= pxQueue->uxItemSize;
2084  if( pxQueue->u.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2085  {
2086  pxQueue->u.pcReadFrom = ( pxQueue->pcTail - pxQueue->uxItemSize );
2087  }
2088  else
2089  {
2091  }
2092 
2093  if( xPosition == queueOVERWRITE )
2094  {
2095  if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2096  {
2097  /* An item is not being added but overwritten, so subtract
2098  one from the recorded number of items in the queue so when
2099  one is added again below the number of recorded items remains
2100  correct. */
2102  }
2103  else
2104  {
2106  }
2107  }
2108  else
2109  {
2111  }
2112  }
2113 
2114  pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
2115 
2116  return xReturn;
2117 }
2118 /*-----------------------------------------------------------*/
2119 
2120 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
2121 {
2122  if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2123  {
2124  pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
2125  if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
2126  {
2127  pxQueue->u.pcReadFrom = pxQueue->pcHead;
2128  }
2129  else
2130  {
2132  }
2133  ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
2134  }
2135 }
2136 /*-----------------------------------------------------------*/
2137 
2138 static void prvUnlockQueue( Queue_t * const pxQueue )
2139 {
2140  /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2141 
2142  /* The lock counts contains the number of extra data items placed or
2143  removed from the queue while the queue was locked. When a queue is
2144  locked items can be added or removed, but the event lists cannot be
2145  updated. */
2147  {
2148  int8_t cTxLock = pxQueue->cTxLock;
2149 
2150  /* See if data was added to the queue while it was locked. */
2151  while( cTxLock > queueLOCKED_UNMODIFIED )
2152  {
2153  /* Data was posted while the queue was locked. Are any tasks
2154  blocked waiting for data to become available? */
2155  #if ( configUSE_QUEUE_SETS == 1 )
2156  {
2157  if( pxQueue->pxQueueSetContainer != NULL )
2158  {
2159  if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
2160  {
2161  /* The queue is a member of a queue set, and posting to
2162  the queue set caused a higher priority task to unblock.
2163  A context switch is required. */
2164  vTaskMissedYield();
2165  }
2166  else
2167  {
2169  }
2170  }
2171  else
2172  {
2173  /* Tasks that are removed from the event list will get
2174  added to the pending ready list as the scheduler is still
2175  suspended. */
2176  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2177  {
2178  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2179  {
2180  /* The task waiting has a higher priority so record that a
2181  context switch is required. */
2182  vTaskMissedYield();
2183  }
2184  else
2185  {
2187  }
2188  }
2189  else
2190  {
2191  break;
2192  }
2193  }
2194  }
2195  #else /* configUSE_QUEUE_SETS */
2196  {
2197  /* Tasks that are removed from the event list will get added to
2198  the pending ready list as the scheduler is still suspended. */
2199  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2200  {
2201  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2202  {
2203  /* The task waiting has a higher priority so record that
2204  a context switch is required. */
2205  vTaskMissedYield();
2206  }
2207  else
2208  {
2210  }
2211  }
2212  else
2213  {
2214  break;
2215  }
2216  }
2217  #endif /* configUSE_QUEUE_SETS */
2218 
2219  --cTxLock;
2220  }
2221 
2222  pxQueue->cTxLock = queueUNLOCKED;
2223  }
2225 
2226  /* Do the same for the Rx lock. */
2228  {
2229  int8_t cRxLock = pxQueue->cRxLock;
2230 
2231  while( cRxLock > queueLOCKED_UNMODIFIED )
2232  {
2233  if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2234  {
2235  if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2236  {
2237  vTaskMissedYield();
2238  }
2239  else
2240  {
2242  }
2243 
2244  --cRxLock;
2245  }
2246  else
2247  {
2248  break;
2249  }
2250  }
2251 
2252  pxQueue->cRxLock = queueUNLOCKED;
2253  }
2255 }
2256 /*-----------------------------------------------------------*/
2257 
2258 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
2259 {
2260 BaseType_t xReturn;
2261 
2263  {
2264  if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2265  {
2266  xReturn = pdTRUE;
2267  }
2268  else
2269  {
2270  xReturn = pdFALSE;
2271  }
2272  }
2274 
2275  return xReturn;
2276 }
2277 /*-----------------------------------------------------------*/
2278 
2280 {
2281 BaseType_t xReturn;
2282 
2283  configASSERT( xQueue );
2284  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( UBaseType_t ) 0 )
2285  {
2286  xReturn = pdTRUE;
2287  }
2288  else
2289  {
2290  xReturn = pdFALSE;
2291  }
2292 
2293  return xReturn;
2294 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2295 /*-----------------------------------------------------------*/
2296 
2297 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
2298 {
2299 BaseType_t xReturn;
2300 
2302  {
2303  if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2304  {
2305  xReturn = pdTRUE;
2306  }
2307  else
2308  {
2309  xReturn = pdFALSE;
2310  }
2311  }
2313 
2314  return xReturn;
2315 }
2316 /*-----------------------------------------------------------*/
2317 
2319 {
2320 BaseType_t xReturn;
2321 
2322  configASSERT( xQueue );
2323  if( ( ( Queue_t * ) xQueue )->uxMessagesWaiting == ( ( Queue_t * ) xQueue )->uxLength )
2324  {
2325  xReturn = pdTRUE;
2326  }
2327  else
2328  {
2329  xReturn = pdFALSE;
2330  }
2331 
2332  return xReturn;
2333 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2334 /*-----------------------------------------------------------*/
2335 
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine send: post an item to the back of the queue, returning
	errQUEUE_BLOCKED/errQUEUE_FULL/errQUEUE_YIELD/pdPASS as appropriate.
	Co-routines cannot block directly so blocking is signalled to the
	caller via the return value. */
	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already full we may have to block.  A critical section
		is required to prevent an interrupt removing something from the queue
		between the check to see if the queue is full and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				/* The queue is full - do we want to block or just leave without
				posting? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is called from a coroutine we cannot block directly, but
					return indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				/* There is room in the queue, copy the data into the queue. */
				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
				xReturn = pdPASS;

				/* Were any co-routines waiting for data to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The co-routine waiting has a higher priority so record
						that a yield might be appropriate. */
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = errQUEUE_FULL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2411 /*-----------------------------------------------------------*/
2412 
#if ( configUSE_CO_ROUTINES == 1 )

	/* Co-routine receive: take an item from the queue, returning
	errQUEUE_BLOCKED/errQUEUE_FULL/errQUEUE_YIELD/pdPASS/pdFAIL as
	appropriate.  Co-routines cannot block directly so blocking is
	signalled to the caller via the return value. */
	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* If the queue is already empty we may have to block.  A critical section
		is required to prevent an interrupt adding something to the queue
		between the check to see if the queue is empty and blocking on the queue. */
		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
			{
				/* There are no messages in the queue, do we want to block or just
				leave with nothing? */
				if( xTicksToWait > ( TickType_t ) 0 )
				{
					/* As this is a co-routine we cannot block directly, but return
					indicating that we need to block. */
					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
					portENABLE_INTERRUPTS();
					return errQUEUE_BLOCKED;
				}
				else
				{
					portENABLE_INTERRUPTS();
					return errQUEUE_FULL;
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		portENABLE_INTERRUPTS();

		portDISABLE_INTERRUPTS();
		{
			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data is available from the queue. */
				pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
				if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
				{
					pxQueue->u.pcReadFrom = pxQueue->pcHead;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				--( pxQueue->uxMessagesWaiting );
				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

				xReturn = pdPASS;

				/* Were any co-routines waiting for space to become available? */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					/* In this instance the co-routine could be placed directly
					into the ready list as we are within a critical section.
					Instead the same pending ready list mechanism is used as if
					the event were caused from within an interrupt. */
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						xReturn = errQUEUE_YIELD;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		portENABLE_INTERRUPTS();

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2501 /*-----------------------------------------------------------*/
2502 
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine send.  At most one co-routine is woken per ISR;
	the return value carries the "already woken" state forward. */
	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* Cannot block within an ISR so if there is no space on the queue then
		exit without doing anything. */
		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
		{
			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );

			/* We only want to wake one co-routine per ISR, so check that a
			co-routine has not already been woken. */
			if( xCoRoutinePreviouslyWoken == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						return pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xCoRoutinePreviouslyWoken;
	}

#endif /* configUSE_CO_ROUTINES */
2549 /*-----------------------------------------------------------*/
2550 
#if ( configUSE_CO_ROUTINES == 1 )

	/* ISR-safe co-routine receive.  *pxCoRoutineWoken tracks whether a
	co-routine has already been woken in this ISR so only one is woken. */
	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* We cannot block from an ISR, so check there is data available. If
		not then just leave without doing anything. */
		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Copy the data from the queue. */
			pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
			if( pxQueue->u.pcReadFrom >= pxQueue->pcTail )
			{
				pxQueue->u.pcReadFrom = pxQueue->pcHead;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
			--( pxQueue->uxMessagesWaiting );
			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );

			if( ( *pxCoRoutineWoken ) == pdFALSE )
			{
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
				{
					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
					{
						*pxCoRoutineWoken = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
		}

		return xReturn;
	}

#endif /* configUSE_CO_ROUTINES */
2609 /*-----------------------------------------------------------*/
2610 
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Add a queue/name pair to the registry so kernel-aware debuggers can
	display the queue by name.  Silently does nothing if the registry is
	already full. */
	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;

		/* See if there is an empty space in the registry.  A NULL name denotes
		a free slot. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].pcQueueName == NULL )
			{
				/* Store the information on this queue. */
				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
				xQueueRegistry[ ux ].xHandle = xQueue;

				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* configQUEUE_REGISTRY_SIZE */
2638 /*-----------------------------------------------------------*/
2639 
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Look up the registered name of a queue.  Returns NULL if the queue is
	not in the registry. */
	const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
	{
	UBaseType_t ux;
	const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

		/* Note there is nothing here to protect against another task adding or
		removing entries from the registry while it is being searched. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				pcReturn = xQueueRegistry[ ux ].pcQueueName;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

		return pcReturn;
	} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
2666 /*-----------------------------------------------------------*/
2667 
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* Remove a queue from the registry (called by vQueueDelete()).  Does
	nothing if the queue was never registered. */
	void vQueueUnregisterQueue( QueueHandle_t xQueue )
	{
	UBaseType_t ux;

		/* See if the handle of the queue being unregistered in actually in the
		registry. */
		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
		{
			if( xQueueRegistry[ ux ].xHandle == xQueue )
			{
				/* Set the name to NULL to show that this slot if free again. */
				xQueueRegistry[ ux ].pcQueueName = NULL;

				/* Set the handle to NULL to ensure the same queue handle cannot
				appear in the registry twice if it is added, removed, then
				added again. */
				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
				break;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}

	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */

#endif /* configQUEUE_REGISTRY_SIZE */
2698 /*-----------------------------------------------------------*/
2699 
#if ( configUSE_TIMERS == 1 )

	/* Kernel-internal helper used by the timer task: place the calling task
	on the queue's receive event list without actually blocking. */
	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
	{
	Queue_t * const pxQueue = ( Queue_t * ) xQueue;

		/* This function should not be called by application code hence the
		'Restricted' in its name.  It is not part of the public API.  It is
		designed for use by kernel code, and has special calling requirements.
		It can result in vListInsert() being called on a list that can only
		possibly ever have one item in it, so the list will be fast, but even
		so it should be called with the scheduler locked and not from a critical
		section. */

		/* Only do anything if there are no messages in the queue.  This function
		will not actually cause the task to block, just place it on a blocked
		list.  It will not block until the scheduler is unlocked - at which
		time a yield will be performed.  If an item is added to the queue while
		the queue is locked, and the calling task blocks on the queue, then the
		calling task will be immediately unblocked when the queue is unlocked. */
		prvLockQueue( pxQueue );
		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
		{
			/* There is nothing in the queue, block for the specified period. */
			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
		prvUnlockQueue( pxQueue );
	}

#endif /* configUSE_TIMERS */
2734 /*-----------------------------------------------------------*/
2735 
#if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	/* Create a queue set.  A queue set is itself a queue whose items are the
	handles of member queues that contain data. */
	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
	{
	QueueSetHandle_t pxQueue;

		pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );

		return pxQueue;
	}

#endif /* configUSE_QUEUE_SETS */
2748 /*-----------------------------------------------------------*/
2749 
#if ( configUSE_QUEUE_SETS == 1 )

	/* Add a queue or semaphore to a queue set.  Fails if the member is
	already in a set, or already contains items (its events would be lost). */
	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;

		taskENTER_CRITICAL();
		{
			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
			{
				/* Cannot add a queue/semaphore to more than one queue set. */
				xReturn = pdFAIL;
			}
			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
			{
				/* Cannot add a queue/semaphore to a queue set if there are already
				items in the queue/semaphore. */
				xReturn = pdFAIL;
			}
			else
			{
				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
				xReturn = pdPASS;
			}
		}
		taskEXIT_CRITICAL();

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2781 /*-----------------------------------------------------------*/
2782 
#if ( configUSE_QUEUE_SETS == 1 )

	/* Remove a queue or semaphore from a queue set.  Fails if the member is
	not in the given set, or is not empty (the set would retain stale
	pending events for it). */
	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
	{
	BaseType_t xReturn;
	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;

		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
		{
			/* The queue was not a member of the set. */
			xReturn = pdFAIL;
		}
		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
		{
			/* It is dangerous to remove a queue from a set when the queue is
			not empty because the queue set will still hold pending events for
			the queue. */
			xReturn = pdFAIL;
		}
		else
		{
			taskENTER_CRITICAL();
			{
				/* The queue is no longer contained in the set. */
				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
			}
			taskEXIT_CRITICAL();
			xReturn = pdPASS;
		}

		return xReturn;
	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */

#endif /* configUSE_QUEUE_SETS */
2817 /*-----------------------------------------------------------*/
2818 
#if ( configUSE_QUEUE_SETS == 1 )

	/* Block on the queue set and return the handle of a member queue that
	contains data, or NULL if the timeout expired. */
	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2830 /*-----------------------------------------------------------*/
2831 
#if ( configUSE_QUEUE_SETS == 1 )

	/* ISR-safe version of xQueueSelectFromSet() - never blocks; returns
	NULL if no member queue contains data. */
	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
	{
	QueueSetMemberHandle_t xReturn = NULL;

		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2843 /*-----------------------------------------------------------*/
2844 
#if ( configUSE_QUEUE_SETS == 1 )

	/* Post the handle of pxQueue into the queue set that contains it, waking
	a task blocked on the set if appropriate.  Returns pdTRUE if a context
	switch is required. */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
	{
	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
	BaseType_t xReturn = pdFALSE;

		/* This function must be called form a critical section. */

		configASSERT( pxQueueSetContainer );
		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );

		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
		{
			const int8_t cTxLock = pxQueueSetContainer->cTxLock;

			traceQUEUE_SEND( pxQueueSetContainer );

			/* The data copied is the handle of the queue that contains data. */
			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );

			if( cTxLock == queueUNLOCKED )
			{
				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
					{
						/* The task waiting has a higher priority. */
						xReturn = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* The set is locked - record the post so prvUnlockQueue() can
				process it later. */
				pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		return xReturn;
	}

#endif /* configUSE_QUEUE_SETS */
2899 
2900 
2901 
2902 
2903 
2904 
2905 
2906 
2907 
2908 
2909 
2910 
#define pdTRUE
Definition: projdefs.h:46
BaseType_t xQueueAddToSet(QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION
#define pxMutexHolder
Definition: queue.c:67
UBaseType_t uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)
Definition: queue.c:1930
void vPortFree(void *pv) PRIVILEGED_FUNCTION
Definition: heap_4.c:311
#define traceQUEUE_PEEK_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:457
#define queueQUEUE_TYPE_SET
Definition: queue.h:70
BaseType_t xQueueTakeMutexRecursive(QueueHandle_t xMutex, TickType_t xTicksToWait) PRIVILEGED_FUNCTION
#define queueOVERWRITE
Definition: queue.h:66
BaseType_t xQueueCRSendFromISR(QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken)
#define traceGIVE_MUTEX_RECURSIVE_FAILED(pxMutex)
Definition: FreeRTOS.h:417
BaseType_t xQueueSemaphoreTake(QueueHandle_t xQueue, TickType_t xTicksToWait)
Definition: queue.c:1382
QueueHandle_t xQueueCreateMutexStatic(const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue) PRIVILEGED_FUNCTION
void * QueueSetMemberHandle_t
Definition: queue.h:61
Definition: list.h:164
#define queueYIELD_IF_USING_PREEMPTION()
Definition: queue.c:79
void vQueueSetQueueNumber(QueueHandle_t xQueue, UBaseType_t uxQueueNumber) PRIVILEGED_FUNCTION
#define errQUEUE_YIELD
Definition: projdefs.h:56
xQUEUE Queue_t
Definition: queue.c:128
union QueueDefinition::@28 u
TaskHandle_t xTaskGetCurrentTaskHandle(void) PRIVILEGED_FUNCTION
#define traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName)
Definition: FreeRTOS.h:609
UBaseType_t uxQueueMessagesWaiting(const QueueHandle_t xQueue)
Definition: queue.c:1896
#define traceQUEUE_SEND(pxQueue)
Definition: FreeRTOS.h:437
#define traceCREATE_COUNTING_SEMAPHORE()
Definition: FreeRTOS.h:429
#define listGET_ITEM_VALUE_OF_HEAD_ENTRY(pxList)
Definition: list.h:217
#define queueQUEUE_TYPE_COUNTING_SEMAPHORE
Definition: queue.h:72
#define queueLOCKED_UNMODIFIED
Definition: queue.c:54
#define taskEXIT_CRITICAL()
Definition: task.h:194
void vTaskSuspendAll(void) PRIVILEGED_FUNCTION
Definition: tasks.c:2034
#define configMAX_PRIORITIES
#define traceQUEUE_SEND_FAILED(pxQueue)
Definition: FreeRTOS.h:441
#define traceQUEUE_CREATE_FAILED(ucQueueType)
Definition: FreeRTOS.h:401
#define portENABLE_INTERRUPTS()
Definition: portmacro.h:103
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH
Definition: queue.c:73
void * xQueueGetMutexHolder(QueueHandle_t xSemaphore) PRIVILEGED_FUNCTION
#define portDISABLE_INTERRUPTS()
Definition: portmacro.h:102
#define traceQUEUE_CREATE(pxNewQueue)
Definition: FreeRTOS.h:397
BaseType_t xTaskPriorityInherit(TaskHandle_t const pxMutexHolder) PRIVILEGED_FUNCTION
void * pvTaskIncrementMutexHeldCount(void) PRIVILEGED_FUNCTION
#define traceQUEUE_SEND_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:465
#define traceTAKE_MUTEX_RECURSIVE_FAILED(pxMutex)
Definition: FreeRTOS.h:425
void * pvPortMalloc(size_t xSize) PRIVILEGED_FUNCTION
Definition: heap_4.c:155
#define vQueueUnregisterQueue(xQueue)
Definition: FreeRTOS.h:284
void vTaskPlaceOnEventListRestricted(List_t *const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely) PRIVILEGED_FUNCTION
#define NULL
Definition: nm_bsp.h:52
QueueSetHandle_t xQueueCreateSet(const UBaseType_t uxEventQueueLength) PRIVILEGED_FUNCTION
#define traceQUEUE_PEEK_FAILED(pxQueue)
Definition: FreeRTOS.h:453
#define listLIST_IS_EMPTY(pxList)
Definition: list.h:250
static BaseType_t prvCopyDataToQueue(Queue_t *const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition) PRIVILEGED_FUNCTION
Definition: queue.c:2041
void * QueueSetHandle_t
Definition: queue.h:54
#define traceQUEUE_PEEK(pxQueue)
Definition: FreeRTOS.h:449
List_t xTasksWaitingToReceive
Definition: queue.c:102
void * xQueueGetMutexHolderFromISR(QueueHandle_t xSemaphore) PRIVILEGED_FUNCTION
unsigned long UBaseType_t
Definition: portmacro.h:58
uint32_t TickType_t
Definition: portmacro.h:64
#define queueSEND_TO_BACK
Definition: queue.h:64
struct QueueDefinition xQUEUE
QueueSetMemberHandle_t xQueueSelectFromSet(QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait) PRIVILEGED_FUNCTION
BaseType_t xQueuePeekFromISR(QueueHandle_t xQueue, void *const pvBuffer)
Definition: queue.c:1842
volatile UBaseType_t uxMessagesWaiting
Definition: queue.c:104
#define traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:469
#define portSET_INTERRUPT_MASK_FROM_ISR()
Definition: FreeRTOS.h:259
int8_t * pcTail
Definition: queue.c:92
Definition: task.h:94
QueueSetMemberHandle_t xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION
BaseType_t xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)
Definition: queue.c:2318
#define pcQueueGetName(xQueue)
Definition: FreeRTOS.h:285
void vTaskPriorityDisinheritAfterTimeout(TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask) PRIVILEGED_FUNCTION
#define configASSERT(x)
Definition: FreeRTOS.h:235
#define vQueueAddToRegistry(xQueue, pcName)
Definition: FreeRTOS.h:283
#define traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue)
Definition: FreeRTOS.h:355
#define traceQUEUE_RECEIVE_FAILED(pxQueue)
Definition: FreeRTOS.h:461
UBaseType_t uxLength
Definition: queue.c:105
int8_t * pcWriteTo
Definition: queue.c:93
int8_t * pcReadFrom
Definition: queue.c:97
#define queueUNLOCKED
Definition: queue.c:53
#define traceQUEUE_RECEIVE(pxQueue)
Definition: FreeRTOS.h:445
void vQueueWaitForMessageRestricted(QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely) PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA
Definition: mpu_wrappers.h:175
BaseType_t xQueueReceiveFromISR(QueueHandle_t xQueue, void *const pvBuffer, BaseType_t *const pxHigherPriorityTaskWoken)
Definition: queue.c:1751
#define portASSERT_IF_INTERRUPT_PRIORITY_INVALID()
Definition: FreeRTOS.h:779
BaseType_t xTaskPriorityDisinherit(TaskHandle_t const pxMutexHolder) PRIVILEGED_FUNCTION
#define traceBLOCKING_ON_QUEUE_PEEK(pxQueue)
Definition: FreeRTOS.h:363
#define pdFAIL
Definition: projdefs.h:49
void vTaskMissedYield(void) PRIVILEGED_FUNCTION
Definition: tasks.c:3171
long BaseType_t
Definition: portmacro.h:57
BaseType_t xQueueGiveMutexRecursive(QueueHandle_t pxMutex) PRIVILEGED_FUNCTION
UBaseType_t uxQueueGetQueueNumber(QueueHandle_t xQueue) PRIVILEGED_FUNCTION
#define errQUEUE_FULL
Definition: projdefs.h:51
static void prvUnlockQueue(Queue_t *const pxQueue) PRIVILEGED_FUNCTION
Definition: queue.c:2138
#define traceCREATE_MUTEX_FAILED()
Definition: FreeRTOS.h:409
#define pdPASS
Definition: projdefs.h:48
BaseType_t xQueueCRReceiveFromISR(QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxTaskWoken)
BaseType_t xQueueReceive(QueueHandle_t xQueue, void *const pvBuffer, TickType_t xTicksToWait)
Definition: queue.c:1240
uint8_t ucQueueGetQueueType(QueueHandle_t xQueue) PRIVILEGED_FUNCTION
void * QueueHandle_t
Definition: queue.h:47
#define traceTAKE_MUTEX_RECURSIVE(pxMutex)
Definition: FreeRTOS.h:421
BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
Definition: queue.c:248
void vCoRoutineAddToDelayedList(TickType_t xTicksToDelay, List_t *pxEventList)
static void prvInitialiseNewQueue(const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue) PRIVILEGED_FUNCTION
Definition: queue.c:409
#define traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:481
#define portYIELD_WITHIN_API
Definition: FreeRTOS.h:723
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue)
Definition: FreeRTOS.h:477
#define pdFALSE
Definition: projdefs.h:45
#define configQUEUE_REGISTRY_SIZE
Definition: FreeRTOS.h:279
BaseType_t xTaskResumeAll(void) PRIVILEGED_FUNCTION
Definition: tasks.c:2107
BaseType_t xTaskGetSchedulerState(void) PRIVILEGED_FUNCTION
#define taskENTER_CRITICAL()
Definition: task.h:179
#define uxQueueType
Definition: queue.c:68
BaseType_t xQueueGenericSendFromISR(QueueHandle_t xQueue, const void *const pvItemToQueue, BaseType_t *const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition)
Definition: queue.c:924
#define traceCREATE_COUNTING_SEMAPHORE_FAILED()
Definition: FreeRTOS.h:433
volatile int8_t cRxLock
Definition: queue.c:108
BaseType_t xQueueCRSend(QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait)
List_t xTasksWaitingToSend
Definition: queue.c:101
#define traceQUEUE_RECEIVE_FROM_ISR(pxQueue)
Definition: FreeRTOS.h:473
QueueHandle_t xQueueCreateMutex(const uint8_t ucQueueType) PRIVILEGED_FUNCTION
static BaseType_t prvIsQueueFull(const Queue_t *pxQueue) PRIVILEGED_FUNCTION
Definition: queue.c:2297
UBaseType_t uxItemSize
Definition: queue.c:106
#define queueQUEUE_IS_MUTEX
Definition: queue.c:69
volatile int8_t cTxLock
Definition: queue.c:109
#define traceBLOCKING_ON_QUEUE_SEND(pxQueue)
Definition: FreeRTOS.h:371
#define errQUEUE_BLOCKED
Definition: projdefs.h:55
BaseType_t xQueuePeek(QueueHandle_t xQueue, void *const pvBuffer, TickType_t xTicksToWait)
Definition: queue.c:1601
BaseType_t xQueueRemoveFromSet(QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet) PRIVILEGED_FUNCTION
BaseType_t xCoRoutineRemoveFromEventList(const List_t *pxEventList)
UBaseType_t uxRecursiveCallCount
Definition: queue.c:98
QueueHandle_t xQueueCreateCountingSemaphoreStatic(const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue) PRIVILEGED_FUNCTION
BaseType_t xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)
Definition: queue.c:2279
#define PRIVILEGED_FUNCTION
Definition: mpu_wrappers.h:174
#define tskIDLE_PRIORITY
Definition: task.h:155
#define errQUEUE_EMPTY
Definition: projdefs.h:50
#define traceQUEUE_DELETE(pxQueue)
Definition: FreeRTOS.h:485
#define queueMUTEX_GIVE_BLOCK_TIME
Definition: queue.c:74
#define traceCREATE_MUTEX(pxNewQueue)
Definition: FreeRTOS.h:405
#define prvLockQueue(pxQueue)
Definition: queue.c:233
struct xSTATIC_QUEUE StaticQueue_t
#define mtCOVERAGE_TEST_MARKER()
Definition: FreeRTOS.h:787
BaseType_t xTaskCheckForTimeOut(TimeOut_t *const pxTimeOut, TickType_t *const pxTicksToWait) PRIVILEGED_FUNCTION
Definition: tasks.c:3108
#define taskSCHEDULER_SUSPENDED
Definition: task.h:219
#define traceGIVE_MUTEX_RECURSIVE(pxMutex)
Definition: FreeRTOS.h:413
void vQueueDelete(QueueHandle_t xQueue)
Definition: queue.c:1942
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedStatusValue)
Definition: FreeRTOS.h:263
#define listCURRENT_LIST_LENGTH(pxList)
Definition: list.h:255
BaseType_t xQueueCRReceive(QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait)
static void prvCopyDataFromQueue(Queue_t *const pxQueue, void *const pvBuffer) PRIVILEGED_FUNCTION
Definition: queue.c:2120
BaseType_t xQueueGenericSend(QueueHandle_t xQueue, const void *const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition)
Definition: queue.c:726
BaseType_t xTaskRemoveFromEventList(const List_t *const pxEventList) PRIVILEGED_FUNCTION
Definition: tasks.c:2986
void vTaskPlaceOnEventList(List_t *const pxEventList, const TickType_t xTicksToWait) PRIVILEGED_FUNCTION
Definition: tasks.c:2912
void vListInitialise(List_t *const pxList) PRIVILEGED_FUNCTION
Definition: list.c:38
QueueHandle_t xQueueCreateCountingSemaphore(const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount) PRIVILEGED_FUNCTION
static BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue) PRIVILEGED_FUNCTION
Definition: queue.c:2258
BaseType_t xQueueGiveFromISR(QueueHandle_t xQueue, BaseType_t *const pxHigherPriorityTaskWoken)
Definition: queue.c:1075
void vTaskInternalSetTimeOutState(TimeOut_t *const pxTimeOut) PRIVILEGED_FUNCTION
Definition: tasks.c:3100
int8_t * pcHead
Definition: queue.c:91
UBaseType_t uxQueueSpacesAvailable(const QueueHandle_t xQueue)
Definition: queue.c:1912


inertial_sense_ros
Author(s):
autogenerated on Sun Feb 28 2021 03:17:58