| /* |
| * Copyright (c) 1991, 1993 |
| * The Regents of the University of California. All rights reserved. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions |
| * are met: |
| * 1. Redistributions of source code must retain the above copyright |
| * notice, this list of conditions and the following disclaimer. |
| * 2. Redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution. |
| * 4. Neither the name of the University nor the names of its contributors |
| * may be used to endorse or promote products derived from this software |
| * without specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
| * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
| * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| * SUCH DAMAGE. |
| * |
| * $Id: queue.h 1441 2006-02-06 16:03:21Z mrenzmann $ |
| */ |
| |
| #ifndef _SYS_QUEUE_H_ |
| #define _SYS_QUEUE_H_ |
| |
| #ifdef MUC_BUILD |
| #include <stddef.h> |
| #endif |
| |
| /* |
| * This file defines four types of data structures: singly-linked lists, |
| * singly-linked tail queues, lists and tail queues. |
| * |
| * A singly-linked list is headed by a single forward pointer. The elements |
| * are singly linked for minimum space and pointer manipulation overhead at |
| * the expense of O(n) removal for arbitrary elements. New elements can be |
| * added to the list after an existing element or at the head of the list. |
| * Elements being removed from the head of the list should use the explicit |
| * macro for this purpose for optimum efficiency. A singly-linked list may |
| * only be traversed in the forward direction. Singly-linked lists are ideal |
| * for applications with large datasets and few or no removals or for |
| * implementing a LIFO queue. |
| * |
| * A singly-linked tail queue is headed by a pair of pointers, one to the |
| * head of the list and the other to the tail of the list. The elements are |
| * singly linked for minimum space and pointer manipulation overhead at the |
| * expense of O(n) removal for arbitrary elements. New elements can be added |
| * to the list after an existing element, at the head of the list, or at the |
| * end of the list. Elements being removed from the head of the tail queue |
| * should use the explicit macro for this purpose for optimum efficiency. |
| * A singly-linked tail queue may only be traversed in the forward direction. |
| * Singly-linked tail queues are ideal for applications with large datasets |
| * and few or no removals or for implementing a FIFO queue. |
| * |
| * A list is headed by a single forward pointer (or an array of forward |
| * pointers for a hash table header). The elements are doubly linked |
| * so that an arbitrary element can be removed without a need to |
| * traverse the list. New elements can be added to the list before |
| * or after an existing element or at the head of the list. A list |
| * may only be traversed in the forward direction. |
| * |
| * A tail queue is headed by a pair of pointers, one to the head of the |
| * list and the other to the tail of the list. The elements are doubly |
| * linked so that an arbitrary element can be removed without a need to |
| * traverse the list. New elements can be added to the list before or |
| * after an existing element, at the head of the list, or at the end of |
| * the list. A tail queue may be traversed in either direction. |
| * |
| * For details on the use of these macros, see the queue(3) manual page. |
| * |
| * |
| * SLIST LIST STAILQ TAILQ |
| * _HEAD + + + + |
| * _HEAD_INITIALIZER + + + + |
| * _ENTRY + + + + |
| * _INIT + + + + |
| * _EMPTY + + + + |
| * _FIRST + + + + |
| * _NEXT + + + + |
| * _PREV - - - + |
| * _LAST - - + + |
| * _FOREACH + + + + |
| * _FOREACH_SAFE + + + + |
| * _FOREACH_REVERSE - - - + |
| * _FOREACH_REVERSE_SAFE - - - + |
| * _INSERT_HEAD + + + + |
| * _INSERT_BEFORE - + - + |
| * _INSERT_AFTER + + + + |
| * _INSERT_TAIL - - + + |
| * _CONCAT - - + + |
| * _REMOVE_HEAD + - + - |
| * _REMOVE + + + + |
| * |
| */ |
| |
| #ifdef MUC_BUILD |
| # define QUEUE_MACRO_LOCK_DEBUG 0 |
| #else |
| # define QUEUE_MACRO_LOCK_DEBUG 0 |
| #endif |
| |
| /* |
| * Additional fields to try and locate unprotected accesses of fields. |
| * These macros provide hooks to a function which is checked when the |
| * various queue macros are |
| */ |
| #if QUEUE_MACRO_LOCK_DEBUG |
| # define QUEUE_MACRO_LOCK_FIELD(prefix) \ |
| int prefix##lockcontext; |
| |
| # define QUEUE_MACRO_LOCK_INIT(prefix, x) (x)->prefix##lockcontext = 0 |
| struct os_spinlock; |
| int queue_debug_muc_context_check(int* context, const char* file, const int line); |
| # define MUC_CONTEXT_CHECK_IN_INTERRUPT 0x00000001 |
| # define MUC_CONTEXT_CHECK_NOT_IN_INTERRUPT 0x00000002 |
| # define MUC_CONTEXT_CHECK(context) queue_debug_muc_context_check(&(context), __FILE__, __LINE__) |
| # define QUEUE_MACRO_LOCK_ASSERT(context) MUC_CONTEXT_CHECK(context) |
| #elif defined(SYSTEM_BUILD) |
| # define QUEUE_MACRO_LOCK_FIELD(prefix) int dummy |
# define QUEUE_MACRO_LOCK_INIT(prefix, x) (x)->dummy = 0
| # define QUEUE_MACRO_LOCK_ASSERT(context) __queue_macro_lock_assert_noop() |
| static __inline int __queue_macro_lock_assert_noop(void) { |
| return 1; |
| } |
| #else |
| # define QUEUE_MACRO_LOCK_FIELD(prefix) |
| # define QUEUE_MACRO_LOCK_INIT(prefix, x) |
| # define QUEUE_MACRO_LOCK_ASSERT(context) __queue_macro_lock_assert_noop() |
| static __inline__ int __queue_macro_lock_assert_noop(void) { |
| return 1; |
| } |
| #endif |
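
/*
 * Purely illustrative sketch of what the debug hook above might look like.
 * The real queue_debug_muc_context_check() is provided elsewhere in the
 * build; the in_interrupt()/printk() calls below are assumptions, not part
 * of this header.  The idea is that the hook records the first context
 * (interrupt or not) in which a queue head is touched and complains if a
 * later access comes from the other context; it must return non-zero so
 * that the _FOREACH loop conditions it is embedded in keep iterating.
 *
 *	int
 *	queue_debug_muc_context_check(int *context, const char *file, const int line)
 *	{
 *		int now = in_interrupt() ? MUC_CONTEXT_CHECK_IN_INTERRUPT :
 *					   MUC_CONTEXT_CHECK_NOT_IN_INTERRUPT;
 *
 *		if (*context == 0)
 *			*context = now;
 *		else if (*context != now)
 *			printk("queue used from mixed contexts at %s:%d\n",
 *			    file, line);
 *		return 1;
 *	}
 */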
| |
| #define QUEUE_MACRO_DEBUG 0 |
| #if QUEUE_MACRO_DEBUG |
| /* Store the last 2 places the queue element or head was altered */ |
| struct qm_trace { |
| char *lastfile; |
| int lastline; |
| char *prevfile; |
| int prevline; |
| }; |
| |
| #define TRACEBUF struct qm_trace trace; |
| #define TRASHIT(x) do {(x) = (void *)-1;} while (0) |
| |
| #define QMD_TRACE_HEAD(head) do { \ |
| (head)->trace.prevline = (head)->trace.lastline; \ |
| (head)->trace.prevfile = (head)->trace.lastfile; \ |
| (head)->trace.lastline = __LINE__; \ |
| (head)->trace.lastfile = __FILE__; \ |
| } while (0) |
| |
| #define QMD_TRACE_ELEM(elem) do { \ |
| (elem)->trace.prevline = (elem)->trace.lastline; \ |
| (elem)->trace.prevfile = (elem)->trace.lastfile; \ |
| (elem)->trace.lastline = __LINE__; \ |
| (elem)->trace.lastfile = __FILE__; \ |
| } while (0) |
| |
| #else |
| #define QMD_TRACE_ELEM(elem) |
| #define QMD_TRACE_HEAD(head) |
| #define TRACEBUF |
| #define TRASHIT(x) |
| #endif /* QUEUE_MACRO_DEBUG */ |
| |
| /* |
| * Singly-linked List declarations. |
| */ |
| #define SLIST_HEAD(name, type) \ |
| struct name { \ |
| struct type *slh_first; /* first element */ \ |
| QUEUE_MACRO_LOCK_FIELD(slh_); \ |
| } |
| |
| #define SLIST_HEAD_INITIALIZER(head) \ |
| { NULL } |
| |
| #define SLIST_ENTRY(type) \ |
| struct { \ |
| struct type *sle_next; /* next element */ \ |
| } |
| |
| #define SLIST_LOCK_ASSERT(head) \ |
| QUEUE_MACRO_LOCK_ASSERT((head)->slh_lockcontext) |
| |
| /* |
| * Singly-linked List functions. |
| */ |
| #define SLIST_EMPTY(head) ((head)->slh_first == NULL) |
| |
| #define SLIST_FIRST(head) ((head)->slh_first) |
| |
| #define SLIST_FOREACH(var, head, field) \ |
| for ((var) = SLIST_FIRST((head)); \ |
| SLIST_LOCK_ASSERT((head)) && (var); \ |
| (var) = SLIST_NEXT((var), field)) |
| |
| #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ |
| for ((var) = SLIST_FIRST((head)); \ |
| SLIST_LOCK_ASSERT((head)) && \ |
| (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ |
| (var) = (tvar)) |
| |
| #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ |
| for ((varp) = &SLIST_FIRST((head)); \ |
| SLIST_LOCK_ASSERT((head)) && \ |
| ((var) = *(varp)) != NULL; \ |
| (varp) = &SLIST_NEXT((var), field)) |
| |
| #define SLIST_INIT(head) do { \ |
| SLIST_FIRST((head)) = NULL; \ |
| } while (0) |
| |
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
	/* NB: no list head is passed in, so no lock assertion is possible */ \
	SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
	SLIST_NEXT((slistelm), field) = (elm); \
} while (0)
| |
| #define SLIST_INSERT_HEAD(head, elm, field) do { \ |
| SLIST_LOCK_ASSERT((head)); \ |
| SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ |
| SLIST_FIRST((head)) = (elm); \ |
| } while (0) |
| |
| #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) |
| |
| #define SLIST_REMOVE(head, elm, type, field) do { \ |
| SLIST_LOCK_ASSERT((head)); \ |
| if (SLIST_FIRST((head)) == (elm)) { \ |
| SLIST_REMOVE_HEAD((head), field); \ |
| } \ |
| else { \ |
| struct type *curelm = SLIST_FIRST((head)); \ |
| while (SLIST_NEXT(curelm, field) != (elm)) \ |
| curelm = SLIST_NEXT(curelm, field); \ |
| SLIST_NEXT(curelm, field) = \ |
| SLIST_NEXT(SLIST_NEXT(curelm, field), field); \ |
| } \ |
| } while (0) |
| |
| #define SLIST_REMOVE_HEAD(head, field) do { \ |
| SLIST_LOCK_ASSERT((head)); \ |
| SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ |
| } while (0) |
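
/*
 * Illustrative SLIST usage as a LIFO stack.  This is a sketch only; the
 * "struct foo" type and the field and function names below are hypothetical
 * and not part of this header.
 *
 *	struct foo {
 *		int data;
 *		SLIST_ENTRY(foo) link;
 *	};
 *
 *	SLIST_HEAD(foo_head, foo) foo_stack = SLIST_HEAD_INITIALIZER(foo_stack);
 *
 *	void
 *	foo_push(struct foo *f)
 *	{
 *		SLIST_INSERT_HEAD(&foo_stack, f, link);
 *	}
 *
 *	struct foo *
 *	foo_pop(void)
 *	{
 *		struct foo *f = SLIST_FIRST(&foo_stack);
 *
 *		if (f != NULL)
 *			SLIST_REMOVE_HEAD(&foo_stack, link);
 *		return f;
 *	}
 */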
| |
| /* |
| * Singly-linked Tail queue declarations. |
| */ |
| #define STAILQ_HEAD(name, type) \ |
| struct name { \ |
| struct type *stqh_first;/* first element */ \ |
| struct type **stqh_last;/* addr of last next element */ \ |
| QUEUE_MACRO_LOCK_FIELD(stqh_); /* debug locking primitive */ \ |
| } |
| |
| #define STAILQ_HEAD_INITIALIZER(head) \ |
| { NULL, &(head).stqh_first } |
| |
| #define STAILQ_ENTRY(type) \ |
| struct { \ |
| struct type *stqe_next; /* next element */ \ |
| } |
| |
| #define STAILQ_LOCK_ASSERT(head) \ |
| QUEUE_MACRO_LOCK_ASSERT((head)->stqh_lockcontext) |
| #if QUEUE_MACRO_LOCK_DEBUG |
/* NB: assumes the head also has an stqh_lock member, which QUEUE_MACRO_LOCK_FIELD() above does not declare */
# define STAILQ_INIT_LOCK_DEBUG(head, lock) (head)->stqh_lock = lock;
| #else |
| # define STAILQ_INIT_LOCK_DEBUG(head, intref) |
| #endif |
| |
| /* |
| * Singly-linked Tail queue functions. |
| */ |
| #define STAILQ_CONCAT(head1, head2) do { \ |
| if (!STAILQ_EMPTY((head2))) { \ |
| *(head1)->stqh_last = (head2)->stqh_first; \ |
| (head1)->stqh_last = (head2)->stqh_last; \ |
| STAILQ_INIT((head2)); \ |
| } \ |
| } while (0) |
| |
| #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) |
| |
| #define STAILQ_FIRST(head) ((head)->stqh_first) |
| |
| #define STAILQ_FOREACH(var, head, field) \ |
	for ((var) = STAILQ_FIRST((head)); \
| STAILQ_LOCK_ASSERT((head)) && (var); \ |
| (var) = STAILQ_NEXT((var), field)) |
| |
| |
| #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ |
| for ((var) = STAILQ_FIRST((head)); \ |
| STAILQ_LOCK_ASSERT((head)) && \ |
| (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ |
| (var) = (tvar)) |
| |
| #define STAILQ_INIT(head) do { \ |
| STAILQ_FIRST((head)) = NULL; \ |
| (head)->stqh_last = &STAILQ_FIRST((head)); \ |
| QUEUE_MACRO_LOCK_INIT(stqh_, (head)); \ |
| } while (0) |
| |
| #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ |
| (head)->stqh_last = &STAILQ_NEXT((elm), field); \ |
| STAILQ_NEXT((tqelm), field) = (elm); \ |
| } while (0) |
| |
| #define STAILQ_INSERT_HEAD(head, elm, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ |
| (head)->stqh_last = &STAILQ_NEXT((elm), field); \ |
| STAILQ_FIRST((head)) = (elm); \ |
| } while (0) |
| |
| #define STAILQ_INSERT_TAIL(head, elm, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| STAILQ_NEXT((elm), field) = NULL; \ |
| *(head)->stqh_last = (elm); \ |
| (head)->stqh_last = &STAILQ_NEXT((elm), field); \ |
| } while (0) |
| |
| #define STAILQ_LAST(head, type, field) \ |
| (STAILQ_EMPTY((head)) ? \ |
| NULL : \ |
| ((struct type *) \ |
| ((char *)((head)->stqh_last) - __offsetof(struct type, field)))) |
| |
| #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) |
| |
| #define STAILQ_REMOVE(head, elm, type, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| if (STAILQ_FIRST((head)) == (elm)) { \ |
| STAILQ_REMOVE_HEAD((head), field); \ |
| } \ |
| else { \ |
| struct type *curelm = STAILQ_FIRST((head)); \ |
| while (STAILQ_NEXT(curelm, field) != (elm)) \ |
| curelm = STAILQ_NEXT(curelm, field); \ |
| if ((STAILQ_NEXT(curelm, field) = \ |
| STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\ |
| (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ |
| } \ |
| } while (0) |
| |
| |
| #define STAILQ_REMOVE_AFTER(head, elm, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| if (STAILQ_NEXT(elm, field)) { \ |
| if ((STAILQ_NEXT(elm, field) = \ |
| STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)\ |
| (head)->stqh_last = &STAILQ_NEXT((elm), field); \ |
| } \ |
| } while (0) |
| |
| |
| #define STAILQ_REMOVE_HEAD(head, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| if ((STAILQ_FIRST((head)) = \ |
| STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ |
| (head)->stqh_last = &STAILQ_FIRST((head)); \ |
| } while (0) |
| |
| #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ |
| STAILQ_LOCK_ASSERT(head); \ |
| if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ |
| (head)->stqh_last = &STAILQ_FIRST((head)); \ |
| } while (0) |
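
/*
 * Illustrative STAILQ usage as a FIFO queue.  Sketch only; the "struct pkt"
 * type and the function names below are hypothetical.
 *
 *	struct pkt {
 *		int len;
 *		STAILQ_ENTRY(pkt) link;
 *	};
 *
 *	STAILQ_HEAD(pkt_head, pkt) pktq = STAILQ_HEAD_INITIALIZER(pktq);
 *
 *	void
 *	pkt_enqueue(struct pkt *p)
 *	{
 *		STAILQ_INSERT_TAIL(&pktq, p, link);
 *	}
 *
 *	struct pkt *
 *	pkt_dequeue(void)
 *	{
 *		struct pkt *p = STAILQ_FIRST(&pktq);
 *
 *		if (p != NULL)
 *			STAILQ_REMOVE_HEAD(&pktq, link);
 *		return p;
 *	}
 */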
| |
| /* |
| * List declarations. |
| */ |
| #define ATH_LIST_HEAD(name, type) \ |
| struct name { \ |
| struct type *lh_first; /* first element */ \ |
| QUEUE_MACRO_LOCK_FIELD(lh_); \ |
| } |
| |
| #define LIST_HEAD_INITIALIZER(head) \ |
| { NULL } |
| |
| #define LIST_ENTRY(type) \ |
| struct { \ |
| struct type *le_next; /* next element */ \ |
| struct type **le_prev; /* address of previous next element */ \ |
| } |
| |
| #define LIST_LOCK_ASSERT(head) \ |
| QUEUE_MACRO_LOCK_ASSERT((head)->lh_lockcontext) |
| |
| /* |
| * List functions. |
| */ |
| |
| #define LIST_EMPTY(head) ((head)->lh_first == NULL) |
| |
| #define LIST_FIRST(head) ((head)->lh_first) |
| |
| #define LIST_FOREACH(var, head, field) \ |
| for ((var) = LIST_FIRST((head)); \ |
| LIST_LOCK_ASSERT((head)) && (var); \ |
| (var) = LIST_NEXT((var), field)) |
| |
| #define LIST_FOREACH_SAFE(var, head, field, tvar) \ |
| for ((var) = LIST_FIRST((head)); \ |
| LIST_LOCK_ASSERT((head)) && \ |
| (var) && ((tvar) = LIST_NEXT((var), field), 1); \ |
| (var) = (tvar)) |
| |
| #define LIST_INIT(head) do { \ |
| LIST_FIRST((head)) = NULL; \ |
| } while (0) |
| |
| #define LIST_INSERT_AFTER(listelm, elm, field) do { \ |
| if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ |
| LIST_NEXT((listelm), field)->field.le_prev = \ |
| &LIST_NEXT((elm), field); \ |
| LIST_NEXT((listelm), field) = (elm); \ |
| (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ |
| } while (0) |
| |
| #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ |
| (elm)->field.le_prev = (listelm)->field.le_prev; \ |
| LIST_NEXT((elm), field) = (listelm); \ |
| *(listelm)->field.le_prev = (elm); \ |
| (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ |
| } while (0) |
| |
| #define LIST_INSERT_HEAD(head, elm, field) do { \ |
| LIST_LOCK_ASSERT((head)); \ |
| if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ |
| LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ |
| LIST_FIRST((head)) = (elm); \ |
| (elm)->field.le_prev = &LIST_FIRST((head)); \ |
| } while (0) |
| |
| #define LIST_NEXT(elm, field) ((elm)->field.le_next) |
| |
| #define LIST_REMOVE(elm, field) do { \ |
| if (LIST_NEXT((elm), field) != NULL) \ |
| LIST_NEXT((elm), field)->field.le_prev = \ |
| (elm)->field.le_prev; \ |
| *(elm)->field.le_prev = LIST_NEXT((elm), field); \ |
| } while (0) |
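
/*
 * Illustrative LIST usage.  Sketch only; the "struct node" type and the
 * function names below are hypothetical.  Note that the head is declared
 * with ATH_LIST_HEAD() in this file (there is no plain LIST_HEAD() here).
 *
 *	struct node {
 *		int key;
 *		LIST_ENTRY(node) link;
 *	};
 *
 *	ATH_LIST_HEAD(node_head, node) nodes = LIST_HEAD_INITIALIZER(nodes);
 *
 *	struct node *
 *	node_find(int key)
 *	{
 *		struct node *n;
 *
 *		LIST_FOREACH(n, &nodes, link)
 *			if (n->key == key)
 *				return n;
 *		return NULL;
 *	}
 *
 *	void
 *	node_delete(struct node *n)
 *	{
 *		LIST_REMOVE(n, link);	(no head needed: O(1) unlink)
 *	}
 */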
| |
| /* |
| * Tail queue declarations. |
| */ |
| #define TAILQ_HEAD(name, type) \ |
| struct name { \ |
| struct type *tqh_first; /* first element */ \ |
| struct type **tqh_last; /* addr of last next element */ \ |
| TRACEBUF \ |
| QUEUE_MACRO_LOCK_FIELD(tqh_); \ |
| } |
| |
| #define TAILQ_HEAD_INITIALIZER(head) \ |
| { NULL, &(head).tqh_first } |
| #if (!defined(TAILQ_ENTRY)) |
| #define TAILQ_ENTRY(type) \ |
| struct { \ |
| struct type *tqe_next; /* next element */ \ |
| struct type **tqe_prev; /* address of previous next element */ \ |
| TRACEBUF \ |
| } |
| #endif |
| |
| #define TAILQ_LOCK_ASSERT(head) \ |
| QUEUE_MACRO_LOCK_ASSERT((head)->tqh_lockcontext) |
| |
| /* |
| * Tail queue functions. |
| */ |
| #define TAILQ_CONCAT(head1, head2, field) do { \ |
| if (!TAILQ_EMPTY(head2)) { \ |
| *(head1)->tqh_last = (head2)->tqh_first; \ |
| (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ |
| (head1)->tqh_last = (head2)->tqh_last; \ |
| TAILQ_INIT((head2)); \ |
		QMD_TRACE_HEAD(head1); \
| QMD_TRACE_HEAD(head2); \ |
| } \ |
| } while (0) |
| |
| #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) |
| |
| #define TAILQ_FIRST(head) ((head)->tqh_first) |
| |
| #define TAILQ_FOREACH(var, head, field) \ |
| for ((var) = TAILQ_FIRST((head)); \ |
| TAILQ_LOCK_ASSERT((head)) && (var); \ |
| (var) = TAILQ_NEXT((var), field)) |
| |
| #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ |
| for ((var) = TAILQ_FIRST((head)); \ |
| TAILQ_LOCK_ASSERT((head)) && \ |
| (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ |
| (var) = (tvar)) |
| |
| #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ |
| for ((var) = TAILQ_LAST((head), headname); \ |
| TAILQ_LOCK_ASSERT((head)) && (var); \ |
| (var) = TAILQ_PREV((var), headname, field)) |
| |
| #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ |
| for ((var) = TAILQ_LAST((head), headname); \ |
| TAILQ_LOCK_ASSERT((head)) && \ |
| (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ |
| (var) = (tvar)) |
| |
| #define TAILQ_INIT(head) do { \ |
| TAILQ_FIRST((head)) = NULL; \ |
| (head)->tqh_last = &TAILQ_FIRST((head)); \ |
| QMD_TRACE_HEAD(head); \ |
| } while (0) |
| |
| #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ |
| TAILQ_LOCK_ASSERT((head)); \ |
| if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ |
| TAILQ_NEXT((elm), field)->field.tqe_prev = \ |
| &TAILQ_NEXT((elm), field); \ |
| else { \ |
| (head)->tqh_last = &TAILQ_NEXT((elm), field); \ |
| QMD_TRACE_HEAD(head); \ |
| } \ |
| TAILQ_NEXT((listelm), field) = (elm); \ |
| (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ |
| QMD_TRACE_ELEM(&(elm)->field); \ |
| QMD_TRACE_ELEM(&listelm->field); \ |
| } while (0) |
| |
| #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ |
| (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ |
| TAILQ_NEXT((elm), field) = (listelm); \ |
| *(listelm)->field.tqe_prev = (elm); \ |
| (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ |
| QMD_TRACE_ELEM(&(elm)->field); \ |
| QMD_TRACE_ELEM(&listelm->field); \ |
| } while (0) |
| |
| #define TAILQ_INSERT_HEAD(head, elm, field) do { \ |
| TAILQ_LOCK_ASSERT((head)); \ |
| if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ |
| TAILQ_FIRST((head))->field.tqe_prev = \ |
| &TAILQ_NEXT((elm), field); \ |
| else \ |
| (head)->tqh_last = &TAILQ_NEXT((elm), field); \ |
| TAILQ_FIRST((head)) = (elm); \ |
| (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ |
| QMD_TRACE_HEAD(head); \ |
| QMD_TRACE_ELEM(&(elm)->field); \ |
| } while (0) |
| |
| #define TAILQ_INSERT_TAIL(head, elm, field) do { \ |
| TAILQ_LOCK_ASSERT((head)); \ |
| TAILQ_NEXT((elm), field) = NULL; \ |
| (elm)->field.tqe_prev = (head)->tqh_last; \ |
| *(head)->tqh_last = (elm); \ |
| (head)->tqh_last = &TAILQ_NEXT((elm), field); \ |
| QMD_TRACE_HEAD(head); \ |
| QMD_TRACE_ELEM(&(elm)->field); \ |
| } while (0) |
| |
| #define TAILQ_LAST(head, headname) \ |
| (*(((struct headname *)((head)->tqh_last))->tqh_last)) |
| |
| #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) |
| |
| #define TAILQ_PREV(elm, headname, field) \ |
| (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) |
| |
| #define TAILQ_REMOVE(head, elm, field) do { \ |
| TAILQ_LOCK_ASSERT((head)); \ |
| if ((TAILQ_NEXT((elm), field)) != NULL) \ |
| TAILQ_NEXT((elm), field)->field.tqe_prev = \ |
| (elm)->field.tqe_prev; \ |
| else { \ |
| (head)->tqh_last = (elm)->field.tqe_prev; \ |
| QMD_TRACE_HEAD(head); \ |
| } \ |
| *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ |
| TRASHIT((elm)->field.tqe_next); \ |
| TRASHIT((elm)->field.tqe_prev); \ |
| QMD_TRACE_ELEM(&(elm)->field); \ |
| } while (0) |
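
/*
 * Illustrative TAILQ usage.  Sketch only; the "struct task" type and the
 * function names below are hypothetical.
 *
 *	struct task {
 *		int prio;
 *		TAILQ_ENTRY(task) link;
 *	};
 *
 *	TAILQ_HEAD(task_head, task) tasks = TAILQ_HEAD_INITIALIZER(tasks);
 *
 *	void
 *	task_add(struct task *t)
 *	{
 *		TAILQ_INSERT_TAIL(&tasks, t, link);
 *	}
 *
 *	void
 *	task_prune(void)
 *	{
 *		struct task *t, *tmp;
 *
 *		TAILQ_FOREACH_SAFE(t, &tasks, link, tmp) {
 *			if (t->prio < 0)
 *				TAILQ_REMOVE(&tasks, t, link);
 *		}
 *	}
 *
 * The second argument to TAILQ_LAST()/TAILQ_PREV() and the third argument to
 * the _FOREACH_REVERSE macros is the head struct name, e.g.
 * TAILQ_LAST(&tasks, task_head).
 */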
| |
| |
| #ifdef _KERNEL |
| |
| /* |
| * XXX insque() and remque() are an old way of handling certain queues. |
| * They bogusly assumes that all queue heads look alike. |
| */ |
| |
| struct quehead { |
| struct quehead *qh_link; |
| struct quehead *qh_rlink; |
| }; |
| |
| #if defined(__GNUC__) || defined(__INTEL_COMPILER) |
| |
| static __inline void |
| insque(void *a, void *b) |
| { |
| struct quehead *element = (struct quehead *)a, |
| *head = (struct quehead *)b; |
| |
| element->qh_link = head->qh_link; |
| element->qh_rlink = head; |
| head->qh_link = element; |
| element->qh_link->qh_rlink = element; |
| } |
| |
| static __inline void |
| remque(void *a) |
| { |
| struct quehead *element = (struct quehead *)a; |
| |
| element->qh_link->qh_rlink = element->qh_rlink; |
| element->qh_rlink->qh_link = element->qh_link; |
| element->qh_rlink = 0; |
| } |
| |
| #else /* !(__GNUC__ || __INTEL_COMPILER) */ |
| |
| void insque(void *a, void *b); |
| void remque(void *a); |
| |
| #endif /* __GNUC__ || __INTEL_COMPILER */ |
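
/*
 * Illustrative use of the legacy interface above (sketch only; the names
 * are hypothetical).  insque()/remque() treat both arguments as struct
 * quehead, so the element's first two members must be the forward and
 * back links:
 *
 *	struct legacy_elem {
 *		struct legacy_elem *next;	(first, like qh_link)
 *		struct legacy_elem *prev;	(second, like qh_rlink)
 *		int payload;
 *	};
 *
 *	struct quehead circle = { &circle, &circle };	(self-linked ring head)
 *	struct legacy_elem e;
 *
 *	insque(&e, &circle);	(link e immediately after the head)
 *	remque(&e);		(unlink e again)
 */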
| |
| #endif /* _KERNEL */ |
| |
| #endif /* !_SYS_QUEUE_H_ */ |