#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)

/**
 * container_of - cast a member of a structure out to the containing structure
 *
 * @ptr:    the pointer to the member.
 * @type:   the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 */
#define container_of(ptr, type, member) ({ \
        const typeof( ((type *)0)->member ) *__mptr = (ptr); \
        (type *)( (char *)__mptr - offsetof(type,member) );})

/*
 * Check at compile time that something is of a particular type.
 * Always evaluates to 1 so you may use it easily in comparisons.
 */
#define typecheck(type,x) \
({      type __dummy; \
        typeof(x) __dummy2; \
        (void)(&__dummy == &__dummy2); \
        1; \
})

#define prefetch(x) 1

/* empty define to make this work in userspace -HW */
#define smp_wmb()

/*
 * The _rcu list-traversal macros below also call smp_read_barrier_depends();
 * stub it out the same way as smp_wmb() so they build in userspace.
 */
#define smp_read_barrier_depends()

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
        (ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}
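/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical "struct item" embeds a list_head, entries are appended
 * with list_add_tail(), and container_of() recovers the enclosing
 * struct from a pointer to the embedded member.
 *
 *      struct item {
 *              int value;
 *              struct list_head node;
 *      };
 *
 *      LIST_HEAD(item_list);           an empty head points at itself
 *
 *      void queue_item(struct item *it)
 *      {
 *              list_add_tail(&it->node, &item_list);   append, FIFO order
 *      }
 *
 *      struct item *first_item(void)   caller knows the list is non-empty
 *      {
 *              return container_of(item_list.next, struct item, node);
 *      }
 */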
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head * new,
                struct list_head * prev, struct list_head * next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}
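/*
 * Example (illustrative sketch, not part of the original header): list_del()
 * unlinks an entry and poisons its pointers, so the entry must be
 * re-initialised before it is inserted again.  "it" and "item_list" are the
 * hypothetical names from the earlier example.
 *
 *      list_del(&it->node);
 *      ...
 *      INIT_LIST_HEAD(&it->node);      re-initialise before reuse
 *      list_add(&it->node, &item_list);
 *
 * Following it->node.next after list_del() dereferences LIST_POISON1 and
 * faults, which is the intended way to catch use of stale entries.
 */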
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_kernel()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        first->prev = head;
        head->next = first;

        last->next = at;
        at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head);
                INIT_LIST_HEAD(list);
        }
}

/**
 * list_entry - get the struct for this entry
 * @ptr:    the &struct list_head pointer.
 * @type:   the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)
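/*
 * Example (illustrative sketch, not part of the original header): draining a
 * shared queue into a local list in one step with list_splice_init().  The
 * "pending" list, queue_lock and the lock()/unlock() calls are hypothetical;
 * the point is that the lock is only held while the shared list is touched.
 *
 *      LIST_HEAD(local);
 *
 *      lock(&queue_lock);
 *      list_splice_init(&pending, &local);     pending is empty again
 *      unlock(&queue_lock);
 *
 *      entries on "local" can now be processed without the lock
 */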
/**
 * list_for_each - iterate over a list
 * @pos:    the &struct list_head to use as a loop counter.
 * @head:   the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next, prefetch(pos->next); pos != (head); \
                pos = pos->next, prefetch(pos->next))

/**
 * __list_for_each - iterate over a list
 * @pos:    the &struct list_head to use as a loop counter.
 * @head:   the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos:    the &struct list_head to use as a loop counter.
 * @head:   the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
                pos = pos->prev, prefetch(pos->prev))

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:    the &struct list_head to use as a loop counter.
 * @n:      another &struct list_head to use as temporary storage
 * @head:   the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos:    the type * to use as a loop counter.
 * @head:   the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)                          \
        for (pos = list_entry((head)->next, typeof(*pos), member),     \
                     prefetch(pos->member.next);                       \
             &pos->member != (head);                                   \
             pos = list_entry(pos->member.next, typeof(*pos), member), \
                     prefetch(pos->member.next))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:    the type * to use as a loop counter.
 * @head:   the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)                  \
        for (pos = list_entry((head)->prev, typeof(*pos), member),     \
                     prefetch(pos->member.prev);                       \
             &pos->member != (head);                                   \
             pos = list_entry(pos->member.prev, typeof(*pos), member), \
                     prefetch(pos->member.prev))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 *                      list_for_each_entry_continue
 * @pos:    the type * to use as a start point
 * @head:   the head of the list
 * @member: the name of the list_struct within the struct.
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))
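/*
 * Example (illustrative sketch, not part of the original header): typed
 * iteration with list_for_each_entry(), reusing the hypothetical
 * "struct item" with its embedded "node" member.  The macro hides the
 * container_of() step that list_for_each() plus list_entry() would need.
 *
 *      struct item *it;
 *
 *      list_for_each_entry(it, &item_list, node)
 *              printf("%d\n", it->value);
 *
 * Entries must not be removed inside this loop; use
 * list_for_each_entry_safe() (below) for that.
 */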
/**
 * list_for_each_entry_continue - iterate over list of given type
 *                      continuing after existing point
 * @pos:    the type * to use as a loop counter.
 * @head:   the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member)                 \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                     prefetch(pos->member.next);                       \
             &pos->member != (head);                                   \
             pos = list_entry(pos->member.next, typeof(*pos), member), \
                     prefetch(pos->member.next))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:    the type * to use as a loop counter.
 * @n:      another type * to use as temporary storage
 * @head:   the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)                    \
        for (pos = list_entry((head)->next, typeof(*pos), member),       \
                n = list_entry(pos->member.next, typeof(*pos), member);  \
             &pos->member != (head);                                     \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
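/*
 * Example (illustrative sketch, not part of the original header): deleting
 * entries while walking the list.  The "n" cursor already holds the next
 * entry, so freeing "it" inside the body is safe.  "struct item" and
 * free_item() are hypothetical.
 *
 *      struct item *it, *n;
 *
 *      list_for_each_entry_safe(it, n, &item_list, node) {
 *              if (it->value < 0) {
 *                      list_del(&it->node);
 *                      free_item(it);
 *              }
 *      }
 */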
/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos:    the &struct list_head to use as a loop counter.
 * @head:   the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = (head)->next, prefetch(pos->next); pos != (head); \
                pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))

#define __list_for_each_rcu(pos, head) \
        for (pos = (head)->next; pos != (head); \
                pos = pos->next, ({ smp_read_barrier_depends(); 0;}))

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 *                          against removal of list entry
 * @pos:    the &struct list_head to use as a loop counter.
 * @n:      another &struct list_head to use as temporary storage
 * @head:   the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:    the type * to use as a loop counter.
 * @head:   the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member)                      \
        for (pos = list_entry((head)->next, typeof(*pos), member),     \
                     prefetch(pos->member.next);                       \
             &pos->member != (head);                                   \
             pos = list_entry(pos->member.next, typeof(*pos), member), \
                     ({ smp_read_barrier_depends(); 0;}),              \
                     prefetch(pos->member.next))


/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 *                      continuing after existing point.
 * @pos:    the &struct list_head to use as a loop counter.
 * @head:   the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = (pos)->next, prefetch((pos)->next); (pos) != (head); \
                (pos) = (pos)->next, ({ smp_read_barrier_depends(); 0;}), prefetch((pos)->next))

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (n->pprev) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

#define hlist_del_rcu_init hlist_del_init

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

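/*
 * Example (illustrative sketch, not part of the original header): a small
 * open-hashing table built from hlist_head buckets.  "struct object", the
 * "hnode" member, TABLE_SIZE and hash_key() are all hypothetical names.
 *
 *      struct hlist_head table[TABLE_SIZE];    initialise each bucket
 *                                              with INIT_HLIST_HEAD()
 *      struct object {
 *              unsigned long key;
 *              struct hlist_node hnode;
 *      };
 *
 *      void insert_object(struct object *obj)
 *      {
 *              hlist_add_head(&obj->hnode, &table[hash_key(obj->key)]);
 *      }
 *
 *      void remove_object(struct object *obj)
 *      {
 *              hlist_del(&obj->hnode);         no bucket argument needed:
 *                                              pprev points back into it
 *      }
 */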
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry(), but only if smp_read_barrier_depends()
 * is used to prevent memory-consistency problems on Alpha CPUs.
 * Regardless of the type of CPU, the list-traversal primitive
 * must be guarded by rcu_read_lock().
 *
 * OK, so why don't we have an hlist_for_each_entry_rcu()???
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if(next->next)
                next->next->pprev = &next->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos:   the type * to use as a loop counter.
 * @pos:    the &struct hlist_node to use as a loop counter.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)                    \
        for (pos = (head)->first;                                        \
             pos && ({ prefetch(pos->next); 1;}) &&                      \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos:   the type * to use as a loop counter.
 * @pos:    the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)                 \
        for (pos = (pos)->next;                                          \
             pos && ({ prefetch(pos->next); 1;}) &&                      \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos:   the type * to use as a loop counter.
 * @pos:    the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)                     \
        for (; pos && ({ prefetch(pos->next); 1;}) &&                    \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:   the type * to use as a loop counter.
 * @pos:    the &struct hlist_node to use as a loop counter.
 * @n:      another &struct hlist_node to use as temporary storage
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)            \
        for (pos = (head)->first;                                        \
             pos && ({ n = pos->next; 1; }) &&                           \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)
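/*
 * Example (illustrative sketch, not part of the original header): looking up
 * an entry in the hash table sketched above.  hlist_for_each_entry() needs
 * both a typed cursor and a raw hlist_node cursor; "struct object", "hnode",
 * "table" and hash_key() remain hypothetical names.
 *
 *      struct object *find_object(unsigned long key)
 *      {
 *              struct object *obj;
 *              struct hlist_node *pos;
 *
 *              hlist_for_each_entry(obj, pos, &table[hash_key(key)], hnode) {
 *                      if (obj->key == key)
 *                              return obj;
 *              }
 *              return NULL;
 *      }
 */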
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:   the type * to use as a loop counter.
 * @pos:    the &struct hlist_node to use as a loop counter.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member)                \
        for (pos = (head)->first;                                        \
             pos && ({ prefetch(pos->next); 1;}) &&                      \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )

#endif