Cross-reference: uses of 'rbnode' in the Linux source tree.

/linux/drivers/infiniband/hw/hfi1/

mmu_rb.c
     80  struct mmu_rb_node *rbnode;  in hfi1_mmu_rb_unregister() (local)
    101  rbnode = rb_entry(node, struct mmu_rb_node, node);  in hfi1_mmu_rb_unregister()
    104  list_move(&rbnode->list, &del_list);  in hfi1_mmu_rb_unregister()
    109  rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);  in hfi1_mmu_rb_unregister()
    110  list_del(&rbnode->list);  in hfi1_mmu_rb_unregister()
    111  kref_put(&rbnode->refcount, release_immediate);  in hfi1_mmu_rb_unregister()
    228  struct mmu_rb_node *rbnode, *ptr;  in hfi1_mmu_rb_evict() (local)
    239  list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {  in hfi1_mmu_rb_evict()
    241  if (kref_read(&rbnode->refcount) > 1)  in hfi1_mmu_rb_evict()
    244  if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,  in hfi1_mmu_rb_evict()
    [all …]

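The hfi1 hits sketch a two-phase teardown: under the handler lock every node is pulled off the tree onto a private deletion list, and the kref is dropped only after the lock is released. A minimal sketch of that shape, assuming each node also sits on an LRU list as in the driver; the demo_* names are illustrative, not hfi1's own:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
        struct rb_node node;
        struct list_head list;          /* linked on an LRU list while live */
        struct kref refcount;
};

static void demo_release(struct kref *kref)
{
        kfree(container_of(kref, struct demo_node, refcount));
}

/* Unlink everything under the lock; release references outside it. */
static void demo_unregister(struct rb_root *root, spinlock_t *lock)
{
        struct demo_node *dn;
        struct rb_node *p;
        LIST_HEAD(del_list);

        spin_lock(lock);
        while ((p = rb_first(root))) {
                dn = rb_entry(p, struct demo_node, node);
                rb_erase(p, root);
                /* assumes dn->list currently sits on an LRU list */
                list_move(&dn->list, &del_list);
        }
        spin_unlock(lock);

        while (!list_empty(&del_list)) {
                dn = list_first_entry(&del_list, struct demo_node, list);
                list_del(&dn->list);
                kref_put(&dn->refcount, demo_release);
        }
}
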
/linux/net/ipv4/

inet_fragment.c
     51  RB_CLEAR_NODE(&skb->rbnode);  in fragcb_clear()
     74  rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,  in fragrun_create()
     75               &q->last_run_head->rbnode.rb_right);  in fragrun_create()
     77  rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);  in fragrun_create()
     78  rb_insert_color(&skb->rbnode, &q->rb_fragments);  in fragrun_create()
    308  struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);  in inet_frag_rbtree_purge()
    311  rb_erase(&skb->rbnode, root);  in inet_frag_rbtree_purge()
    486  rb_link_node(&skb->rbnode, parent, rbn);  in inet_frag_queue_insert()
    487  rb_insert_color(&skb->rbnode, &q->rb_fragments);  in inet_frag_queue_insert()
    525  if (RB_EMPTY_NODE(&skb->rbnode))  in inet_frag_reasm_prepare()
    [all …]

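These hits show the standard two-step rbtree insert (rb_link_node() at a chosen parent slot, then rb_insert_color() to rebalance) plus RB_CLEAR_NODE()/RB_EMPTY_NODE() as an "is this node in a tree?" sentinel. A hedged sketch of the three idioms; the demo_* names are illustrative:

#include <linux/rbtree.h>

static void demo_insert_into_empty(struct rb_root *root, struct rb_node *n)
{
        rb_link_node(n, NULL, &root->rb_node);  /* empty tree: link at the root slot */
        rb_insert_color(n, root);               /* recolor/rotate as needed */
}

static void demo_append_after(struct rb_root *root, struct rb_node *last,
                              struct rb_node *n)
{
        /* Caller knows n sorts directly after last and last->rb_right is
         * free (cf. fragrun_create() appending to the tail run), so no
         * descent from the root is needed.
         */
        rb_link_node(n, last, &last->rb_right);
        rb_insert_color(n, root);
}

static void demo_remove(struct rb_root *root, struct rb_node *n)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);       /* so RB_EMPTY_NODE(n) is true again */
}
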
tcp_input.c
   5081  rb_erase(&skb->rbnode, &tp->out_of_order_queue);  in tcp_ofo_queue()
   5184  rb_link_node(&skb->rbnode, NULL, p);  in tcp_data_queue_ofo()
   5185  rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);  in tcp_data_queue_ofo()
   5207  parent = &tp->ooo_last_skb->rbnode;  in tcp_data_queue_ofo()
   5239  rb_replace_node(&skb1->rbnode, &skb->rbnode,  in tcp_data_queue_ofo()
   5258  rb_link_node(&skb->rbnode, parent, p);  in tcp_data_queue_ofo()
   5259  rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);  in tcp_data_queue_ofo()
   5271  rb_erase(&skb1->rbnode, &tp->out_of_order_queue);  in tcp_data_queue_ofo()
   5522  rb_erase(&skb->rbnode, root);  in tcp_collapse_one()
   5545  rb_link_node(&skb->rbnode, parent, p);  in tcp_rbtree_insert()
   [all …]

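tcp_data_queue_ofo() caches the highest out-of-order skb in tp->ooo_last_skb so that mostly in-order arrivals can link directly under it (the hit at line 5207) instead of descending from the root; since the cached node holds the largest key, its right child is always free. A sketch of that fast path under a hypothetical seq-keyed node; oseg and oseg_insert() are illustrative names, not the TCP code's own:

#include <linux/rbtree.h>
#include <linux/types.h>

struct oseg {
        struct rb_node node;
        u32 seq;
};

/* *last caches the highest-seq node already in the tree (may be NULL). */
static void oseg_insert(struct rb_root *root, struct oseg **last,
                        struct oseg *new)
{
        struct rb_node **p, *parent;

        if (*last && new->seq >= (*last)->seq) {
                /* Fast path: the max node never has a right child. */
                parent = &(*last)->node;
                p = &parent->rb_right;
        } else {
                p = &root->rb_node;
                parent = NULL;
                while (*p) {
                        struct oseg *s = rb_entry(*p, struct oseg, node);

                        parent = *p;
                        p = new->seq < s->seq ? &parent->rb_left
                                              : &parent->rb_right;
                }
        }
        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
        if (!*last || new->seq >= (*last)->seq)
                *last = new;            /* refresh the cache */
}
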
/linux/net/netfilter/

nf_conncount.c
    391  struct rb_node **rbnode, *parent;  in insert_tree() (local)
    400  rbnode = &(root->rb_node);  in insert_tree()
    401  while (*rbnode) {  in insert_tree()
    403  rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);  in insert_tree()
    405  parent = *rbnode;  in insert_tree()
    408  rbnode = &((*rbnode)->rb_left);  in insert_tree()
    410  rbnode = &((*rbnode)->rb_right);  in insert_tree()
    461  rb_link_node_rcu(&rbconn->node, parent, rbnode);  in insert_tree()

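insert_tree() is the canonical descend-and-link loop: walk a struct rb_node ** down the tree, remember the parent, then publish the new node with the RCU-aware linker so lockless readers never see a half-initialised node. A minimal sketch with an illustrative u32 key:

#include <linux/rbtree.h>
#include <linux/types.h>

struct kv {
        struct rb_node node;
        u32 key;
};

static void kv_insert_rcu(struct rb_root *root, struct kv *new)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        struct kv *cur;

        while (*link) {
                cur = rb_entry(*link, struct kv, node);
                parent = *link;
                if (new->key < cur->key)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        /* rb_link_node_rcu() publishes with rcu_assign_pointer(), so
         * concurrent RCU lookups see a fully initialised node.
         */
        rb_link_node_rcu(&new->node, parent, link);
        rb_insert_color(&new->node, root);
}
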
/linux/net/sched/

sch_etf.c
    188  rb_link_node(&nskb->rbnode, parent, p);  in etf_enqueue_timesortedlist()
    189  rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);  in etf_enqueue_timesortedlist()
    211  rb_erase_cached(&skb->rbnode, &q->head);  in timesortedlist_drop()
    235  rb_erase_cached(&skb->rbnode, &q->head);  in timesortedlist_remove()
    427  rb_erase_cached(&skb->rbnode, &q->head);  in timesortedlist_clear()

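sch_etf keeps its time-sorted queue in an rb_root_cached, which tracks the leftmost node so the next-to-send skb is reachable in O(1). A sketch of the cached insert/peek pair; tnode is an illustrative type, not the qdisc's own:

#include <linux/rbtree.h>
#include <linux/types.h>

struct tnode {
        struct rb_node node;
        u64 tstamp;
};

static void tnode_enqueue(struct rb_root_cached *head, struct tnode *new)
{
        struct rb_node **p = &head->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*p) {
                struct tnode *t = rb_entry(*p, struct tnode, node);

                parent = *p;
                if (new->tstamp < t->tstamp) {
                        p = &parent->rb_left;
                } else {
                        p = &parent->rb_right;
                        leftmost = false;       /* went right at least once */
                }
        }
        rb_link_node(&new->node, parent, p);
        rb_insert_color_cached(&new->node, head, leftmost);
}

static struct tnode *tnode_peek(struct rb_root_cached *head)
{
        struct rb_node *p = rb_first_cached(head);      /* O(1): cached leftmost */

        return p ? rb_entry(p, struct tnode, node) : NULL;
}
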
sch_netem.c
    162  * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
    381  rb_erase(&skb->rbnode, &q->t_root);  in tfifo_reset()
    415  rb_link_node(&nskb->rbnode, parent, p);  in tfifo_enqueue()
    416  rb_insert_color(&nskb->rbnode, &q->t_root);  in tfifo_enqueue()
    698  rb_erase(&skb->rbnode, &q->t_root);  in netem_erase_head()
    730  /* skb->dev shares skb->rbnode area,  in netem_dequeue()

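Both netem comments flag the same hazard: skb->rbnode overlays other sk_buff queueing fields, so an skb lifted out of the tree must have the aliased fields rewritten before the rest of the stack reads them. A hedged sketch in the spirit of netem's dequeue path; the exact set of fields to restore is my reading of the comments above, and qdisc_dev() is the accessor netem itself uses:

#include <linux/rbtree.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>

static struct sk_buff *demo_erase_head(struct Qdisc *sch,
                                       struct rb_root *t_root)
{
        struct rb_node *p = rb_first(t_root);
        struct sk_buff *skb;

        if (!p)
                return NULL;
        skb = rb_entry(p, struct sk_buff, rbnode);
        rb_erase(p, t_root);
        /* rbnode overlapped next/prev/dev: make the skb a sane
         * standalone packet again before anyone dereferences them.
         */
        skb->next = NULL;
        skb->prev = NULL;
        skb->dev = qdisc_dev(sch);
        return skb;
}
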
sch_fq.c
    488  rb_erase(&skb->rbnode, &flow->t_root);  in fq_erase_head()
    534  rb_link_node(&skb->rbnode, parent, p);  in fq_packet_beyond_horizon()
    535  rb_insert_color(&skb->rbnode, &flow->t_root);  in fq_packet_beyond_horizon()
    794  rb_erase(&skb->rbnode, &flow->t_root);  in fq_flow_purge()

/linux/net/mptcp/

protocol.c
    270  rb_link_node(&skb->rbnode, NULL, p);  in mptcp_data_queue_ofo()
    271  rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);  in mptcp_data_queue_ofo()
    288  parent = &msk->ooo_last_skb->rbnode;  in mptcp_data_queue_ofo()
    319  rb_replace_node(&skb1->rbnode, &skb->rbnode,  in mptcp_data_queue_ofo()
    334  rb_link_node(&skb->rbnode, parent, p);  in mptcp_data_queue_ofo()
    335  rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);  in mptcp_data_queue_ofo()
    342  rb_erase(&skb1->rbnode, &msk->out_of_order_queue);  in mptcp_data_queue_ofo()
    793  rb_erase(&skb->rbnode, &msk->out_of_order_queue);  in __mptcp_ofo_queue()

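Like TCP, mptcp_data_queue_ofo() calls rb_replace_node() when a newly arrived skb entirely covers an existing one: swapping nodes at the same tree position preserves shape and colours, so no rebalancing pass is needed. An illustrative sketch with a hypothetical range-keyed span type:

#include <linux/rbtree.h>
#include <linux/types.h>

struct span {
        struct rb_node node;
        u32 start, end;
};

/* Swap 'fresh' in at 'victim's position.  The caller must guarantee
 * that fresh keeps the BST ordering at that slot, e.g. because
 * [fresh->start, fresh->end) covers [victim->start, victim->end).
 */
static void span_supersede(struct rb_root *root, struct span *victim,
                           struct span *fresh)
{
        rb_replace_node(&victim->node, &fresh->node, root);
        /* victim is now out of the tree; the caller frees it */
}
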
/linux/drivers/android/binder/

process.rs
    767  let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;  in get_node_inner() (local)
    773  inner.nodes.insert(rbnode);  in get_node_inner()

/linux/include/linux/

skbuff.h
    901  struct rb_node rbnode;  /* used in netem, ip4 defrag, and tcp stack */  (member)
   4126  #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
   4129  #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
   4130  #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))

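rb_to_skb() is built on rb_entry_safe(), so a NULL rb_node maps to a NULL sk_buff and the next/prev helpers compose directly with the rbtree iterators. A small in-order walk built only from the macros above; skb_tree_total_len() is an illustrative name:

#include <linux/rbtree.h>
#include <linux/skbuff.h>

/* Sum skb->len over a tree of skbs in key order.  rb_to_skb() maps
 * NULL to NULL, so no separate empty-tree check is needed.
 */
static unsigned int skb_tree_total_len(struct rb_root *root)
{
        struct sk_buff *skb;
        unsigned int len = 0;

        for (skb = rb_to_skb(rb_first(root)); skb; skb = skb_rb_next(skb))
                len += skb->len;
        return len;
}
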
/linux/include/net/

tcp.h
   2201  rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);  in tcp_rtx_queue_unlink()

/linux/net/core/

skbuff.c
   4008  struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);  in skb_rbtree_purge()
   4011  rb_erase(&skb->rbnode, root);  in skb_rbtree_purge()

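skb_rbtree_purge(), like inet_frag_rbtree_purge() above, drains a tree leftmost-first, saving each node's successor before erasing it so the walk survives the rebalancing that rb_erase() may perform. A sketch of that drain loop; the truesize accounting is illustrative of the memory-accounting style these helpers use:

#include <linux/rbtree.h>
#include <linux/skbuff.h>

static unsigned int demo_rbtree_purge(struct rb_root *root)
{
        struct rb_node *p = rb_first(root);
        unsigned int sum = 0;

        while (p) {
                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

                p = rb_next(p);                 /* grab successor first */
                rb_erase(&skb->rbnode, root);
                sum += skb->truesize;           /* bytes of memory released */
                kfree_skb(skb);
        }
        return sum;
}
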