/linux/drivers/base/regmap/
regcache-rbtree.c
    41  struct regcache_rbtree_node *rbnode,  in regcache_rbtree_get_base_top_reg() argument
    44  *base = rbnode->base_reg;  in regcache_rbtree_get_base_top_reg()
    45  *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);  in regcache_rbtree_get_base_top_reg()
    49  struct regcache_rbtree_node *rbnode, unsigned int idx)  in regcache_rbtree_get_register() argument
    51  return regcache_get_val(map, rbnode->block, idx);  in regcache_rbtree_get_register()
    55  struct regcache_rbtree_node *rbnode,  in regcache_rbtree_set_register() argument
    58  set_bit(idx, rbnode->cache_present);  in regcache_rbtree_set_register()
    59  regcache_set_val(map, rbnode->block, idx, val);  in regcache_rbtree_set_register()
    67  struct regcache_rbtree_node *rbnode;  in regcache_rbtree_lookup() local
    70  rbnode = rbtree_ctx->cached_rbnode;  in regcache_rbtree_lookup()
    [all …]

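The regcache hits above cache one contiguous run of registers per tree node: line 45 derives the block's top register from its base, length, and the map's register stride, and lines 58-59 mark a slot present before storing into it. A minimal sketch of that addressing math, with a hypothetical demo_cache_block type standing in for struct regcache_rbtree_node:

#include <linux/bitops.h>
#include <linux/rbtree.h>

struct demo_cache_block {
	struct rb_node node;	/* linked into the per-map rbtree */
	unsigned int base_reg;	/* first register this block covers */
	unsigned int blklen;	/* number of register slots */
	unsigned int *block;	/* cached values, one per slot */
	unsigned long *present;	/* bitmap of slots holding valid data */
};

/* Top register covered by a block, mirroring line 45 above. */
static unsigned int demo_block_top(const struct demo_cache_block *blk,
				   unsigned int reg_stride)
{
	return blk->base_reg + (blk->blklen - 1) * reg_stride;
}

/* Cache a value: registers map to slots via the register stride. */
static void demo_block_set(struct demo_cache_block *blk, unsigned int reg,
			   unsigned int reg_stride, unsigned int val)
{
	unsigned int idx = (reg - blk->base_reg) / reg_stride;

	set_bit(idx, blk->present);	/* slot now holds valid data */
	blk->block[idx] = val;
}
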
/linux/drivers/infiniband/hw/hfi1/
mmu_rb.c
    80  struct mmu_rb_node *rbnode;  in hfi1_mmu_rb_unregister() local
   101  rbnode = rb_entry(node, struct mmu_rb_node, node);  in hfi1_mmu_rb_unregister()
   104  list_move(&rbnode->list, &del_list);  in hfi1_mmu_rb_unregister()
   109  rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);  in hfi1_mmu_rb_unregister()
   110  list_del(&rbnode->list);  in hfi1_mmu_rb_unregister()
   111  kref_put(&rbnode->refcount, release_immediate);  in hfi1_mmu_rb_unregister()
   228  struct mmu_rb_node *rbnode, *ptr;  in hfi1_mmu_rb_evict() local
   239  list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {  in hfi1_mmu_rb_evict()
   241  if (kref_read(&rbnode->refcount) > 1)  in hfi1_mmu_rb_evict()
   244  if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,  in hfi1_mmu_rb_evict()
    [all …]

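hfi1_mmu_rb_unregister() above tears the tree down in two phases: every node is detached onto a private list first, then the references are dropped one by one. A hedged sketch of the same shape (demo_ names are hypothetical, and the locking the real code holds around phase 1 is only noted in comments):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct demo_rb_node {
	struct rb_node node;
	struct list_head list;
	struct kref refcount;
};

/* Final release once the last reference is gone. */
static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_rb_node, refcount));
}

static void demo_teardown(struct rb_root *root)
{
	LIST_HEAD(del_list);
	struct rb_node *node;
	struct demo_rb_node *rbnode;

	/* Phase 1: unhook every node onto a private list (the real
	 * code does this under the handler lock and list_move()s the
	 * nodes off an LRU list). */
	while ((node = rb_first(root))) {
		rbnode = rb_entry(node, struct demo_rb_node, node);
		rb_erase(node, root);
		list_add_tail(&rbnode->list, &del_list);
	}

	/* Phase 2: with the lock dropped, release each node once. */
	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct demo_rb_node,
					  list);
		list_del(&rbnode->list);
		kref_put(&rbnode->refcount, demo_release);
	}
}
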
/linux/net/ipv4/
inet_fragment.c
    51  RB_CLEAR_NODE(&skb->rbnode);  in fragcb_clear()
    74  rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,  in fragrun_create()
    75  &q->last_run_head->rbnode.rb_right);  in fragrun_create()
    77  rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);  in fragrun_create()
    78  rb_insert_color(&skb->rbnode, &q->rb_fragments);  in fragrun_create()
   308  struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);  in inet_frag_rbtree_purge()
   311  rb_erase(&skb->rbnode, root);  in inet_frag_rbtree_purge()
   486  rb_link_node(&skb->rbnode, parent, rbn);  in inet_frag_queue_insert()
   487  rb_insert_color(&skb->rbnode, &q->rb_fragments);  in inet_frag_queue_insert()
   525  if (RB_EMPTY_NODE(&skb->rbnode))  in inet_frag_reasm_prepare()
    [all …]

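fragcb_clear() and inet_frag_reasm_prepare() above pair RB_CLEAR_NODE() with RB_EMPTY_NODE() as a cheap "is this node linked into a tree?" test. A minimal sketch of that sentinel idiom (the demo_ names are hypothetical):

#include <linux/rbtree.h>

struct demo_frag {
	struct rb_node rbnode;
};

static void demo_frag_init(struct demo_frag *f)
{
	RB_CLEAR_NODE(&f->rbnode);	/* mark "not in any tree" */
}

static void demo_frag_unlink(struct demo_frag *f, struct rb_root *root)
{
	if (RB_EMPTY_NODE(&f->rbnode))	/* never inserted: nothing to do */
		return;
	rb_erase(&f->rbnode, root);
	RB_CLEAR_NODE(&f->rbnode);	/* safe to test again later */
}
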
tcp_input.c
  5348  rb_erase(&skb->rbnode, &tp->out_of_order_queue);  in tcp_ofo_queue()
  5437  rb_link_node(&skb->rbnode, NULL, p);  in tcp_data_queue_ofo()
  5438  rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);  in tcp_data_queue_ofo()
  5460  parent = &tp->ooo_last_skb->rbnode;  in tcp_data_queue_ofo()
  5492  rb_replace_node(&skb1->rbnode, &skb->rbnode,  in tcp_data_queue_ofo()
  5511  rb_link_node(&skb->rbnode, parent, p);  in tcp_data_queue_ofo()
  5512  rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);  in tcp_data_queue_ofo()
  5524  rb_erase(&skb1->rbnode, &tp->out_of_order_queue);  in tcp_data_queue_ofo()
  5775  rb_erase(&skb->rbnode, root);  in tcp_collapse_one()
  5969  node = &tp->ooo_last_skb->rbnode;  in tcp_prune_ofo_queue()

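At line 5492, an skb whose data has been fully superseded is swapped for its replacement with rb_replace_node(), which hands the victim's slot and colour to the new node with no rebalancing. A sketch of that swap, assuming the new skb sorts into exactly the position the old one held (the demo_ name is hypothetical):

#include <linux/rbtree.h>
#include <linux/skbuff.h>

static void demo_ofo_replace(struct sk_buff *old, struct sk_buff *new,
			     struct rb_root *root)
{
	/* @new inherits @old's exact slot and colour: O(1), no
	 * rebalancing, but only valid because @new occupies the same
	 * key position @old did. */
	rb_replace_node(&old->rbnode, &new->rbnode, root);
	kfree_skb(old);	/* the superseded skb is gone from the tree */
}
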
/linux/net/netfilter/
nf_conncount.c
   407  struct rb_node **rbnode, *parent;  in insert_tree() local
   416  rbnode = &(root->rb_node);  in insert_tree()
   417  while (*rbnode) {  in insert_tree()
   419  rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);  in insert_tree()
   421  parent = *rbnode;  in insert_tree()
   424  rbnode = &((*rbnode)->rb_left);  in insert_tree()
   426  rbnode = &((*rbnode)->rb_right);  in insert_tree()
   477  rb_link_node_rcu(&rbconn->node, parent, rbnode);  in insert_tree()

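insert_tree() above is the canonical kernel rbtree insertion walk: descend from the root comparing keys, remember the parent and the child slot you fell through, then attach with rb_link_node() and rebalance with rb_insert_color(). A generic sketch with a hypothetical demo_entry type:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_entry {
	struct rb_node node;
	u32 key;
};

static void demo_insert(struct rb_root *root, struct demo_entry *new)
{
	struct rb_node **rbnode = &root->rb_node, *parent = NULL;

	while (*rbnode) {
		struct demo_entry *cur =
			rb_entry(*rbnode, struct demo_entry, node);

		parent = *rbnode;
		if (new->key < cur->key)
			rbnode = &(*rbnode)->rb_left;
		else
			rbnode = &(*rbnode)->rb_right;
	}
	rb_link_node(&new->node, parent, rbnode);	/* attach as leaf */
	rb_insert_color(&new->node, root);		/* rebalance */
}

The real insert_tree() links with rb_link_node_rcu() instead, because its lookups run under RCU rather than under the lock the writers take.
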
/linux/net/sched/
sch_etf.c
   188  rb_link_node(&nskb->rbnode, parent, p);  in etf_enqueue_timesortedlist()
   189  rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);  in etf_enqueue_timesortedlist()
   211  rb_erase_cached(&skb->rbnode, &q->head);  in timesortedlist_drop()
   235  rb_erase_cached(&skb->rbnode, &q->head);  in timesortedlist_remove()
   427  rb_erase_cached(&skb->rbnode, &q->head);  in timesortedlist_clear()

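sch_etf keeps its queue in a cached rbtree so the skb with the earliest transmission time stays reachable through the cached leftmost pointer. A sketch of such an enqueue, assuming the sort key is skb->tstamp as in ETF (the demo_ name is hypothetical):

#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/skbuff.h>

static void demo_enqueue_sorted(struct rb_root_cached *head,
				struct sk_buff *nskb)
{
	struct rb_node **p = &head->rb_root.rb_node, *parent = NULL;
	ktime_t txtime = nskb->tstamp;
	bool leftmost = true;

	while (*p) {
		struct sk_buff *skb = rb_entry(*p, struct sk_buff, rbnode);

		parent = *p;
		if (ktime_compare(txtime, skb->tstamp) >= 0) {
			p = &parent->rb_right;
			leftmost = false;	/* not the new minimum */
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&nskb->rbnode, parent, p);
	/* the cached root tracks the leftmost node, so the earliest
	 * deadline is later available via rb_first_cached() in O(1) */
	rb_insert_color_cached(&nskb->rbnode, head, leftmost);
}

Sending equal timestamps to the right keeps FIFO order among ties; rb_first_cached() then yields the earliest skb without walking down the tree.
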
sch_netem.c
   381  rb_erase(&skb->rbnode, &q->t_root);  in tfifo_reset()
   415  rb_link_node(&nskb->rbnode, parent, p);  in tfifo_enqueue()
   416  rb_insert_color(&nskb->rbnode, &q->t_root);  in tfifo_enqueue()
   698  rb_erase(&skb->rbnode, &q->t_root);  in netem_erase_head()

sch_fq.c
   486  rb_erase(&skb->rbnode, &flow->t_root);  in fq_erase_head()
   532  rb_link_node(&skb->rbnode, parent, p);  in flow_queue_add()
   533  rb_insert_color(&skb->rbnode, &flow->t_root);  in flow_queue_add()
   792  rb_erase(&skb->rbnode, &flow->t_root);  in fq_flow_purge()

/linux/fs/btrfs/
backref.c
   257  rb_entry(new, struct prelim_ref, rbnode);  in prelim_ref_rb_add_cmp()
   259  rb_entry(exist, struct prelim_ref, rbnode);  in prelim_ref_rb_add_cmp()
   299  exist = rb_find_add_cached(&newref->rbnode, root, prelim_ref_rb_add_cmp);  in prelim_ref_insert()
   301  struct prelim_ref *ref = rb_entry(exist, struct prelim_ref, rbnode);  in prelim_ref_insert()
   340  &preftree->root.rb_root, rbnode) {  in prelim_release()
   454  ref = rb_entry(parent, struct prelim_ref, rbnode);  in is_shared_data_backref()
   755  ref = rb_entry(rnode, struct prelim_ref, rbnode);  in resolve_indirect_refs()
   762  rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);  in resolve_indirect_refs()
   846  ref = rb_entry(node, struct prelim_ref, rbnode);  in add_missing_keys()
  1591  ref = rb_entry(node, struct prelim_ref, rbnode);  in find_parent_nodes()
    [all …]

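prelim_ref_insert() above leans on rb_find_add_cached(), which fuses lookup and insert: the comparator drives one descent, the node is linked in only when no equal key exists, and any duplicate found is returned so the caller can merge into it. A sketch against a hypothetical demo_ref type:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_ref {
	struct rb_node rbnode;
	u64 key;
};

static int demo_ref_cmp(const struct rb_node *new, const struct rb_node *exist)
{
	const struct demo_ref *a = rb_entry(new, struct demo_ref, rbnode);
	const struct demo_ref *b = rb_entry(exist, struct demo_ref, rbnode);

	if (a->key < b->key)
		return -1;
	if (a->key > b->key)
		return 1;
	return 0;
}

/* Returns NULL if @ref was inserted, or the already-present duplicate. */
static struct demo_ref *demo_ref_insert(struct rb_root_cached *root,
					struct demo_ref *ref)
{
	struct rb_node *exist;

	exist = rb_find_add_cached(&ref->rbnode, root, demo_ref_cmp);
	return exist ? rb_entry(exist, struct demo_ref, rbnode) : NULL;
}
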
/linux/net/mptcp/
protocol.c
   269  rb_link_node(&skb->rbnode, NULL, p);  in mptcp_data_queue_ofo()
   270  rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);  in mptcp_data_queue_ofo()
   287  parent = &msk->ooo_last_skb->rbnode;  in mptcp_data_queue_ofo()
   318  rb_replace_node(&skb1->rbnode, &skb->rbnode,  in mptcp_data_queue_ofo()
   333  rb_link_node(&skb->rbnode, parent, p);  in mptcp_data_queue_ofo()
   334  rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);  in mptcp_data_queue_ofo()
   341  rb_erase(&skb1->rbnode, &msk->out_of_order_queue);  in mptcp_data_queue_ofo()
   789  rb_erase(&skb->rbnode, &msk->out_of_order_queue);  in __mptcp_ofo_queue()

/linux/net/rxrpc/
af_rxrpc.c
   902  skb = rb_entry(rx->pending_oobq.rb_node, struct sk_buff, rbnode);  in rxrpc_purge_oob_queue()
   903  rb_erase(&skb->rbnode, &rx->pending_oobq);  in rxrpc_purge_oob_queue()

/linux/drivers/android/binder/
process.rs
   773  let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;  in get_node_inner() local variable
   779  inner.nodes.insert(rbnode);  in get_node_inner()

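The Rust binder code above builds the RBTreeNode (with GFP_KERNEL) before inserting it, so the sleeping allocation happens outside the lock that guards the tree. A sketch of the same alloc-then-insert shape in kernel C, with hypothetical demo_ names:

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rbnode;
	u32 key;
};

static int demo_add(struct rb_root *root, spinlock_t *lock, u32 key)
{
	struct demo_node *new = kzalloc(sizeof(*new), GFP_KERNEL);
	struct rb_node **p, *parent = NULL;

	if (!new)
		return -ENOMEM;
	new->key = key;

	spin_lock(lock);	/* sleeping allocation already done */
	p = &root->rb_node;
	while (*p) {
		struct demo_node *cur = rb_entry(*p, struct demo_node,
						 rbnode);

		parent = *p;
		/* duplicates simply go right in this sketch */
		p = key < cur->key ? &(*p)->rb_left : &(*p)->rb_right;
	}
	rb_link_node(&new->rbnode, parent, p);
	rb_insert_color(&new->rbnode, root);
	spin_unlock(lock);
	return 0;
}
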
/linux/include/linux/
skbuff.h
   901  struct rb_node rbnode;  /* used in netem, ip4 defrag, and tcp stack */  member
  4127  #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
  4130  #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
  4131  #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))

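The rb_to_skb() family above wraps rb_entry_safe(), so a NULL rb_node propagates cleanly through a walk. A small usage sketch (the demo_ function is hypothetical):

#include <linux/rbtree.h>
#include <linux/skbuff.h>

static unsigned int demo_count_queued(const struct rb_root *root)
{
	struct sk_buff *skb = rb_to_skb(rb_first(root));
	unsigned int count = 0;

	/* skb_rb_next() yields NULL past the last node, ending the loop */
	for (; skb; skb = skb_rb_next(skb))
		count++;
	return count;
}
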
/linux/include/net/
tcp.h
  2262  rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);  in tcp_rtx_queue_unlink()

/linux/net/core/
skbuff.c
  4041  struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);  in skb_rbtree_purge()
  4044  rb_erase(&skb->rbnode, root);  in skb_rbtree_purge()

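skb_rbtree_purge() above drains a whole skb tree. A sketch of the general drain pattern it follows (locking and byte accounting omitted):

#include <linux/rbtree.h>
#include <linux/skbuff.h>

static void demo_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p;

	/* repeatedly detach the leftmost node, which keeps the
	 * remaining tree consistent at every step */
	while ((p = rb_first(root)) != NULL) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		rb_erase(p, root);
		kfree_skb(skb);
	}
}
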