/linux/net/ceph/
  debugfs.c
      74  for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {  in osdmap_show()
      97  for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {  in osdmap_show()
     108  for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) {  in osdmap_show()
     115  for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) {  in osdmap_show()
     126  for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) {  in osdmap_show()
     166  for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {  in monc_show()
     245  for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {  in dump_requests()
     271  for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {  in dump_linger_requests()
     332  for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) {  in dump_backoffs()
     358  for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {  in osdc_show()
     [all …]
  osd_client.c
    1180  for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {  in DEFINE_RB_FUNCS()
    1183  for (p = rb_first(&osd->o_requests); p; ) {  in DEFINE_RB_FUNCS()
    1193  for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {  in DEFINE_RB_FUNCS()
    1349  for (n = rb_first(&osd->o_requests); n; ) {  in close_osd()
    1359  for (n = rb_first(&osd->o_linger_requests); n; ) {  in close_osd()
    1400  for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {  in reopen_osd()
    1496  for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {  in have_pool_full()
    1976  rb_entry(rb_first(&osd->o_backoff_mappings),  in DEFINE_RB_FUNCS()
    1981  rb_entry(rb_first(&spg->backoffs),  in DEFINE_RB_FUNCS()
    3448  for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {  in handle_timeout()
    [all …]
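The close_osd() hits above use a for loop with an empty advance clause: when the walk may unlink the current entry from the tree, the successor has to be fetched with rb_next() before the current node is erased. A minimal sketch of that idiom, using a hypothetical struct item rather than the actual ceph request types:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct item {
            struct rb_node node;    /* hypothetical entry embedded in the tree */
    };

    static void drain_tree(struct rb_root *root)
    {
            struct rb_node *n;

            for (n = rb_first(root); n; ) {
                    struct item *it = rb_entry(n, struct item, node);

                    n = rb_next(n);            /* step to the successor first... */
                    rb_erase(&it->node, root); /* ...so erasing the current node is safe */
                    kfree(it);
            }
    }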
  osdmap.c
     223  rb_entry(rb_first(root), struct crush_name_node, cn_node);  in clear_crush_names()
     273  rb_entry(rb_first(&c->choose_args),  in clear_choose_args()
     766  for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {  in ceph_pg_poolid_by_name()
    1145  rb_entry(rb_first(&map->pg_temp),  in ceph_osdmap_destroy()
    1152  rb_entry(rb_first(&map->primary_temp),  in ceph_osdmap_destroy()
    1159  rb_entry(rb_first(&map->pg_upmap),  in ceph_osdmap_destroy()
    1166  rb_entry(rb_first(&map->pg_upmap_items),  in ceph_osdmap_destroy()
    1173  rb_entry(rb_first(&map->pg_pools),  in ceph_osdmap_destroy()
    2971  struct rb_node *n1 = rb_first(locs1);  in ceph_compare_crush_locs()
    2972  struct rb_node *n2 = rb_first(locs2);  in ceph_compare_crush_locs()
    [all …]
/linux/arch/powerpc/kernel/
  eeh_cache.c
     103  n = rb_first(&cache->rb_root);  in eeh_addr_cache_print()
     218  n = rb_first(&pci_io_addr_cache_root.rb_root);  in __eeh_addr_cache_rmv_dev()
     270  for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {  in eeh_addr_cache_show()
/linux/tools/perf/util/
  rb_resort.h
  callchain.c
     436  n = rb_first(&node->rb_root_in);  in __sort_chain_flat()
     467  n = rb_first(&node->rb_root_in);  in __sort_chain_graph_abs()
     498  n = rb_first(&node->rb_root_in);  in __sort_chain_graph_rel()
     563  n = rb_first(&new->rb_root_in);  in create_child()
    1062  n = rb_first(&src->rb_root_in);  in merge_chain_branch()
    1305  n = rb_first(&node->rb_root_in);  in callchain_node_branch_counts_cumul()
    1513  n = rb_first(&node->rb_root_in);  in free_callchain_node()
    1538  n = rb_first(&node->rb_root_in);  in decay_callchain_node()
    1765  struct rb_node *rb_node = rb_first(root);  in count_callchain_hits()
/linux/Documentation/translations/zh_CN/core-api/
  rbtree.rst
     165  struct rb_node *rb_first(struct rb_root *tree);
     170  To begin iteration, call rb_first() or rb_last() with a pointer to the root of the tree, which will return a pointer to
     181  for (node = rb_first(&mytree); node; node = rb_next(node))
     189  a call to rb_first(), which is O(logN), simply to fetch the pointer, avoiding a potentially expensive tree iteration.
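The rbtree.rst lines above document the standard in-order walk. A self-contained sketch of that pattern, with struct mytype and its key field as illustrative stand-ins like those the rst file itself uses:

    #include <linux/rbtree.h>
    #include <linux/printk.h>

    struct mytype {
            struct rb_node node;
            int key;
    };

    /* Visit every entry in ascending key order: rb_first() returns the
     * leftmost node in O(logN), rb_next() steps to the in-order successor. */
    static void print_tree(struct rb_root *mytree)
    {
            struct rb_node *node;

            for (node = rb_first(mytree); node; node = rb_next(node)) {
                    struct mytype *entry = rb_entry(node, struct mytype, node);

                    pr_info("key=%d\n", entry->key);
            }
    }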
/linux/scripts/gdb/linux/
  rbtree.py
      25  def rb_first(root):  function
     122  result = rb_first(root)
/linux/rust/helpers/
  rbtree.c
      13  return rb_first(root);  in rust_helper_rb_first()
/linux/tools/perf/ui/gtk/
  hists.c
     101  bool has_single_node = (rb_first(root) == rb_last(root));  in perf_gtk__add_callchain_flat()
     103  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain_flat()
     165  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain_folded()
     224  bool has_single_node = (rb_first(root) == rb_last(root));  in perf_gtk__add_callchain_graph()
     226  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain_graph()
/linux/kernel/trace/
  trace_stat.c
     185  node = rb_first(&session->stat_root);  in stat_seq_start()
     200  return rb_first(&session->stat_root);  in stat_seq_next()
/linux/fs/proc/
  nommu.c
      86  for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))  in nommu_region_list_start()
/linux/drivers/android/
  binder_alloc.c
     437  for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {  in debug_no_space_locked()
     446  for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {  in debug_no_space_locked()
     486  for (n = rb_first(&alloc->allocated_buffers); n != NULL;  in debug_low_async_space_locked()
     974  while ((n = rb_first(&alloc->allocated_buffers))) {  in binder_alloc_deferred_release()
    1048  for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {  in binder_alloc_print_allocated()
    1106  for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))  in binder_alloc_get_allocated_count()
/linux/fs/btrfs/
  ref-verify.c
     224  while ((n = rb_first(&be->roots))) {  in free_block_entry()
     230  while((n = rb_first(&be->refs))) {  in free_block_entry()
     626  for (n = rb_first(&be->refs); n; n = rb_next(n)) {  in dump_block_entry()
     634  for (n = rb_first(&be->roots); n; n = rb_next(n)) {  in dump_block_entry()
     897  while ((n = rb_first(&fs_info->block_tree))) {  in btrfs_free_ref_cache()
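free_block_entry() and btrfs_free_ref_cache() above show the other common destructive form: take rb_first() in the loop condition and erase that node on each pass until the tree is empty. A sketch of the idiom, with a hypothetical struct entry:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct entry {
            struct rb_node node;    /* hypothetical node embedded in each entry */
    };

    static void free_all(struct rb_root *root)
    {
            struct rb_node *n;

            /* Each iteration removes the current leftmost node, so the
             * loop terminates once the root is empty. */
            while ((n = rb_first(root))) {
                    struct entry *e = rb_entry(n, struct entry, node);

                    rb_erase(n, root);
                    kfree(e);
            }
    }

Teardown paths that never need the tree in a valid state between removals sometimes prefer rbtree_postorder_for_each_entry_safe(), which skips the per-erase rebalancing work.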
/linux/tools/perf/ui/stdio/
  hist.c
     129  node = rb_first(root);  in __callchain__fprintf_graph()
     234  node = rb_first(root);  in callchain__fprintf_graph()
     315  struct rb_node *rb_node = rb_first(tree);  in callchain__fprintf_flat()
     366  struct rb_node *rb_node = rb_first(tree);  in callchain__fprintf_folded()
/linux/drivers/infiniband/hw/mlx4/
  cm.c
     549  while (rb_first(sl_id_map)) {  in mlx4_ib_cm_paravirt_clean()
     551  rb_entry(rb_first(sl_id_map),  in mlx4_ib_cm_paravirt_clean()
     560  nd = rb_first(sl_id_map);  in mlx4_ib_cm_paravirt_clean()
/linux/net/netfilter/
  nf_conncount.c
     547  for (node = rb_first(root); node != NULL; node = rb_next(node)) {  in tree_gc_worker()
     562  node = rb_first(root);  in tree_gc_worker()
     648  while ((node = rb_first(r)) != NULL) {  in destroy_tree()
  nft_set_rbtree.c
     345  first = rb_first(&priv->root);  in __nft_rbtree_insert()
     595  for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {  in nft_rbtree_do_walk()
     656  for (node = rb_first(&priv->root); node ; node = next) {  in nft_rbtree_gc()
/linux/tools/perf/ui/browsers/
  map.c
     122  for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {  in map__browse()
  hists.c
     182  for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__count_rows_rb_tree()
     265  for (nd = rb_first(chain); nd; nd = rb_next(nd)) {  in callchain__count_rows()
     331  struct rb_node *nd = rb_first(&node->rb_root);  in callchain_node__init_have_children_rb_tree()
     333  for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__init_have_children_rb_tree()
     370  struct rb_node *nd = rb_first(root);  in callchain__init_have_children()
     373  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in callchain__init_have_children()
     516  for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__set_folding_rb_tree()
     557  for (nd = rb_first(chain); nd; nd = rb_next(nd)) {  in callchain__set_folding()
     921  node = rb_first(root);  in hist_browser__show_callchain_flat()
    1025  node = rb_first(root);  in hist_browser__show_callchain_folded()
    [all …]
/linux/tools/perf/tests/
  hists_cumulate.c
     216  for (node = rb_first(root), i = 0;  in do_test()
     238  cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);  in do_test()
/linux/rust/kernel/
  rbtree.rs
     209  next: unsafe { bindings::rb_first(&self.root) },  in iter()
     224  next: unsafe { bindings::rb_first(from_mut(&mut self.root)) },  in iter_mut()
     249  let current = unsafe { bindings::rb_first(root) };  in cursor_front_mut()
     264  let current = unsafe { bindings::rb_first(root) };  in cursor_front()
/linux/arch/x86/events/intel/
  uncore_discovery.c
     200  for (node = rb_first(root); node; node = rb_next(node)) {  in uncore_find_unit()
     449  node = rb_first(&type->units);  in intel_uncore_clear_discovery_tables()
     756  for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {  in intel_uncore_generic_init_uncores()
/linux/drivers/vfio/
  vfio_iommu_type1.c
     273  for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {  in vfio_dma_populate_bitmap()
     285  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iommu_populate_bitmap_full()
     296  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_alloc_all()
     321  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_free_all()
    1330  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iova_dirty_bitmap()
    1820  n = rb_first(&iommu->dma_list);  in vfio_iommu_replay()
    1897  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iommu_replay()
    2433  while ((node = rb_first(&iommu->dma_list)))  in vfio_iommu_unmap_unpin_all()
    2441  n = rb_first(&iommu->dma_list);  in vfio_iommu_unmap_unpin_reaccount()
    2448  p = rb_first(&dma->pfn_list);  in vfio_iommu_unmap_unpin_reaccount()
/linux/drivers/android/tests/
  binder_alloc_kunit.c
     430  n = rb_first(&alloc->allocated_buffers);  in binder_alloc_test_mmap()
     433  n = rb_first(&alloc->free_buffers);  in binder_alloc_test_mmap()