| /linux/drivers/s390/scsi/ |
| zfcp_reqlist.h | 26 struct list_head buckets[ZFCP_REQ_LIST_BUCKETS]; member 52 INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc() 68 if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty() 92 list_for_each_entry(req, &rl->buckets[i], list) in _zfcp_reqlist_find() 165 list_add_tail(&req->list, &rl->buckets[i]); in zfcp_reqlist_add() 182 list_splice_init(&rl->buckets[i], list); in zfcp_reqlist_move() 209 list_for_each_entry(req, &rl->buckets[i], list) in zfcp_reqlist_apply_for_all()
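
The zfcp request list above hashes outstanding requests into an array of list heads. A minimal userspace sketch of the same bucket-indexed lookup, using a hand-rolled singly-linked list instead of the kernel's struct list_head (names and bucket count here are illustrative, not zfcp's):

```c
#include <stdio.h>

#define REQ_LIST_BUCKETS 32  /* illustrative; zfcp defines its own constant */

struct req {
    unsigned long req_id;
    struct req *next;
};

struct reqlist {
    struct req *buckets[REQ_LIST_BUCKETS];
};

/* Hash a request id to a bucket index. */
static unsigned int bucket_of(unsigned long req_id)
{
    return req_id % REQ_LIST_BUCKETS;
}

static void reqlist_add(struct reqlist *rl, struct req *req)
{
    unsigned int i = bucket_of(req->req_id);

    req->next = rl->buckets[i];
    rl->buckets[i] = req;
}

/* Only the matching bucket's chain is walked, as in _zfcp_reqlist_find(). */
static struct req *reqlist_find(struct reqlist *rl, unsigned long req_id)
{
    struct req *req;

    for (req = rl->buckets[bucket_of(req_id)]; req; req = req->next)
        if (req->req_id == req_id)
            return req;
    return NULL;
}

int main(void)
{
    struct reqlist rl = { 0 };
    struct req r = { .req_id = 42 };

    reqlist_add(&rl, &r);
    printf("found: %d\n", reqlist_find(&rl, 42) != NULL);
    return 0;
}
```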
|
| /linux/Documentation/networking/ |
| nexthop-group-resilient.rst | 54 continuous. With a hash table, mapping between the hash table buckets and 56 the buckets that held it are simply reassigned to other next hops:: 70 choose a subset of buckets that are currently not used for forwarding 72 keeping the "busy" buckets intact. This way, established flows are ideally 80 certain number of buckets, according to its weight and the number of 81 buckets in the hash table. In accordance with the source code, we will call 86 Next hops that have fewer buckets than their wants count, are called 98 buckets: 105 underweight next hops. If, after considering all buckets in this manner, 109 There may not be enough "idle" buckets to satisfy the updated wants counts [all …]
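
The "wants count" this document refers to is the group's bucket total divided up in proportion to next-hop weights. A small sketch of that arithmetic, assuming simple truncating division (the kernel reconciles rounding by migrating idle buckets between over- and underweight next hops rather than rounding up front):

```c
#include <stdio.h>

/* Split nbuckets among next hops in proportion to their weights. */
static void wants_counts(const unsigned int *weights, unsigned int n,
                         unsigned int nbuckets, unsigned int *wants)
{
    unsigned int total = 0, i;

    for (i = 0; i < n; i++)
        total += weights[i];
    for (i = 0; i < n; i++)
        wants[i] = weights[i] * nbuckets / total;
}

int main(void)
{
    unsigned int weights[] = { 1, 2 }, wants[2];

    wants_counts(weights, 2, 6, wants);
    printf("wants: %u %u\n", wants[0], wants[1]); /* 2 and 4 */
    return 0;
}
```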
|
| /linux/tools/testing/selftests/bpf/ |
| bpf_arena_htab.h | 14 htab_bucket_t *buckets; member 21 htab_bucket_t *b = htab->buckets; in __select_bucket() 95 void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); in htab_init() local 97 cast_user(buckets); in htab_init() 98 htab->buckets = buckets; in htab_init()
|
| /linux/tools/lib/bpf/ |
| hashmap.c | 45 map->buckets = NULL; in hashmap__init() 71 free(map->buckets); in hashmap__clear() 72 map->buckets = NULL; in hashmap__clear() 124 free(map->buckets); in hashmap_grow() 125 map->buckets = new_buckets; in hashmap_grow() 137 if (!map->buckets) in hashmap_find_entry() 140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry() 200 hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
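
hashmap.c chains entries off a lazily allocated buckets array, which is why hashmap_find_entry() first checks for a NULL buckets pointer. A compressed sketch of that find path with simplified types (the real libbpf map hashes through a caller-supplied hash_fn; the structures below are illustrative):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    long key, value;
    struct entry *next;
};

struct hashmap {
    struct entry **buckets; /* NULL until the first insert */
    size_t cap;
};

static size_t hash_key(long key, size_t cap)
{
    return (size_t)key % cap; /* stand-in for the user-supplied hash_fn */
}

static bool find_entry(struct hashmap *map, long key, long *value)
{
    struct entry *cur;

    if (!map->buckets) /* empty map short-circuits, as in hashmap_find_entry() */
        return false;
    for (cur = map->buckets[hash_key(key, map->cap)]; cur; cur = cur->next) {
        if (cur->key == key) {
            *value = cur->value;
            return true;
        }
    }
    return false;
}

int main(void)
{
    struct hashmap map = { .cap = 8 };
    struct entry e = { .key = 7, .value = 99 };
    long v;

    map.buckets = calloc(map.cap, sizeof(*map.buckets));
    map.buckets[hash_key(e.key, map.cap)] = &e;
    if (find_entry(&map, 7, &v))
        printf("7 -> %ld\n", v);
    free(map.buckets);
    return 0;
}
```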
|
| /linux/tools/perf/util/ |
| hashmap.c | 45 map->buckets = NULL; in hashmap__init() 71 free(map->buckets); in hashmap__clear() 72 map->buckets = NULL; in hashmap__clear() 124 free(map->buckets); in hashmap_grow() 125 map->buckets = new_buckets; in hashmap_grow() 137 if (!map->buckets) in hashmap_find_entry() 140 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry() 200 hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
|
| ftrace.h | 57 int buckets[], struct stats *stats); 82 int buckets[] __maybe_unused, in perf_ftrace__latency_read_bpf()
|
| bpf_ftrace.c | 169 int buckets[], struct stats *stats) in perf_ftrace__latency_read_bpf() argument 185 buckets[idx] = 0; in perf_ftrace__latency_read_bpf() 190 buckets[idx] += hist[i]; in perf_ftrace__latency_read_bpf()
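
perf_ftrace__latency_read_bpf() zeroes each output slot, then folds the per-CPU histogram values into it. The reduction, sketched with a fixed CPU count (layout and names assumed for illustration):

```c
#include <stdio.h>

#define NR_CPUS 4
#define NR_BUCKETS 8

/* Fold a per-CPU histogram (one row per CPU) into a flat buckets array,
 * mirroring the buckets[idx] = 0 / buckets[idx] += hist[i] pattern above.
 */
static void fold_histogram(const int hist[NR_CPUS][NR_BUCKETS],
                           int buckets[NR_BUCKETS])
{
    for (int idx = 0; idx < NR_BUCKETS; idx++) {
        buckets[idx] = 0;
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            buckets[idx] += hist[cpu][idx];
    }
}

int main(void)
{
    int hist[NR_CPUS][NR_BUCKETS] = { [0][2] = 3, [1][2] = 5 };
    int buckets[NR_BUCKETS];

    fold_histogram(hist, buckets);
    printf("bucket 2 = %d\n", buckets[2]); /* 8 */
    return 0;
}
```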
|
| /linux/block/ |
| blk-stat.c | 83 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn() 90 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn() 102 unsigned int buckets, void *data) in blk_stat_alloc_callback() argument 110 cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), in blk_stat_alloc_callback() 116 cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), in blk_stat_alloc_callback() 127 cb->buckets = buckets; in blk_stat_alloc_callback() 144 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_add_callback()
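
blk_stat_alloc_callback() sizes one aggregate stat slot per bucket plus a per-CPU copy of the whole array. A userspace sketch of that allocation shape, with per-CPU storage modelled as a flat 2D array (struct and function names here are illustrative):

```c
#include <stdlib.h>

#define NR_CPUS 4

struct rq_stat { unsigned long nr_samples, total; };

/* One aggregate slot per bucket, plus NR_CPUS rows of the same width,
 * mirroring the kmalloc_array()/__alloc_percpu() pair above.
 */
struct stat_callback {
    struct rq_stat *stat;
    struct rq_stat *cpu_stat;
    unsigned int buckets;
};

static struct stat_callback *stat_alloc_callback(unsigned int buckets)
{
    struct stat_callback *cb = calloc(1, sizeof(*cb));

    if (!cb)
        return NULL;
    cb->stat = calloc(buckets, sizeof(*cb->stat));
    cb->cpu_stat = calloc((size_t)NR_CPUS * buckets, sizeof(*cb->cpu_stat));
    if (!cb->stat || !cb->cpu_stat) {
        free(cb->stat);
        free(cb->cpu_stat);
        free(cb);
        return NULL;
    }
    cb->buckets = buckets;
    return cb;
}

int main(void)
{
    struct stat_callback *cb = stat_alloc_callback(8);

    if (cb) {
        free(cb->stat);
        free(cb->cpu_stat);
        free(cb);
    }
    return 0;
}
```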
|
| blk-stat.h | 45 unsigned int buckets; member 88 unsigned int buckets, void *data);
|
| /linux/drivers/md/dm-vdo/ |
| priority-table.c | 44 struct bucket buckets[]; member 69 struct bucket *bucket = &table->buckets[priority]; in vdo_make_priority_table() 116 list_del_init(&table->buckets[priority].queue); in vdo_reset_priority_table() 134 list_move_tail(entry, &table->buckets[priority].queue); in vdo_priority_table_enqueue() 173 bucket = &table->buckets[top_priority]; in vdo_priority_table_dequeue()
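
vdo's priority table keeps one queue per priority and dequeues from the highest-priority non-empty bucket. A sketch of the dequeue scan, with queues reduced to plain counters instead of list heads and the kernel's search-vector shortcut omitted:

```c
#include <stdio.h>

#define MAX_PRIORITY 8

/* One bucket per priority; dequeue serves the highest non-empty one,
 * like vdo_priority_table_dequeue(). Counters stand in for queues.
 */
struct priority_table {
    unsigned int buckets[MAX_PRIORITY + 1];
};

static int table_dequeue(struct priority_table *table)
{
    for (int p = MAX_PRIORITY; p >= 0; p--) {
        if (table->buckets[p]) {
            table->buckets[p]--;
            return p;
        }
    }
    return -1; /* table empty */
}

int main(void)
{
    struct priority_table t = { 0 };

    t.buckets[2] = 1;
    t.buckets[5] = 1;
    printf("served priority %d\n", table_dequeue(&t)); /* 5 */
    return 0;
}
```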
|
| /linux/tools/testing/selftests/drivers/net/netdevsim/ |
| nexthop.sh | 213 $IP nexthop add id 10 group 1/2 type resilient buckets 4 229 $IP nexthop add id 10 group 1,3/2,2 type resilient buckets 5 259 $IP nexthop add id 10 group 1/2 type resilient buckets 4 &> /dev/null 325 $IP nexthop add id 10 group 1/2 type resilient buckets 6 353 $IP nexthop add id 10 group 1/2 type resilient buckets 6 408 $IP nexthop add id 10 group 1/2 type resilient buckets 8 idle_timer 4 434 type resilient buckets 8 idle_timer 6 469 $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 4 504 $IP nexthop add id 10 group 1/2 type resilient buckets 8 $timer 8 535 type resilient buckets 8 $timer 4 [all …]
|
| /linux/tools/perf/ |
| builtin-ftrace.c | 852 static void make_histogram(struct perf_ftrace *ftrace, int buckets[], in make_histogram() argument 929 buckets[i]++; in make_histogram() 941 static void display_histogram(struct perf_ftrace *ftrace, int buckets[]) in display_histogram() argument 953 total += buckets[i]; in display_histogram() 963 bar_len = buckets[0] * bar_total / total; in display_histogram() 965 if (!ftrace->hide_empty || buckets[0]) in display_histogram() 968 buckets[0], bar_len, bar, bar_total - bar_len, ""); in display_histogram() 974 if (ftrace->hide_empty && !buckets[i]) in display_histogram() 1005 bar_len = buckets[i] * bar_total / total; in display_histogram() 1006 printf(" %s | %10d | %.*s%*s |\n", unit, buckets[i], bar_len, bar, in display_histogram() [all …]
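
display_histogram() renders each bucket as a bar whose length is proportional to its share of the total. A sketch of the same scaling and printf formatting (bucket count and bar width chosen arbitrarily):

```c
#include <stdio.h>

#define NR_BUCKETS 4
#define BAR_TOTAL 20

/* Print one bar per bucket; a bucket holding the entire total would
 * fill the bar, as in bar_len = buckets[i] * bar_total / total.
 */
static void display_histogram(const int buckets[NR_BUCKETS])
{
    static const char bar[] = "####################"; /* BAR_TOTAL chars */
    int total = 0;

    for (int i = 0; i < NR_BUCKETS; i++)
        total += buckets[i];
    if (!total)
        return;
    for (int i = 0; i < NR_BUCKETS; i++) {
        int bar_len = buckets[i] * BAR_TOTAL / total;

        printf("bucket %d | %10d | %.*s%*s |\n",
               i, buckets[i], bar_len, bar, BAR_TOTAL - bar_len, "");
    }
}

int main(void)
{
    int buckets[NR_BUCKETS] = { 1, 5, 3, 1 };

    display_histogram(buckets);
    return 0;
}
```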
|
| /linux/net/ceph/crush/ |
| crush.c | 111 if (map->buckets) { in crush_destroy() 114 if (map->buckets[b] == NULL) in crush_destroy() 116 crush_destroy_bucket(map->buckets[b]); in crush_destroy() 118 kfree(map->buckets); in crush_destroy()
|
| mapper.c | 531 itemtype = map->buckets[-1-item]->type; in crush_choose_firstn() 544 in = map->buckets[-1-item]; in crush_choose_firstn() 568 map->buckets[-1-item], in crush_choose_firstn() 744 itemtype = map->buckets[-1-item]->type; in crush_choose_indep() 761 in = map->buckets[-1-item]; in crush_choose_indep() 781 map->buckets[-1-item], in crush_choose_indep() 868 if (!map->buckets[b]) in crush_init_workspace() 872 switch (map->buckets[b]->alg) { in crush_init_workspace() 880 v += map->buckets[b]->size * sizeof(__u32); in crush_init_workspace() 951 map->buckets[-1-curstep->arg1])) { in crush_do_rule() [all …]
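
Throughout mapper.c, a negative item ID denotes a bucket, and the recurring `-1 - item` converts it into an index into map->buckets (devices use non-negative IDs). The encoding in isolation:

```c
#include <assert.h>
#include <stdio.h>

/* CRUSH bucket IDs are negative: -1 maps to buckets[0], -2 to buckets[1],
 * and so on, which is exactly what -1 - item computes.
 */
static int bucket_index(int item)
{
    assert(item < 0);
    return -1 - item;
}

int main(void)
{
    printf("item -1 -> buckets[%d]\n", bucket_index(-1)); /* 0 */
    printf("item -7 -> buckets[%d]\n", bucket_index(-7)); /* 6 */
    return 0;
}
```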
|
| /linux/net/netfilter/ipvs/ |
| ip_vs_sh.c | 69 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; member 107 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get() 129 dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback() 144 dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback() 159 * Assign all the hash buckets of the specified table with the service. 171 b = &s->buckets[0]; in ip_vs_sh_reassign() 207 * Flush all the hash buckets of the specified table. 215 b = &s->buckets[0]; in ip_vs_sh_flush() 241 /* assign the hash buckets with current dests */ in ip_vs_sh_init_svc() 252 /* got to clean up hash buckets here */ in ip_vs_sh_done_svc() [all …]
| ip_vs_dh.c | 63 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; member 89 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get() 94 * Assign all the hash buckets of the specified table with the service. 105 b = &s->buckets[0]; in ip_vs_dh_reassign() 131 * Flush all the hash buckets of the specified table. 139 b = &s->buckets[0]; in ip_vs_dh_flush() 165 /* assign the hash buckets with current dests */ in ip_vs_dh_init_svc() 176 /* got to clean up hash buckets here */ in ip_vs_dh_done_svc() 191 /* assign the hash buckets with the updated service */ in ip_vs_dh_dest_changed()
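
Both IPVS hashing schedulers precompute a fixed-size table so a lookup is one hash plus one index, as in ip_vs_dh_get(). A sketch of the lookup with RCU omitted and the hash deliberately simplified (the real ip_vs_dh_hashkey() mixes the address differently):

```c
#include <stdint.h>
#include <stdio.h>

#define DH_TAB_BITS 8
#define DH_TAB_SIZE (1 << DH_TAB_BITS)
#define DH_TAB_MASK (DH_TAB_SIZE - 1)

struct dest { const char *name; };

struct dh_bucket { struct dest *dest; };

/* Simplified stand-in for ip_vs_dh_hashkey(): multiplicative hash of the
 * destination address, masked down to the table size.
 */
static unsigned int dh_hashkey(uint32_t addr)
{
    return (addr * 2654435761u >> (32 - DH_TAB_BITS)) & DH_TAB_MASK;
}

static struct dest *dh_get(struct dh_bucket *buckets, uint32_t addr)
{
    return buckets[dh_hashkey(addr)].dest;
}

int main(void)
{
    static struct dh_bucket buckets[DH_TAB_SIZE];
    struct dest d = { "rs1" };
    uint32_t addr = 0x0a000001; /* 10.0.0.1 */

    buckets[dh_hashkey(addr)].dest = &d;
    printf("%s\n", dh_get(buckets, addr)->name);
    return 0;
}
```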
|
| /linux/tools/testing/selftests/bpf/prog_tests/ |
| arena_htab.c | 20 printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets); in test_arena_htab_common() 21 ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL"); in test_arena_htab_common() 22 for (i = 0; htab->buckets && i < 16; i += 4) { in test_arena_htab_common()
|
| /linux/fs/nfs/ |
| pnfs_nfs.c | 104 p = kmalloc(struct_size(p, buckets, n), gfp_flags); in pnfs_alloc_commit_array() 111 for (b = &p->buckets[0]; n != 0; b++, n--) { in pnfs_alloc_commit_array() 261 struct pnfs_commit_bucket *buckets, in pnfs_bucket_scan_array() argument 269 cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); in pnfs_bucket_scan_array() 290 cnt = pnfs_bucket_scan_array(cinfo, array->buckets, in pnfs_generic_scan_commit_lists() 306 struct pnfs_commit_bucket *buckets, in pnfs_bucket_recover_commit_reqs() argument 316 for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_recover_commit_reqs() 345 array->buckets, in pnfs_generic_recover_commit_reqs() 385 static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets, in pnfs_generic_retry_commit() argument 394 for (bucket = buckets; idx < nbuckets; bucket++, idx++) { in pnfs_generic_retry_commit() [all …]
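
pnfs_alloc_commit_array() allocates the header and its flexible buckets[] member in a single kmalloc sized by struct_size(). The same flexible-array pattern in userspace C (types simplified):

```c
#include <stdlib.h>
#include <string.h>

struct commit_bucket { int nwritten; };

struct commit_array {
    unsigned int nbuckets;
    struct commit_bucket buckets[]; /* flexible array member */
};

static struct commit_array *alloc_commit_array(unsigned int n)
{
    /* One allocation covers the header plus n buckets, like
     * kmalloc(struct_size(p, buckets, n), gfp_flags) in the kernel. */
    struct commit_array *p = malloc(sizeof(*p) + n * sizeof(p->buckets[0]));

    if (!p)
        return NULL;
    p->nbuckets = n;
    memset(p->buckets, 0, n * sizeof(p->buckets[0]));
    return p;
}

int main(void)
{
    struct commit_array *a = alloc_commit_array(16);

    free(a);
    return 0;
}
```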
|
| /linux/kernel/bpf/ |
| bpf_local_storage.c | 24 return &smap->buckets[hash_ptr(selem, smap->bucket_log)]; in select_bucket() 706 usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log); in bpf_local_storage_map_mem_usage() 726 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ in bpf_local_storage_map_alloc() 730 smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets, 731 sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN); in bpf_local_storage_map_alloc() 732 if (!smap->buckets) { in bpf_local_storage_map_alloc() 738 INIT_HLIST_HEAD(&smap->buckets[i].list); in bpf_local_storage_map_alloc() 739 raw_spin_lock_init(&smap->buckets[i].lock); in bpf_local_storage_map_alloc() 755 kvfree(smap->buckets); in bpf_local_storage_map_free() 788 b = &smap->buckets[ [all …]
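
select_bucket() hashes the storage element's address down to bucket_log bits, which is why the map allocates at least two buckets (a one-bucket table would make bucket_log zero). A sketch with hash_ptr() replaced by a simple multiplicative hash (structure names are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

struct bucket { int dummy; };

struct storage_map {
    struct bucket *buckets;
    unsigned int bucket_log; /* log2 of the bucket count, >= 1 */
};

/* Stand-in for the kernel's hash_ptr(): spread the pointer bits and keep
 * the top bucket_log of them as the index.
 */
static struct bucket *select_bucket(struct storage_map *smap, void *selem)
{
    uint64_t h = (uint64_t)(uintptr_t)selem * 0x9E3779B97F4A7C15ull;

    return &smap->buckets[h >> (64 - smap->bucket_log)];
}

int main(void)
{
    static struct bucket buckets[16];
    struct storage_map smap = { .buckets = buckets, .bucket_log = 4 };
    int selem;

    printf("bucket %ld\n", (long)(select_bucket(&smap, &selem) - buckets));
    return 0;
}
```
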
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/ |
| port_sel.c | 54 ft_attr.max_fte = ldev->ports * ldev->buckets; in mlx5_lag_create_port_sel_table() 81 for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_create_port_sel_table() 84 idx = i * ldev->buckets + j; in mlx5_lag_create_port_sel_table() 96 idx = k * ldev->buckets + j; in mlx5_lag_create_port_sel_table() 99 j = ldev->buckets; in mlx5_lag_create_port_sel_table() 361 for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_destroy_definer() 362 idx = i * ldev->buckets + j; in mlx5_lag_destroy_definer() 592 for (j = 0; j < ldev->buckets; j++) { in __mlx5_lag_modify_definers_destinations() 593 idx = i * ldev->buckets + j; in __mlx5_lag_modify_definers_destinations()
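
The LAG port-selection code stores its rules as a ports × buckets matrix flattened into one array, hence the recurring `idx = i * ldev->buckets + j`. The indexing on its own:

```c
#include <stdio.h>

/* Flatten a (port, bucket) pair into the single flow-table index used
 * above: row-major layout with `buckets_per_port` entries per port.
 */
static unsigned int rule_idx(unsigned int port, unsigned int bucket,
                             unsigned int buckets_per_port)
{
    return port * buckets_per_port + bucket;
}

int main(void)
{
    unsigned int ports = 2, buckets = 4;

    for (unsigned int i = 0; i < ports; i++)
        for (unsigned int j = 0; j < buckets; j++)
            printf("port %u bucket %u -> idx %u\n",
                   i, j, rule_idx(i, j, buckets));
    return 0;
}
```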
|
| /linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| pno.c | 298 struct brcmf_gscan_bucket_config **buckets, in brcmf_pno_prep_fwconfig() argument 323 *buckets = NULL; in brcmf_pno_prep_fwconfig() 355 *buckets = fw_buckets; in brcmf_pno_prep_fwconfig() 396 struct brcmf_gscan_bucket_config *buckets; in brcmf_pno_config_sched_scans() local 403 n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets, in brcmf_pno_config_sched_scans() 437 memcpy(gscan_cfg->bucket, buckets, in brcmf_pno_config_sched_scans() 438 array_size(n_buckets, sizeof(*buckets))); in brcmf_pno_config_sched_scans() 463 kfree(buckets); in brcmf_pno_config_sched_scans()
|
| /linux/net/sched/ |
| sch_hhf.c | 128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member 355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop() 357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop() 368 return bucket - q->buckets; in hhf_drop() 381 bucket = &q->buckets[idx]; in hhf_enqueue() 435 int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? in hhf_dequeue() 648 struct wdrr_bucket *bucket = q->buckets + i; in hhf_init()
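
sch_hhf classifies every flow into one of exactly two WDRR buckets and later recovers a bucket's index by pointer subtraction, as in hhf_drop(). A sketch of both steps:

```c
#include <stdbool.h>
#include <stdio.h>

enum { WDRR_BUCKET_FOR_HH, WDRR_BUCKET_FOR_NON_HH, WDRR_BUCKET_CNT };

struct wdrr_bucket { int deficit; };

struct hhf_sched {
    struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
};

/* Heavy hitters land in one bucket, everything else in the other. */
static struct wdrr_bucket *classify(struct hhf_sched *q, bool heavy_hitter)
{
    return &q->buckets[heavy_hitter ? WDRR_BUCKET_FOR_HH
                                    : WDRR_BUCKET_FOR_NON_HH];
}

int main(void)
{
    struct hhf_sched q = { 0 };
    struct wdrr_bucket *b = classify(&q, true);

    /* Pointer difference recovers the bucket index, as in hhf_drop(). */
    printf("index %ld\n", (long)(b - q.buckets)); /* 0 */
    return 0;
}
```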
|
| /linux/lib/ |
| rhashtable.c | 59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); in lockdep_rht_bucket_is_held() 72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1); in nested_table_top() 159 size = sizeof(*tbl) + sizeof(tbl->buckets[0]); in nested_bucket_table_alloc() 166 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, in nested_bucket_table_alloc() 187 kvmalloc_node_align_noprof(struct_size(tbl, buckets, nbuckets), in bucket_table_alloc() 210 INIT_RHT_NULLS_HEAD(tbl->buckets[i]); in bucket_table_alloc() 261 flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], in rhashtable_rehash_one() 264 head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); in rhashtable_rehash_one() 268 rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags); in rhashtable_rehash_one()
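
rhashtable has no separate lock array: it bit-spins on bit 0 of the bucket head pointer itself, which is what lockdep_rht_bucket_is_held() checks. A sketch of stashing a flag in an aligned pointer's low bit (just the encoding, not the kernel's bit_spin_lock):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pointers to suitably aligned objects have zero low bits, so bit 0 can
 * carry a per-bucket lock flag, as rhashtable does with its bucket heads.
 */
static void *mark_locked(void *p)  { return (void *)((uintptr_t)p | 1); }
static void *clear_locked(void *p) { return (void *)((uintptr_t)p & ~(uintptr_t)1); }
static int   is_locked(void *p)    { return (uintptr_t)p & 1; }

int main(void)
{
    static int obj;
    void *bucket = &obj;

    assert(!is_locked(bucket));
    bucket = mark_locked(bucket);
    printf("locked: %d\n", is_locked(bucket));
    bucket = clear_locked(bucket);
    assert(bucket == (void *)&obj);
    return 0;
}
```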
|
| /linux/Documentation/userspace-api/media/v4l/ |
| metafmt-vsp1-hgt.rst | 24 The histogram is a matrix of 6 Hue and 32 Saturation buckets, 192 in 25 total. Each HSV value is added to one or more buckets with a weight 27 corresponding buckets is done by inspecting the H and S value independently.
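
Six hue buckets times 32 saturation buckets gives the 192 counters mentioned above. A hedged sketch of the bucket indexing, leaving out the hardware's configurable weighting across adjacent hue buckets:

```c
#include <stdio.h>

#define HUE_BUCKETS 6
#define SAT_BUCKETS 32

/* Map an H (0-255) and S (0-255) sample to one of the 192 buckets.
 * The real hardware can split a sample across neighbouring hue buckets
 * with configurable weights; this sketch picks a single bucket.
 */
static unsigned int hgt_bucket(unsigned int h, unsigned int s)
{
    unsigned int hue = h * HUE_BUCKETS / 256;
    unsigned int sat = s * SAT_BUCKETS / 256;

    return hue * SAT_BUCKETS + sat;
}

int main(void)
{
    printf("buckets total: %d\n", HUE_BUCKETS * SAT_BUCKETS); /* 192 */
    printf("sample (128, 200) -> bucket %u\n", hgt_bucket(128, 200));
    return 0;
}
```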
|
| /linux/security/selinux/ |
| Kconfig | 55 This option sets the number of buckets used in the sidtab hashtable 56 to 2^SECURITY_SELINUX_SIDTAB_HASH_BITS buckets. The number of hash 78 This option sets the number of buckets used in the AVC hash table
|