| /linux/tools/testing/selftests/drivers/net/netdevsim/ |
| nexthop.sh |
    213: $IP nexthop add id 10 group 1/2 type resilient buckets 4
    214: …nexthop_check "id 10" "id 10 group 1/2 type resilient buckets 4 idle_timer 120 unbalanced_timer 0 …
    218: check_err $? "Wrong nexthop buckets count"
    220: check_err $? "Wrong nexthop buckets count"
    229: $IP nexthop add id 10 group 1,3/2,2 type resilient buckets 5
    230: …nexthop_check "id 10" "id 10 group 1,3/2,2 type resilient buckets 5 idle_timer 120 unbalanced_time…
    234: check_err $? "Wrong nexthop buckets count"
    236: check_err $? "Wrong nexthop buckets count"
    259: $IP nexthop add id 10 group 1/2 type resilient buckets 4 &> /dev/null
    325: $IP nexthop add id 10 group 1/2 type resilient buckets 6
    [all …]
|
| /linux/Documentation/networking/ |
| nexthop-group-resilient.rst |
    54: continuous. With a hash table, mapping between the hash table buckets and
    56: the buckets that held it are simply reassigned to other next hops::
    70: choose a subset of buckets that are currently not used for forwarding
    72: keeping the "busy" buckets intact. This way, established flows are ideally
    80: certain number of buckets, according to its weight and the number of
    81: buckets in the hash table. In accordance with the source code, we will call
    86: Next hops that have fewer buckets than their wants count, are called
    95: be considered "idle". Buckets that are not idle are busy.
    98: buckets:
    105: underweight next hops. If, after considering all buckets in this manner,
    [all …]
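
The resilient-group documentation above says each next hop "wants" a share of the hash buckets proportional to its weight, and that idle buckets are migrated from overweight to underweight next hops. A minimal userspace sketch of that wants-count arithmetic, assuming plain truncating division (the kernel's exact rounding and migration logic differ); compute_wants() and the two-nexthop weights are illustrative, not kernel code:

    #include <stdio.h>

    /* Split num_buckets among next hops in proportion to their weights;
     * this is the "wants count" the documentation above refers to. */
    static void compute_wants(const unsigned int *weight, unsigned int n,
                              unsigned int num_buckets, unsigned int *wants)
    {
        unsigned int total = 0, i;

        for (i = 0; i < n; i++)
            total += weight[i];
        for (i = 0; i < n; i++)
            wants[i] = total ? num_buckets * weight[i] / total : 0;
    }

    int main(void)
    {
        unsigned int weight[] = { 1, 3 };   /* e.g. "group 1,1/2,3" */
        unsigned int wants[2];

        compute_wants(weight, 2, 8, wants);
        printf("nh 1 wants %u buckets, nh 2 wants %u buckets\n", wants[0], wants[1]);
        /* A next hop holding fewer buckets than its wants count is underweight;
         * idle buckets held by overweight next hops get reassigned to it. */
        return 0;
    }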
|
| /linux/drivers/md/dm-vdo/ |
| priority-table.c |
    21: * priority. The table is essentially an array of buckets.
    33: * A priority table is an array of buckets, indexed by priority. New entries are added to the end
    41: /* A bit vector flagging all buckets that are currently non-empty */
    43: /* The array of all buckets, indexed by priority */
    44: struct bucket buckets[]; member
    69: struct bucket *bucket = &table->buckets[priority]; in vdo_make_priority_table()
    94: * Unlink the buckets from any entries still in the table so the entries won't be left with in vdo_free_priority_table()
    116: list_del_init(&table->buckets[priority].queue); in vdo_reset_priority_table()
    134: list_move_tail(entry, &table->buckets[priority].queue); in vdo_priority_table_enqueue()
    162: /* All buckets are empty. */ in vdo_priority_table_dequeue()
    [all …]
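
The dm-vdo comments above describe a table that is an array of per-priority buckets plus a bit vector flagging the non-empty ones; entries enqueue at the tail of their bucket and dequeue from the highest-priority non-empty bucket. A simplified userspace sketch of that shape, assuming plain singly linked queues instead of the kernel's list_head machinery:

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_PRIORITY 32

    struct entry {
        struct entry *next;
        int value;
    };

    struct priority_table {
        uint64_t search_vector;            /* bit i set => bucket i non-empty */
        struct entry *head[MAX_PRIORITY];  /* per-priority FIFO queues */
        struct entry *tail[MAX_PRIORITY];
    };

    static void enqueue(struct priority_table *t, unsigned int prio, struct entry *e)
    {
        e->next = NULL;
        if (t->tail[prio])
            t->tail[prio]->next = e;
        else
            t->head[prio] = e;
        t->tail[prio] = e;
        t->search_vector |= 1ULL << prio;  /* mark the bucket non-empty */
    }

    static struct entry *dequeue(struct priority_table *t)
    {
        unsigned int prio;
        struct entry *e;

        if (!t->search_vector)
            return NULL;                   /* all buckets are empty */
        prio = 63 - __builtin_clzll(t->search_vector); /* highest non-empty bucket */
        e = t->head[prio];
        t->head[prio] = e->next;
        if (!t->head[prio]) {
            t->tail[prio] = NULL;
            t->search_vector &= ~(1ULL << prio);
        }
        return e;
    }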
|
| /linux/drivers/s390/scsi/ |
| zfcp_reqlist.h |
    16: /* number of hash buckets */
    22: * @buckets: Array of hashbuckets, each is a list of requests in this bucket
    26: struct list_head buckets[ZFCP_REQ_LIST_BUCKETS]; member
    52: INIT_LIST_HEAD(&rl->buckets[i]); in zfcp_reqlist_alloc()
    68: if (!list_empty(&rl->buckets[i])) in zfcp_reqlist_isempty()
    92: list_for_each_entry(req, &rl->buckets[i], list) in _zfcp_reqlist_find()
    165: list_add_tail(&req->list, &rl->buckets[i]); in zfcp_reqlist_add()
    182: list_splice_init(&rl->buckets[i], list); in zfcp_reqlist_move()
    209: list_for_each_entry(req, &rl->buckets[i], list) in zfcp_reqlist_apply_for_all()
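
zfcp_reqlist.h above hashes outstanding requests into a small fixed array of list heads. A userspace approximation with singly linked chains; the bucket count and hash function here are illustrative, not the driver's:

    #include <stdlib.h>

    #define REQ_LIST_BUCKETS 128

    struct req {
        struct req *next;
        unsigned long req_id;
    };

    struct reqlist {
        struct req *buckets[REQ_LIST_BUCKETS];  /* one chain per hash bucket */
    };

    static unsigned int req_hash(unsigned long req_id)
    {
        return req_id % REQ_LIST_BUCKETS;
    }

    static void reqlist_add(struct reqlist *rl, struct req *r)
    {
        unsigned int i = req_hash(r->req_id);

        r->next = rl->buckets[i];
        rl->buckets[i] = r;
    }

    static struct req *reqlist_find(struct reqlist *rl, unsigned long req_id)
    {
        struct req *r;

        for (r = rl->buckets[req_hash(req_id)]; r; r = r->next)
            if (r->req_id == req_id)
                return r;
        return NULL;
    }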
|
| /linux/tools/testing/selftests/bpf/ |
| bpf_arena_htab.h |
    14: htab_bucket_t *buckets; member
    21: htab_bucket_t *b = htab->buckets; in __select_bucket()
    95: void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); in htab_init() local
    97: cast_user(buckets); in htab_init()
    98: htab->buckets = buckets; in htab_init()
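
htab_init() above carves the bucket array out of a fixed two-page arena allocation. A hedged sketch of the same sizing arithmetic, with plain calloc() standing in for bpf_arena_alloc_pages() and the page size assumed to be 4 KiB:

    #include <stdio.h>
    #include <stdlib.h>

    struct bucket { void *head; };

    int main(void)
    {
        size_t page_size = 4096, pages = 2;     /* assumed page size */
        size_t n_buckets = pages * page_size / sizeof(struct bucket);
        struct bucket *buckets = calloc(n_buckets, sizeof(*buckets));

        if (!buckets)
            return 1;
        printf("%zu buckets fit in %zu pages\n", n_buckets, pages);
        free(buckets);
        return 0;
    }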
|
| /linux/tools/testing/selftests/bpf/prog_tests/ |
| arena_htab.c |
    20: printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets); in test_arena_htab_common()
    21: ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL"); in test_arena_htab_common()
    22: for (i = 0; htab->buckets && i < 16; i += 4) { in test_arena_htab_common()
    24: * Walk htab buckets and link lists since all pointers are correct, in test_arena_htab_common()
|
| /linux/tools/lib/bpf/ |
| hashmap.c |
    21: /* start with 4 buckets */
    45: map->buckets = NULL; in hashmap__init()
    71: free(map->buckets); in hashmap__clear()
    72: map->buckets = NULL; in hashmap__clear()
    124: free(map->buckets); in hashmap_grow()
    125: map->buckets = new_buckets; in hashmap_grow()
    137: if (!map->buckets) in hashmap_find_entry()
    140: for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
    200: hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
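
The libbpf hashmap above starts with no buckets and, in hashmap_grow(), allocates a larger bucket array and rehashes every chained entry into it. A condensed sketch of that grow-and-rehash step; the struct layout and helper below are simplified stand-ins, not libbpf's actual API:

    #include <stdlib.h>

    struct entry {
        struct entry *next;
        long key;
        long value;
    };

    struct map {
        struct entry **buckets;
        size_t cap;     /* number of buckets, power of two */
        size_t sz;      /* number of entries */
    };

    static int grow(struct map *m)
    {
        size_t new_cap = m->cap ? m->cap * 2 : 4;   /* "start with 4 buckets" */
        struct entry **nb = calloc(new_cap, sizeof(*nb));
        size_t i;

        if (!nb)
            return -1;
        for (i = 0; i < m->cap; i++) {              /* rehash every chained entry */
            struct entry *e = m->buckets[i], *next;

            for (; e; e = next) {
                size_t h = (size_t)e->key & (new_cap - 1);

                next = e->next;
                e->next = nb[h];
                nb[h] = e;
            }
        }
        free(m->buckets);
        m->buckets = nb;
        m->cap = new_cap;
        return 0;
    }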
|
| /linux/tools/perf/util/ |
| hashmap.c |
    21: /* start with 4 buckets */
    45: map->buckets = NULL; in hashmap__init()
    71: free(map->buckets); in hashmap__clear()
    72: map->buckets = NULL; in hashmap__clear()
    124: free(map->buckets); in hashmap_grow()
    125: map->buckets = new_buckets; in hashmap_grow()
    137: if (!map->buckets) in hashmap_find_entry()
    140: for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; in hashmap_find_entry()
    200: hashmap_add_entry(&map->buckets[h], entry); in hashmap_insert()
|
| /linux/net/netfilter/ipvs/ |
| ip_vs_dh.c |
    63: struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE];
    89: return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); in ip_vs_dh_get()
    94: * Assign all the hash buckets of the specified table with the service.
    105: b = &s->buckets[0]; in ip_vs_dh_reassign()
    131: * Flush all the hash buckets of the specified table.
    139: b = &s->buckets[0]; in ip_vs_dh_flush()
    165: /* assign the hash buckets with current dests */ in ip_vs_dh_init_svc()
    176: /* got to clean up hash buckets here */ in ip_vs_dh_done_svc()
    191: /* assign the hash buckets with the updated service */ in ip_vs_dh_dest_changed()
    64: struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; global() member
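
ip_vs_dh above keeps a fixed table in which every bucket holds a destination pointer; a packet's address hashes straight to one bucket, and reassignment walks all buckets whenever the destination set changes. A small sketch of that assign-then-index pattern, with an invented hash, an invented round-robin fill, and no RCU:

    #include <stdint.h>
    #include <stdio.h>

    #define DH_TAB_SIZE 256

    struct dest { const char *name; };

    static struct dest *buckets[DH_TAB_SIZE];

    static unsigned int dh_hashkey(uint32_t addr)
    {
        return (addr * 2654435761u) >> 24;   /* 0..255 */
    }

    /* Assign all hash buckets over the current destinations. */
    static void dh_reassign(struct dest **dests, unsigned int n_dests)
    {
        for (unsigned int i = 0; i < DH_TAB_SIZE; i++)
            buckets[i] = n_dests ? dests[i % n_dests] : NULL;
    }

    int main(void)
    {
        struct dest d1 = { "rs1" }, d2 = { "rs2" };
        struct dest *dests[] = { &d1, &d2 };

        dh_reassign(dests, 2);
        printf("10.0.0.7 -> %s\n", buckets[dh_hashkey(0x0a000007)]->name);
        return 0;
    }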
|
| ip_vs_sh.c |
    69: struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
    107: struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get()
    129: dest = rcu_dereference(s->buckets[ihash].dest); in ip_vs_sh_get_fallback()
    144: dest = rcu_dereference(s->buckets[hash].dest); in ip_vs_sh_get_fallback()
    159: * Assign all the hash buckets of the specified table with the service.
    171: b = &s->buckets[0]; in ip_vs_sh_reassign()
    207: * Flush all the hash buckets of the specified table.
    215: b = &s->buckets[0]; in ip_vs_sh_flush()
    241: /* assign the hash buckets with current dests */ in ip_vs_sh_init_svc()
    252: /* got to clean up hash buckets here */ in ip_vs_sh_done_svc()
    70: struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; global() member
    [all …]
|
| /linux/tools/perf/bench/ |
| futex.c |
    25: printf("Requesting %d hash buckets failed: %d/%m\n", in futex_set_nbuckets_param()
    40: printf("Can't query number of buckets: %m\n"); in futex_print_nbuckets()
    43: printf("Requested number of hash buckets does not currently used.\n"); in futex_print_nbuckets()
    50: ret = asprintf(&futex_hash_mode, "Futex hashing: %d hash buckets", in futex_print_nbuckets()
    56: ret = asprintf(&futex_hash_mode, "Futex hashing: auto resized to %d buckets", in futex_print_nbuckets()
|
| /linux/tools/testing/selftests/net/ |
| fib_nexthops.sh |
    362: local buckets=$2
    371: # create a resilient group with $buckets buckets and dump them
    373: run_cmd "$IP nexthop add id 1000 group 100 type resilient buckets $buckets"
    375: log_test $? 0 "Dump large (x$buckets) nexthop buckets"
    1020: # migration of nexthop buckets - equal weights
    1024: run_cmd "$IP nexthop add id 102 group 62/63 type resilient buckets 2 idle_timer 0"
    1028: "id 102 group 62 type resilient buckets 2 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
    1032: log_test $? 0 "Nexthop buckets updated when entry is deleted"
    1035: run_cmd "$IP nexthop replace id 102 group 62/63 type resilient buckets 2 idle_timer 0"
    1037: "id 102 group 62/63 type resilient buckets 2 idle_timer 0 unbalanced_timer 0 unbalanced_time 0"
    [all …]
|
| /linux/block/ |
| blk-stat.c |
    83: for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
    90: for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
    102: unsigned int buckets, void *data) in blk_stat_alloc_callback() argument
    110: cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
    116: cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat), in blk_stat_alloc_callback()
    127: cb->buckets = buckets; in blk_stat_alloc_callback()
    144: for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_add_callback()
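
blk_stat_alloc_callback() above sizes both the per-CPU scratch buckets and the folded result array by the caller's bucket count, and the timer callback later accumulates per-bucket samples. A reduced, single-threaded sketch of that per-bucket accumulate-and-report pattern; the struct and function names are made up for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct rq_stat { unsigned long nr; unsigned long long total_ns; };

    struct stat_cb {
        struct rq_stat *stat;    /* folded results, one per bucket */
        unsigned int buckets;
    };

    static struct stat_cb *stat_alloc(unsigned int buckets)
    {
        struct stat_cb *cb = malloc(sizeof(*cb));

        if (!cb)
            return NULL;
        cb->stat = calloc(buckets, sizeof(*cb->stat));
        if (!cb->stat) {
            free(cb);
            return NULL;
        }
        cb->buckets = buckets;
        return cb;
    }

    static void stat_add(struct stat_cb *cb, unsigned int bucket, unsigned long long ns)
    {
        cb->stat[bucket].nr++;
        cb->stat[bucket].total_ns += ns;
    }

    int main(void)
    {
        struct stat_cb *cb = stat_alloc(2);   /* e.g. bucket 0 = read, 1 = write */

        if (!cb)
            return 1;
        stat_add(cb, 0, 120000);
        stat_add(cb, 1, 450000);
        for (unsigned int b = 0; b < cb->buckets; b++)
            printf("bucket %u: %lu samples, %llu ns\n",
                   b, cb->stat[b].nr, cb->stat[b].total_ns);
        free(cb->stat);
        free(cb);
        return 0;
    }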
|
| /linux/drivers/md/bcache/ |
| bcache.h |
    35: * BUCKETS/ALLOCATION:
    42: * To do this, we first divide the cache device up into buckets. A bucket is the
    47: * it. The gens and priorities for all the buckets are stored contiguously and
    48: * packed on disk (in a linked list of buckets - aside from the superblock, all
    49: * of bcache's metadata is stored in buckets).
    56: * The generation is used for invalidating buckets. Each pointer also has an 8
    62: * Bcache is entirely COW - we never write twice to a bucket, even buckets that
    113: * (If buckets are really big we'll only use part of the bucket for a btree node
    143: * Thus, the primary purpose of garbage collection is to find buckets to reuse.
    145: * allocation can reuse buckets sooner when they've been mostly overwritten.
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/ |
| port_sel.h |
    13: /* Each port has ldev->buckets number of rules and they are arrange in
    14: * [port * buckets .. port * buckets + buckets) locations
|
| port_sel.c |
    54: ft_attr.max_fte = ldev->ports * ldev->buckets; in mlx5_lag_create_port_sel_table()
    81: for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_create_port_sel_table()
    84: idx = i * ldev->buckets + j; in mlx5_lag_create_port_sel_table()
    96: idx = k * ldev->buckets + j; in mlx5_lag_create_port_sel_table()
    99: j = ldev->buckets; in mlx5_lag_create_port_sel_table()
    361: for (j = 0; j < ldev->buckets; j++) { in mlx5_lag_destroy_definer()
    362: idx = i * ldev->buckets + j; in mlx5_lag_destroy_definer()
    592: for (j = 0; j < ldev->buckets; j++) { in __mlx5_lag_modify_definers_destinations()
    593: idx = i * ldev->buckets + j; in __mlx5_lag_modify_definers_destinations()
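
The port_sel comment and code above lay the LAG flow rules out flat, one per (port, bucket) pair, so the table holds ports * buckets entries and a rule lives at idx = port * buckets + bucket. A tiny illustration of that layout math with arbitrary sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ports = 2, buckets = 8;   /* example sizes */

        /* rules for port p occupy [p * buckets, p * buckets + buckets) */
        for (unsigned int p = 0; p < ports; p++)
            for (unsigned int b = 0; b < buckets; b++)
                printf("port %u bucket %u -> fte index %u\n", p, b, p * buckets + b);
        return 0;
    }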
|
| /linux/net/ceph/crush/ |
| crush.c |
    110: /* buckets */ in crush_destroy()
    111: if (map->buckets) { in crush_destroy()
    114: if (map->buckets[b] == NULL) in crush_destroy()
    116: crush_destroy_bucket(map->buckets[b]); in crush_destroy()
    118: kfree(map->buckets); in crush_destroy()
|
| mapper.c |
    496: /* choose through intervening buckets */ in crush_choose_firstn()
    531: itemtype = map->buckets[-1-item]->type; in crush_choose_firstn()
    544: in = map->buckets[-1-item]; in crush_choose_firstn()
    568: map->buckets[-1-item], in crush_choose_firstn()
    700: /* choose through intervening buckets */ in crush_choose_indep()
    744: itemtype = map->buckets[-1-item]->type; in crush_choose_indep()
    761: in = map->buckets[-1-item]; in crush_choose_indep()
    781: map->buckets[-1-item], in crush_choose_indep()
    868: if (!map->buckets[b]) in crush_init_workspace()
    872: switch (map->buckets[b]->alg) { in crush_init_workspace()
    [all …]
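
In the CRUSH code above, non-negative item ids name devices while negative ids name buckets, and a bucket with id b is stored at map->buckets[-1 - b]. A minimal sketch of that encoding; the bucket fields shown are invented for illustration:

    #include <stdio.h>

    struct bucket { int id; int type; };

    int main(void)
    {
        struct bucket root = { .id = -1, .type = 10 };
        struct bucket rack = { .id = -2, .type = 3 };
        struct bucket *buckets[] = { &root, &rack };  /* array index = -1 - id */
        int item = -2;                                /* a reference to "rack" */

        if (item < 0)
            printf("item %d is a bucket of type %d\n", item, buckets[-1 - item]->type);
        else
            printf("item %d is a device\n", item);
        return 0;
    }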
|
| /linux/net/sched/ |
| sch_hhf.c |
    19: * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
    20: * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
    22: * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
    103: #define WDRR_BUCKET_CNT 2 /* two buckets for Weighted DRR */
    128: struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; member
    147: struct list_head new_buckets; /* list of new buckets */
    148: struct list_head old_buckets; /* list of old buckets */
    244: /* Assigns packets to WDRR buckets. Implements a multi-stage filter to
    355: bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
    357: bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
    [all …]
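
sch_hhf above splits traffic into exactly two buckets, heavy-hitter and non-heavy-hitter, and drains them with weighted deficit round robin. A compact sketch of WDRR over two buckets; the quantum values and byte-count queues are illustrative, not the qdisc's real configuration:

    #include <stdio.h>

    struct wdrr_bucket {
        int deficit;        /* bytes this bucket may still send this round */
        int backlog_bytes;  /* stand-in for the real packet queue */
    };

    int main(void)
    {
        /* non-heavy-hitters get the larger quantum so they are not starved */
        const int quantum[2] = { 3000, 1000 };
        struct wdrr_bucket b[2] = { { 0, 9000 }, { 0, 9000 } };
        const int pkt = 1500;

        for (int round = 0; b[0].backlog_bytes || b[1].backlog_bytes; round++) {
            for (int i = 0; i < 2; i++) {
                b[i].deficit += quantum[i];
                while (b[i].deficit >= pkt && b[i].backlog_bytes >= pkt) {
                    b[i].deficit -= pkt;
                    b[i].backlog_bytes -= pkt;
                    printf("round %d: bucket %d sends %d bytes\n", round, i, pkt);
                }
            }
        }
        return 0;
    }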
|
| /linux/include/uapi/linux/netfilter/ |
| xt_hashlimit.h |
    38: __u32 size; /* how many buckets */
    62: __u32 size; /* how many buckets */
    76: __u32 size; /* how many buckets */
    90: __u32 size; /* how many buckets */
|
| /linux/tools/perf/ |
| builtin-ftrace.c |
    852: static void make_histogram(struct perf_ftrace *ftrace, int buckets[], in make_histogram() argument
    929: buckets[i]++; in make_histogram()
    941: static void display_histogram(struct perf_ftrace *ftrace, int buckets[]) in display_histogram() argument
    953: total += buckets[i]; in display_histogram()
    963: bar_len = buckets[0] * bar_total / total; in display_histogram()
    965: if (!ftrace->hide_empty || buckets[0]) in display_histogram()
    968: buckets[0], bar_len, bar, bar_total - bar_len, ""); in display_histogram()
    974: if (ftrace->hide_empty && !buckets[i]) in display_histogram()
    1005: bar_len = buckets[i] * bar_total / total; in display_histogram()
    1006: printf(" %s | %10d | %.*s%*s |\n", unit, buckets[i], bar_len, bar, in display_histogram()
    [all …]
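
display_histogram() above prints each latency bucket as a bar whose length is buckets[i] * bar_total / total. A stripped-down sketch of that display step with hard-coded counts; the bucket boundaries named in the comment are assumptions:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int buckets[] = { 12, 40, 25, 3 };  /* e.g. counts for <1us, <2us, <4us, >=4us */
        const int nbuckets = 4, bar_total = 40;
        char bar[41];
        int total = 0;

        memset(bar, '#', sizeof(bar) - 1);
        bar[sizeof(bar) - 1] = '\0';
        for (int i = 0; i < nbuckets; i++)
            total += buckets[i];
        for (int i = 0; i < nbuckets; i++) {
            int bar_len = total ? buckets[i] * bar_total / total : 0;

            /* "%.*s" prints bar_len '#' chars, "%*s" pads the rest of the column */
            printf("bucket %d | %10d | %.*s%*s |\n",
                   i, buckets[i], bar_len, bar, bar_total - bar_len, "");
        }
        return 0;
    }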
|
| /linux/fs/nfs/ |
| pnfs_nfs.c |
    104: p = kmalloc(struct_size(p, buckets, n), gfp_flags); in pnfs_alloc_commit_array()
    111: for (b = &p->buckets[0]; n != 0; b++, n--) { in pnfs_alloc_commit_array()
    261: struct pnfs_commit_bucket *buckets, in pnfs_bucket_scan_array() argument
    269: cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max); in pnfs_bucket_scan_array()
    290: cnt = pnfs_bucket_scan_array(cinfo, array->buckets, in pnfs_generic_scan_commit_lists()
    306: struct pnfs_commit_bucket *buckets, in pnfs_bucket_recover_commit_reqs() argument
    316: for (i = 0, b = buckets; i < nbuckets; i++, b++) { in pnfs_bucket_recover_commit_reqs()
    345: array->buckets, in pnfs_generic_recover_commit_reqs()
    385: static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets, in pnfs_generic_retry_commit() argument
    394: for (bucket = buckets; idx < nbuckets; bucket++, idx++) { in pnfs_generic_retry_commit()
    [all …]
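
pnfs_alloc_commit_array() above allocates the commit array and its trailing buckets[] in a single kmalloc sized with struct_size(). A userspace equivalent using a flexible array member; the struct fields are simplified stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct commit_bucket { int committing; };

    struct commit_array {
        unsigned int nbuckets;
        struct commit_bucket buckets[];   /* flexible array member */
    };

    int main(void)
    {
        unsigned int n = 8;
        /* same effect as the kernel's struct_size(p, buckets, n) */
        struct commit_array *p = malloc(sizeof(*p) + n * sizeof(p->buckets[0]));

        if (!p)
            return 1;
        p->nbuckets = n;
        for (unsigned int i = 0; i < n; i++)
            p->buckets[i].committing = 0;
        printf("allocated %u buckets in one allocation\n", p->nbuckets);
        free(p);
        return 0;
    }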
|
| /linux/drivers/message/fusion/ |
| mptlan.c |
    89: atomic_t buckets_out; /* number of unused buckets on IOC */
    103: int max_buckets_out; /* Max buckets to send to IOC */
    459: any buckets it still has. */
    503: dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets " in mpt_lan_close()
    832: dioprintk((MYNAM "/receive_skb: %d buckets remaining\n", in mpt_lan_receive_skb()
    838: dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets " in mpt_lan_receive_skb()
    920: "IOC returned %d buckets, freeing them...\n", count)); in mpt_lan_receive_post_free()
    950: /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n", in mpt_lan_receive_post_free()
    953: /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets " in mpt_lan_receive_post_free()
    1010: // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned " in mpt_lan_receive_post_reply()
    [all …]
|
| /linux/security/selinux/ |
| Kconfig |
    55: This option sets the number of buckets used in the sidtab hashtable
    56: to 2^SECURITY_SELINUX_SIDTAB_HASH_BITS buckets. The number of hash
    78: This option sets the number of buckets used in the AVC hash table
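
The Kconfig help above expresses the table size as 2^HASH_BITS buckets. A one-file illustration of how a bit count becomes a bucket count and an index mask; the 9-bit value used here is only an example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int hash_bits = 9;                  /* example bit count */
        unsigned int nbuckets = 1u << hash_bits;     /* 2^9 = 512 buckets */
        unsigned int mask = nbuckets - 1;
        unsigned int hash = 0xDEADBEEF;

        printf("%u buckets; an item with hash 0x%x lands in bucket %u\n",
               nbuckets, hash, hash & mask);
        return 0;
    }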
|
| /linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
| pno.c |
    298: struct brcmf_gscan_bucket_config **buckets, in brcmf_pno_prep_fwconfig() argument
    323: *buckets = NULL; in brcmf_pno_prep_fwconfig()
    355: *buckets = fw_buckets; in brcmf_pno_prep_fwconfig()
    396: struct brcmf_gscan_bucket_config *buckets; in brcmf_pno_config_sched_scans() local
    403: n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets, in brcmf_pno_config_sched_scans()
    437: memcpy(gscan_cfg->bucket, buckets, in brcmf_pno_config_sched_scans()
    438: array_size(n_buckets, sizeof(*buckets))); in brcmf_pno_config_sched_scans()
    463: kfree(buckets); in brcmf_pno_config_sched_scans()
|