
Searched refs:bucket (Results 1 – 25 of 210) sorted by relevance


/linux/drivers/md/dm-vdo/
int-map.c
73 struct __packed bucket { struct
102 struct bucket *buckets;
167 return vdo_allocate(map->bucket_count, struct bucket, in allocate_buckets()
244 static struct bucket *dereference_hop(struct bucket *neighborhood, unsigned int hop_offset) in dereference_hop()
260 static void insert_in_hop_list(struct bucket *neighborhood, struct bucket *new_bucket) in insert_in_hop_list()
276 struct bucket *bucket = dereference_hop(neighborhood, next_hop); in insert_in_hop_list() local
278 next_hop = bucket->next_hop; in insert_in_hop_list()
282 bucket->next_hop = hop_offset; in insert_in_hop_list()
293 static struct bucket *select_bucket(const struct int_map *map, u64 key) in select_bucket()
324 static struct bucket *search_hop_list(struct int_map *map __always_unused, in search_hop_list()
[all …]
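
The int-map.c matches above come from a hopscotch-style hash table: each entry lives in a small neighborhood around its ideal bucket, and the buckets of a neighborhood are chained through small hop offsets rather than pointers. A minimal stand-alone sketch of that encoding, with 1-based hop offsets so 0 can mean "end of list" (names and fields are illustrative, not the kernel's):

        #include <stddef.h>

        #define NULL_HOP_OFFSET 0       /* 0 means "no next entry" */

        struct bucket {
                unsigned int first_hop; /* 1-based offset of first entry hashing here */
                unsigned int next_hop;  /* 1-based offset of next entry in the hop list */
                unsigned long key;
                void *value;
        };

        /* Follow a 1-based hop offset from the first bucket of a neighborhood. */
        static struct bucket *dereference_hop(struct bucket *neighborhood,
                                              unsigned int hop_offset)
        {
                return hop_offset == NULL_HOP_OFFSET ? NULL
                                                     : &neighborhood[hop_offset - 1];
        }

        /* Walk a neighborhood's hop list looking for a key. */
        static struct bucket *search_hop_list(struct bucket *neighborhood,
                                              unsigned long key)
        {
                struct bucket *b = dereference_hop(neighborhood,
                                                   neighborhood->first_hop);

                while (b) {
                        if (b->key == key)
                                return b;
                        b = dereference_hop(neighborhood, b->next_hop);
                }
                return NULL;
        }
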
priority-table.c
23 struct bucket { struct
44 struct bucket buckets[];
64 struct bucket, __func__, &table); in vdo_make_priority_table()
69 struct bucket *bucket = &table->buckets[priority]; in vdo_make_priority_table() local
71 bucket->priority = priority; in vdo_make_priority_table()
72 INIT_LIST_HEAD(&bucket->queue); in vdo_make_priority_table()
140 static inline void mark_bucket_empty(struct priority_table *table, struct bucket *bucket) in mark_bucket_empty() argument
142 table->search_vector &= ~(1ULL << bucket->priority); in mark_bucket_empty()
157 struct bucket *bucket; in vdo_priority_table_dequeue() local
173 bucket = &table->buckets[top_priority]; in vdo_priority_table_dequeue()
[all …]
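
The search_vector manipulation visible above is the core of priority-table.c: one bit per priority level, so dequeue can find the highest non-empty bucket with a single find-last-set rather than scanning every bucket. A stand-alone sketch of that trick (hypothetical helpers; the kernel uses its own bitops):

        #include <stdint.h>

        /* One bit per priority; bit N set means bucket N is non-empty. */

        static inline void mark_bucket_nonempty(uint64_t *search_vector,
                                                unsigned int priority)
        {
                *search_vector |= 1ULL << priority;
        }

        static inline void mark_bucket_empty(uint64_t *search_vector,
                                             unsigned int priority)
        {
                *search_vector &= ~(1ULL << priority);
        }

        /* Highest non-empty priority, or -1 if every bucket is empty. */
        static inline int top_priority(uint64_t search_vector)
        {
                return search_vector ? 63 - __builtin_clzll(search_vector) : -1;
        }
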
/linux/net/mptcp/
token.c
111 struct token_bucket *bucket; in mptcp_token_new_request() local
122 bucket = token_bucket(token); in mptcp_token_new_request()
123 spin_lock_bh(&bucket->lock); in mptcp_token_new_request()
124 if (__token_bucket_busy(bucket, token)) { in mptcp_token_new_request()
125 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request()
129 hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); in mptcp_token_new_request()
130 bucket->chain_len++; in mptcp_token_new_request()
131 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request()
157 struct token_bucket *bucket; in mptcp_token_new_connect() local
163 bucket = token_bucket(subflow->token); in mptcp_token_new_connect()
[all …]
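
mptcp_token_new_request() above shows the classic per-bucket locking pattern: hash the token to one bucket, take only that bucket's lock, check for a collision, then chain the new entry and bump the chain length. A userspace analogue, with a pthread mutex standing in for the kernel's spin_lock_bh() (all names illustrative):

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdint.h>

        #define NBUCKETS 1024

        struct entry {
                uint32_t token;
                struct entry *next;
        };

        struct token_bucket {
                pthread_mutex_t lock;   /* initialize with pthread_mutex_init() */
                struct entry *chain;
                unsigned int chain_len;
        };

        static struct token_bucket table[NBUCKETS];

        /* Returns false if the token is already in use, true if inserted. */
        static bool token_insert(struct entry *e)
        {
                struct token_bucket *b = &table[e->token % NBUCKETS];

                pthread_mutex_lock(&b->lock);
                for (struct entry *c = b->chain; c; c = c->next) {
                        if (c->token == e->token) {
                                pthread_mutex_unlock(&b->lock);
                                return false;   /* busy: caller picks a new token */
                        }
                }
                e->next = b->chain;
                b->chain = e;
                b->chain_len++;
                pthread_mutex_unlock(&b->lock);
                return true;
        }
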
/linux/net/ceph/crush/
mapper.c
74 static int bucket_perm_choose(const struct crush_bucket *bucket, in bucket_perm_choose() argument
78 unsigned int pr = r % bucket->size; in bucket_perm_choose()
83 dprintk("bucket %d new x=%d\n", bucket->id, x); in bucket_perm_choose()
88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % in bucket_perm_choose()
89 bucket->size; in bucket_perm_choose()
95 for (i = 0; i < bucket->size; i++) in bucket_perm_choose()
100 for (i = 1; i < bucket->size; i++) in bucket_perm_choose()
112 if (p < bucket->size - 1) { in bucket_perm_choose()
113 i = crush_hash32_3(bucket->hash, x, bucket->id, p) % in bucket_perm_choose()
114 (bucket->size - p); in bucket_perm_choose()
[all …]
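
bucket_perm_choose() above picks the r-th item of a pseudo-random permutation of a bucket, seeded only by the input x and the bucket id, so every client and server computes the same placement with no coordination. The essence, with a generic 3-input mixer standing in for crush_hash32_3() (the real function additionally builds the permutation incrementally so repeated attempts never return duplicates):

        #include <stdint.h>

        /* Stand-in for crush_hash32_3(); any well-mixed 3-input hash works here. */
        static uint32_t hash32_3(uint32_t a, uint32_t b, uint32_t c)
        {
                uint32_t h = a * 2654435761u;

                h ^= b + 0x9e3779b9u + (h << 6) + (h >> 2);
                h ^= c + 0x9e3779b9u + (h << 6) + (h >> 2);
                return h;
        }

        /* Deterministically choose an item index for input x, attempt r. */
        static uint32_t bucket_choose(int32_t bucket_id, uint32_t bucket_size,
                                      uint32_t x, uint32_t r)
        {
                return hash32_3(x, (uint32_t)bucket_id, r) % bucket_size;
        }
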
/linux/drivers/interconnect/qcom/
bcm-voter.c
65 int bucket, i; in bcm_aggregate_mask() local
67 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate_mask()
68 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask()
69 bcm->vote_y[bucket] = 0; in bcm_aggregate_mask()
75 if (node->sum_avg[bucket] || node->max_peak[bucket]) { in bcm_aggregate_mask()
76 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask()
77 bcm->vote_y[bucket] = bcm->enable_mask; in bcm_aggregate_mask()
94 size_t i, bucket; in bcm_aggregate() local
99 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate()
102 temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, in bcm_aggregate()
[all …]
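
The bcm_aggregate_mask() excerpt reduces to a simple rule: for each QoS bucket, vote the BCM's enable mask if any member node carries bandwidth in that bucket, otherwise vote zero. A condensed sketch (structures simplified; NUM_BUCKETS is an illustrative stand-in for QCOM_ICC_NUM_BUCKETS):

        #define NUM_BUCKETS 4   /* stand-in for QCOM_ICC_NUM_BUCKETS */

        struct node {
                unsigned long long sum_avg[NUM_BUCKETS];
                unsigned long long max_peak[NUM_BUCKETS];
        };

        struct bcm {
                unsigned long long vote_x[NUM_BUCKETS];
                unsigned long long vote_y[NUM_BUCKETS];
                unsigned long long enable_mask;
                struct node *nodes;
                int num_nodes;
        };

        static void bcm_aggregate_mask(struct bcm *bcm)
        {
                for (int bucket = 0; bucket < NUM_BUCKETS; bucket++) {
                        bcm->vote_x[bucket] = 0;
                        bcm->vote_y[bucket] = 0;

                        for (int i = 0; i < bcm->num_nodes; i++) {
                                struct node *node = &bcm->nodes[i];

                                /* Any bandwidth in this bucket enables the BCM. */
                                if (node->sum_avg[bucket] || node->max_peak[bucket]) {
                                        bcm->vote_y[bucket] = bcm->enable_mask;
                                        break;
                                }
                        }
                }
        }
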
/linux/block/
blk-stat.c
55 int bucket, cpu; in blk_stat_add() local
66 bucket = cb->bucket_fn(rq); in blk_stat_add()
67 if (bucket < 0) in blk_stat_add()
70 stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket]; in blk_stat_add()
80 unsigned int bucket; in blk_stat_timer_fn() local
83 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
84 blk_rq_stat_init(&cb->stat[bucket]); in blk_stat_timer_fn()
90 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
91 blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]); in blk_stat_timer_fn()
92 blk_rq_stat_init(&cpu_stat[bucket]); in blk_stat_timer_fn()
[all …]
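
blk_stat_timer_fn() above implements contention-free statistics: each CPU adds samples to its own copy of the bucket array, and the timer periodically folds all per-CPU copies into one result and resets them. A flattened sketch with plain 2-D arrays standing in for per-CPU allocations and a simplified stat record:

        #include <string.h>

        #define NCPUS    4
        #define NBUCKETS 8

        struct rq_stat {                        /* simplified blk_rq_stat */
                unsigned long long nr_samples;
                unsigned long long total;
        };

        static struct rq_stat cpu_stat[NCPUS][NBUCKETS];  /* per-CPU scratch */
        static struct rq_stat stat[NBUCKETS];             /* aggregated result */

        static void rq_stat_sum(struct rq_stat *dst, const struct rq_stat *src)
        {
                dst->nr_samples += src->nr_samples;
                dst->total      += src->total;
        }

        static void timer_fn(void)
        {
                memset(stat, 0, sizeof(stat));  /* plays blk_rq_stat_init()'s role */

                for (int cpu = 0; cpu < NCPUS; cpu++) {
                        for (int b = 0; b < NBUCKETS; b++) {
                                rq_stat_sum(&stat[b], &cpu_stat[cpu][b]);
                                memset(&cpu_stat[cpu][b], 0, sizeof(struct rq_stat));
                        }
                }
        }
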
/linux/net/sched/
sch_hhf.c
329 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) in dequeue_head() argument
331 struct sk_buff *skb = bucket->head; in dequeue_head()
333 bucket->head = skb->next; in dequeue_head()
339 static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) in bucket_add() argument
341 if (bucket->head == NULL) in bucket_add()
342 bucket->head = skb; in bucket_add()
344 bucket->tail->next = skb; in bucket_add()
345 bucket->tail = skb; in bucket_add()
352 struct wdrr_bucket *bucket; in hhf_drop() local
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
[all …]
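
The wdrr_bucket helpers above are a bare singly linked FIFO: head and tail pointers, pop from the head, append at the tail. A generic version with a plain struct pkt replacing struct sk_buff (the kernel's dequeue_head() assumes a non-empty bucket; this sketch checks):

        #include <stddef.h>

        struct pkt { struct pkt *next; };
        struct wdrr_bucket { struct pkt *head, *tail; };

        static struct pkt *dequeue_head(struct wdrr_bucket *b)
        {
                struct pkt *p = b->head;

                if (p)
                        b->head = p->next;
                return p;
        }

        static void bucket_add(struct wdrr_bucket *b, struct pkt *p)
        {
                p->next = NULL;
                if (!b->head)
                        b->head = p;
                else
                        b->tail->next = p;
                b->tail = p;
        }
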
/linux/drivers/infiniband/sw/rdmavt/
trace_qp.h
18 TP_PROTO(struct rvt_qp *qp, u32 bucket),
19 TP_ARGS(qp, bucket),
23 __field(u32, bucket)
28 __entry->bucket = bucket;
34 __entry->bucket
39 TP_PROTO(struct rvt_qp *qp, u32 bucket),
40 TP_ARGS(qp, bucket));
43 TP_PROTO(struct rvt_qp *qp, u32 bucket),
44 TP_ARGS(qp, bucket));
/linux/fs/nfs/
nfs42xattr.c
87 struct nfs4_xattr_bucket *bucket; member
238 entry->bucket = NULL; in nfs4_xattr_alloc_entry()
388 struct nfs4_xattr_bucket *bucket; in nfs4_xattr_discard_cache() local
394 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
396 spin_lock(&bucket->lock); in nfs4_xattr_discard_cache()
397 bucket->draining = true; in nfs4_xattr_discard_cache()
398 hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { in nfs4_xattr_discard_cache()
403 spin_unlock(&bucket->lock); in nfs4_xattr_discard_cache()
511 nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) in nfs4_xattr_get_entry() argument
517 hlist_for_each_entry(entry, &bucket->hlist, hnode) { in nfs4_xattr_get_entry()
[all …]
pnfs_nfs.c
63 pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket) in pnfs_free_bucket_lseg() argument
65 if (list_empty(&bucket->committing) && list_empty(&bucket->written)) { in pnfs_free_bucket_lseg()
66 struct pnfs_layout_segment *freeme = bucket->lseg; in pnfs_free_bucket_lseg()
67 bucket->lseg = NULL; in pnfs_free_bucket_lseg()
81 struct pnfs_commit_bucket *bucket = NULL; in pnfs_generic_clear_request_commit() local
87 bucket = list_first_entry(&req->wb_list, in pnfs_generic_clear_request_commit()
91 if (bucket) in pnfs_generic_clear_request_commit()
92 pnfs_put_lseg(pnfs_free_bucket_lseg(bucket)); in pnfs_generic_clear_request_commit()
241 pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, in pnfs_bucket_scan_ds_commit_list() argument
245 struct list_head *src = &bucket->written; in pnfs_bucket_scan_ds_commit_list()
[all …]
/linux/net/9p/
error.c
179 int bucket; in p9_error_init() local
182 for (bucket = 0; bucket < ERRHASHSZ; bucket++) in p9_error_init()
183 INIT_HLIST_HEAD(&hash_errmap[bucket]); in p9_error_init()
188 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; in p9_error_init()
190 hlist_add_head(&c->list, &hash_errmap[bucket]); in p9_error_init()
208 int bucket; in p9_errstr2errno() local
212 bucket = jhash(errstr, len, 0) % ERRHASHSZ; in p9_errstr2errno()
213 hlist_for_each_entry(c, &hash_errmap[bucket], list) { in p9_errstr2errno()
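
Both 9p functions above use the same shape: jhash the error string, reduce modulo ERRHASHSZ, then walk one short chain. A stand-alone sketch of the lookup side, with djb2 standing in for the kernel's jhash():

        #include <string.h>

        #define ERRHASHSZ 32

        struct errmap {
                const char *name;
                int errno_val;
                struct errmap *next;
        };

        static struct errmap *hash_errmap[ERRHASHSZ];

        static unsigned int errhash(const char *s, size_t len)
        {
                unsigned int h = 5381;          /* djb2, a jhash() stand-in */

                while (len--)
                        h = h * 33 + (unsigned char)*s++;
                return h % ERRHASHSZ;
        }

        static int errstr2errno(const char *errstr, size_t len)
        {
                for (struct errmap *c = hash_errmap[errhash(errstr, len)];
                     c; c = c->next)
                        if (strlen(c->name) == len && !memcmp(c->name, errstr, len))
                                return c->errno_val;
                return 0;                       /* unknown error string */
        }
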
/linux/fs/bcachefs/
backpointers.h
37 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
47 static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
52 *bucket = bp_pos_to_bucket(ca, bp_pos); in bp_pos_to_bucket_nodev_noerror()
57 static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
59 return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
64 struct bpos bucket, in bucket_pos_to_bp()
67 return POS(bucket.inode, in bucket_pos_to_bp()
68 (bucket_to_sector(ca, bucket.offset) << in bucket_pos_to_bp()
73 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree: in bch2_bucket_backpointer_mod()
76 struct bpos bucket, in bch2_bucket_backpointer_mod()
48 bucket_pos_to_bp_noerror(const struct bch_dev *ca, struct bpos bucket, u64 bucket_offset) bucket_pos_to_bp_noerror() argument
60 bucket_pos_to_bp(const struct bch_fs *c, struct bpos bucket, u64 bucket_offset) bucket_pos_to_bp() argument
74 bch2_bucket_backpointer_mod(struct btree_trans *trans, struct bpos bucket, struct bch_backpointer bp, struct bkey_s_c orig_k, bool insert) bch2_bucket_backpointer_mod() argument
[all …]
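
The conversions declared above map between two coordinate systems: an alloc-btree position (device in .inode, bucket index in .offset) and a backpointer-btree position, where the bucket's starting sector is shifted up to leave room for an offset within the bucket. A simplified stand-alone model of the round trip, assuming a fixed bucket size and shift (the kernel reads both per device; COMPRESS_SHIFT stands in for MAX_EXTENT_COMPRESS_RATIO_SHIFT):

        #include <stdint.h>

        #define COMPRESS_SHIFT 4
        #define BUCKET_SECTORS 512ULL   /* sectors per bucket, per-device in reality */

        struct bpos { uint64_t inode, offset; };

        static struct bpos bucket_pos_to_bp(struct bpos bucket, uint64_t bucket_offset)
        {
                return (struct bpos){
                        .inode  = bucket.inode,         /* device index */
                        .offset = ((bucket.offset * BUCKET_SECTORS) << COMPRESS_SHIFT)
                                  + bucket_offset,
                };
        }

        static struct bpos bp_pos_to_bucket(struct bpos bp_pos)
        {
                return (struct bpos){
                        .inode  = bp_pos.inode,
                        .offset = (bp_pos.offset >> COMPRESS_SHIFT) / BUCKET_SECTORS,
                };
        }
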
alloc_foreground.h
121 unsigned dev, u64 bucket)
124 (jhash_3words(dev, bucket, bucket >> 32, 0) & in open_bucket_hashslot()
128 static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket) in bch2_bucket_is_open()
130 open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket); in bch2_bucket_is_open()
135 if (ob->dev == dev && ob->bucket == bucket) in bch2_bucket_is_open()
144 static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket) in bch2_bucket_is_open_safe()
148 if (bch2_bucket_is_open(c, dev, bucket)) in bch2_bucket_is_open_safe()
152 ret = bch2_bucket_is_open(c, dev, bucket);
115 open_bucket_hashslot(struct bch_fs *c, unsigned dev, u64 bucket) open_bucket_hashslot() argument
122 bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket) bch2_bucket_is_open() argument
138 bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket) bch2_bucket_is_open_safe() argument
[all …]
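
open_bucket_hashslot() above hashes a (device, bucket) pair into a power-of-two table; note how the 64-bit bucket number is split into two 32-bit halves so both feed the hash. A sketch with a generic mixer replacing jhash_3words() (table size illustrative):

        #include <stdint.h>

        #define OPEN_BUCKETS_COUNT 1024 /* illustrative; must be a power of two */

        static uint16_t open_buckets_hash[OPEN_BUCKETS_COUNT];

        /* Stand-in for jhash_3words(). */
        static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
        {
                uint32_t h = a * 0x9e3779b1u;

                h = (h ^ b) * 0x85ebca6bu;
                h = (h ^ c) * 0xc2b2ae35u;
                return h ^ (h >> 16);
        }

        static uint16_t *open_bucket_hashslot(unsigned int dev, uint64_t bucket)
        {
                return &open_buckets_hash[mix3(dev, (uint32_t)bucket, bucket >> 32) &
                                          (OPEN_BUCKETS_COUNT - 1)];
        }
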
alloc_background.h
25 static inline u64 bucket_to_u64(struct bpos bucket) in bucket_to_u64() argument
27 return (bucket.inode << 48) | bucket.offset; in bucket_to_u64()
30 static inline struct bpos u64_to_bucket(u64 bucket) in u64_to_bucket() argument
32 return POS(bucket >> 48, bucket & ~(~0ULL << 48)); in u64_to_bucket()
40 static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src) in alloc_to_bucket()
49 static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src) in __bucket_m_to_alloc()
58 static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b) in bucket_m_to_alloc()
76 static inline bool bucket_data_type_mismatch(enum bch_data_type bucket, in bucket_data_type_mismatch() argument
79 return !data_type_is_empty(bucket) && in bucket_data_type_mismatch()
80 bucket_data_type(bucket) != bucket_data_type(ptr); in bucket_data_type_mismatch()
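
bucket_to_u64() and u64_to_bucket() above pack a bucket position into a single word: the device (the .inode field) in the top 16 bits, the bucket offset in the low 48. A self-contained round-trip check of the same arithmetic:

        #include <assert.h>
        #include <stdint.h>

        struct bpos { uint64_t inode, offset; };

        static uint64_t bucket_to_u64(struct bpos b)
        {
                return (b.inode << 48) | b.offset;
        }

        static struct bpos u64_to_bucket(uint64_t v)
        {
                return (struct bpos){
                        .inode  = v >> 48,
                        .offset = v & ~(~0ULL << 48),   /* low 48 bits */
                };
        }

        int main(void)
        {
                struct bpos b = { .inode = 3, .offset = 123456 };
                struct bpos r = u64_to_bucket(bucket_to_u64(b));

                assert(r.inode == b.inode && r.offset == b.offset);
                return 0;
        }
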
backpointers.c
19 struct bpos bucket, in extent_matches_bp() argument
39 if (bpos_eq(bucket, bucket2) && in extent_matches_bp()
64 struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p);
65 struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset); in bch2_backpointer_to_text()
94 struct bpos bucket = bp_pos_to_bucket(ca, k.k->p); in bch2_backpointer_swab()
96 prt_str(out, "bucket="); in backpointer_mod_err()
97 bch2_bpos_to_text(out, bucket); in backpointer_mod_err()
167 struct bpos bucket, in bch2_bucket_backpointer_mod_nowritebuffer()
183 bp_k->k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); in bch2_bucket_backpointer_mod_nowritebuffer()
220 struct bpos bucket, in bch2_get_next_backpointer()
53 struct bpos bucket = bp_pos_to_bucket(c, bp.k->p); bch2_backpointer_invalid() local
148 bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, struct bpos bucket, struct bch_backpointer bp, struct bkey_s_c orig_k, bool insert) bch2_bucket_backpointer_mod_nowritebuffer() argument
200 bch2_get_next_backpointer(struct btree_trans *trans, struct bpos bucket, int gen, struct bpos *bp_pos, struct bch_backpointer *bp, unsigned iter_flags) bch2_get_next_backpointer() argument
252 struct bpos bucket = bp_pos_to_bucket(c, bp_pos); backpointer_not_found() local
291 struct bpos bucket = bp_pos_to_bucket(c, bp_pos); bch2_backpointer_get_key() local
328 struct bpos bucket = bp_pos_to_bucket(c, bp_pos); bch2_backpointer_get_node() local
504 check_bp_exists(struct btree_trans *trans, struct extents_to_bp_state *s, struct bpos bucket, struct bch_backpointer bp, struct bkey_s_c orig_k) check_bp_exists() argument
[all …]
/linux/net/vmw_vsock/
diag.c
52 unsigned int bucket; in vsock_diag_dump() local
63 bucket = cb->args[1]; in vsock_diag_dump()
72 while (bucket < ARRAY_SIZE(vsock_bind_table)) { in vsock_diag_dump()
73 struct list_head *head = &vsock_bind_table[bucket]; in vsock_diag_dump()
94 bucket++; in vsock_diag_dump()
98 bucket = 0; in vsock_diag_dump()
102 while (bucket < ARRAY_SIZE(vsock_connected_table)) { in vsock_diag_dump()
103 struct list_head *head = &vsock_connected_table[bucket]; in vsock_diag_dump()
128 bucket++; in vsock_diag_dump()
135 cb->args[1] = bucket; in vsock_diag_dump()
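
vsock_diag_dump() above is a resumable iteration: the current bucket index is parked in cb->args between calls, so a dump that fills its buffer can resume exactly where it stopped. The skeleton of that pattern, reduced to the cursor handling (emit() is a hypothetical callback that returns nonzero when the output buffer fills):

        struct dump_state {
                unsigned int bucket;    /* mirrors cb->args[1] */
        };

        /* Returns 1 when the buffer filled and the dump should be re-invoked. */
        static int dump_table(struct dump_state *st, unsigned int nbuckets,
                              int (*emit)(unsigned int bucket))
        {
                for (unsigned int bucket = st->bucket; bucket < nbuckets; bucket++) {
                        if (emit(bucket)) {
                                st->bucket = bucket;    /* resume here next call */
                                return 1;
                        }
                }
                st->bucket = 0;                         /* done; reset the cursor */
                return 0;
        }
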
/linux/drivers/cpuidle/governors/
menu.c
116 unsigned int bucket; member
124 int bucket = 0; in which_bucket() local
133 bucket = BUCKETS/2; in which_bucket()
136 return bucket; in which_bucket()
138 return bucket + 1; in which_bucket()
140 return bucket + 2; in which_bucket()
142 return bucket + 3; in which_bucket()
144 return bucket + 4; in which_bucket()
145 return bucket + 5; in which_bucket()
293 data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters); in menu_select()
[all …]
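
The which_bucket() fragments above classify the expected sleep length into decade-wide buckets, with I/O waiters shifting the estimate into a second set of buckets. A hedged reconstruction of that logic following the pattern in the excerpt (treat BUCKETS and the exact thresholds as illustrative):

        #include <stdint.h>

        #define BUCKETS 12
        #define NSEC_PER_USEC 1000ULL

        static int which_bucket(uint64_t duration_ns, unsigned int nr_iowaiters)
        {
                int bucket = 0;

                /* I/O waiters select the second half of the bucket range. */
                if (nr_iowaiters)
                        bucket = BUCKETS / 2;

                if (duration_ns < 10 * NSEC_PER_USEC)
                        return bucket;
                if (duration_ns < 100 * NSEC_PER_USEC)
                        return bucket + 1;
                if (duration_ns < 1000 * NSEC_PER_USEC)
                        return bucket + 2;
                if (duration_ns < 10000 * NSEC_PER_USEC)
                        return bucket + 3;
                if (duration_ns < 100000 * NSEC_PER_USEC)
                        return bucket + 4;
                return bucket + 5;
        }
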
/linux/kernel/dma/
debug.c
265 static void put_hash_bucket(struct hash_bucket *bucket, in put_hash_bucket() argument
267 __releases(&bucket->lock) in put_hash_bucket()
269 spin_unlock_irqrestore(&bucket->lock, flags); in put_hash_bucket()
294 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, in __hash_bucket_find() argument
301 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
344 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, in bucket_find_exact() argument
347 return __hash_bucket_find(bucket, ref, exact_match); in bucket_find_exact()
350 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, in bucket_find_contain() argument
359 entry = __hash_bucket_find(*bucket, ref, containing_match); in bucket_find_contain()
367 put_hash_bucket(*bucket, *flags); in bucket_find_contain()
[all …]
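
bucket_find_exact() and bucket_find_contain() above share one list walker, __hash_bucket_find(), parameterized by a match predicate. A reduced model of that design (the kernel's predicates actually return a match score; a boolean keeps the sketch short):

        #include <stdbool.h>
        #include <stddef.h>

        struct dma_entry {
                unsigned long addr;
                size_t size;
                struct dma_entry *next;
        };

        struct hash_bucket { struct dma_entry *list; };

        typedef bool (*match_fn)(const struct dma_entry *e,
                                 unsigned long addr, size_t size);

        static bool exact_match(const struct dma_entry *e,
                                unsigned long addr, size_t size)
        {
                return e->addr == addr && e->size == size;
        }

        static bool containing_match(const struct dma_entry *e,
                                     unsigned long addr, size_t size)
        {
                return addr >= e->addr && addr + size <= e->addr + e->size;
        }

        static struct dma_entry *hash_bucket_find(struct hash_bucket *b,
                                                  unsigned long addr, size_t size,
                                                  match_fn match)
        {
                for (struct dma_entry *e = b->list; e; e = e->next)
                        if (match(e, addr, size))
                                return e;
                return NULL;
        }
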
/linux/Documentation/userspace-api/media/v4l/
metafmt-vsp1-hgt.rst
28 The Saturation position **n** (0 - 31) of the bucket in the matrix is
33 The Hue position **m** (0 - 5) of the bucket in the matrix depends on
101 - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
103 - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
107 - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
109 - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
113 - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
117 - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
121 - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
125 - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
[all …]
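
Reading the table above, each 32-bit histogram bucket is addressed by its hue row m (0-5) and saturation column n (0-31), laid out row-major. A one-line accessor capturing that layout (it assumes hist already points at the first bucket word; the real buffer carries header words before the buckets, so treat the base offset as an assumption):

        #include <stdint.h>

        #define HGT_HUE_BINS 6
        #define HGT_SAT_BINS 32

        static uint32_t hgt_bucket(const uint32_t *hist,
                                   unsigned int m, unsigned int n)
        {
                /* Row-major: 6 hue rows of 32 saturation buckets each. */
                return hist[m * HGT_SAT_BINS + n];
        }
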
/linux/fs/ocfs2/
xattr.c
121 struct ocfs2_xattr_bucket *bucket; member
275 struct ocfs2_xattr_bucket *bucket,
297 struct ocfs2_xattr_bucket *bucket,
318 struct ocfs2_xattr_bucket *bucket; in ocfs2_xattr_bucket_new() local
323 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); in ocfs2_xattr_bucket_new()
324 if (bucket) { in ocfs2_xattr_bucket_new()
325 bucket->bu_inode = inode; in ocfs2_xattr_bucket_new()
326 bucket->bu_blocks = blks; in ocfs2_xattr_bucket_new()
329 return bucket; in ocfs2_xattr_bucket_new()
332 static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) in ocfs2_xattr_bucket_relse() argument
[all …]
/linux/drivers/md/bcache/
alloc.c
3 * Primary bucket allocation code
9 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
12 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
13 * bucket simply by incrementing its gen.
19 * When we invalidate a bucket, we have to write its new gen to disk and wait
33 * If we've got discards enabled, that happens when a bucket moves from the
46 * a bucket is in danger of wrapping around we simply skip invalidating it that
50 * bch_bucket_alloc() allocates a single bucket from a specific cache.
52 * bch_bucket_alloc_set() allocates one bucket from different caches
76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *
302 bch_allocator_push(struct cache *ca, long bucket) bch_allocator_push() argument
330 long bucket; bch_allocator_thread() local
[all …]
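
The comments above describe bucket generations; in miniature: pointers embed the gen of the bucket they point into, so bumping a bucket's 8-bit gen invalidates every outstanding pointer at once, and wraparound falls out of ordinary modular arithmetic (bcache separately bounds how far gens may drift). A stand-alone sketch, not bcache's types:

        #include <stdbool.h>
        #include <stdint.h>

        struct bucket { uint8_t gen; };

        struct bkey_ptr {
                unsigned long bucket_nr;
                uint8_t gen;            /* gen at the time the pointer was made */
        };

        static uint8_t inc_gen(struct bucket *b)
        {
                return ++b->gen;        /* wraps at 255; that's fine, see below */
        }

        static bool ptr_stale(const struct bucket *buckets, struct bkey_ptr p)
        {
                /* 8-bit subtraction handles wraparound correctly. */
                return (uint8_t)(buckets[p.bucket_nr].gen - p.gen) != 0;
        }
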
/linux/drivers/md/
dm-clone-target.c
551 * the least significant bit of the list head to lock the corresponding bucket,
555 * spin locks; one per hash table bucket.
560 /* Spinlock protecting the bucket */
564 #define bucket_lock_irqsave(bucket, flags) \ argument
565 spin_lock_irqsave(&(bucket)->lock, flags)
567 #define bucket_unlock_irqrestore(bucket, flags) \ argument
568 spin_unlock_irqrestore(&(bucket)->lock, flags)
570 #define bucket_lock_irq(bucket) \ argument
571 spin_lock_irq(&(bucket)->lock)
573 #define bucket_unlock_irq(bucket) \ argument
579 struct hash_table_bucket *bucket; hash_table_init() local
613 __hash_find(struct hash_table_bucket *bucket, unsigned long region_nr) __hash_find() argument
631 __insert_region_hydration(struct hash_table_bucket *bucket, struct dm_clone_region_hydration *hd) __insert_region_hydration() argument
645 __find_or_insert_region_hydration(struct hash_table_bucket *bucket, struct dm_clone_region_hydration *hd) __find_or_insert_region_hydration() argument
703 struct hash_table_bucket *bucket; hydration_update_metadata() local
877 struct hash_table_bucket *bucket; hydrate_bio_region() local
1020 struct hash_table_bucket *bucket; __start_next_hydration() local
[all …]
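
The macros above give every hash table bucket its own lock, so lookups and inserts for different regions can proceed in parallel. A sketch of a find under that scheme, with the caller taking the bucket lock first as __hash_find() requires (userspace types stand in for the kernel's):

        #include <stddef.h>

        #define HASH_TABLE_SIZE 256     /* illustrative */

        struct hydration {
                unsigned long region_nr;
                struct hydration *next;
        };

        struct hash_table_bucket {
                /* one spinlock per bucket in the kernel; omitted here */
                struct hydration *head;
        };

        static struct hash_table_bucket table[HASH_TABLE_SIZE];

        static struct hash_table_bucket *get_bucket(unsigned long region_nr)
        {
                return &table[region_nr % HASH_TABLE_SIZE];
        }

        /* Caller must hold the bucket's lock. */
        static struct hydration *hash_find(struct hash_table_bucket *bucket,
                                           unsigned long region_nr)
        {
                for (struct hydration *hd = bucket->head; hd; hd = hd->next)
                        if (hd->region_nr == region_nr)
                                return hd;
                return NULL;
        }
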
/linux/tools/testing/selftests/drivers/net/hw/
ethtool_rmon.sh
35 local bucket=$1; shift
50 jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
58 jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
78 while read -r -a bucket; do
82 if ! ensure_mtu $if ${bucket[0]}; then
88 if ! bucket_test $iface $neigh $set $nbuckets ${bucket[0]}; then
/linux/Documentation/networking/
nexthop-group-resilient.rst
49 to choose a hash table bucket, then reads the next hop that this bucket
83 cause bucket allocation change, the wants counts for individual next hops
91 Each bucket maintains a last-used timer. Every time a packet is forwarded
92 through a bucket, this timer is updated to current jiffies value. One
94 amount of time that a bucket must not be hit by traffic in order for it to
104 upkeep changes the next hop that the bucket references to one of the
135 - Single-bucket notifications of the type
143 Some single-bucket notifications are forced, as indicated by the "force"
145 hop associated with the bucket was removed, and the bucket really must be
150 bucket should be migrated, but the HW discovers that the bucket has in fact
[all …]
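
The documentation above boils down to one indirection on the forwarding path: hash the flow, pick a bucket, forward to the next hop that bucket references, and refresh the bucket's last-used stamp so upkeep can later migrate only idle buckets. A sketch with illustrative types (the kernel records jiffies rather than wall-clock time):

        #include <stdint.h>
        #include <time.h>

        struct nh_bucket {
                unsigned int nh_id;     /* next hop this bucket maps to */
                time_t last_used;       /* refreshed on every forwarded packet */
        };

        struct nh_group {
                unsigned int num_buckets;
                struct nh_bucket *buckets;
        };

        static unsigned int forward(struct nh_group *grp, uint32_t flow_hash)
        {
                struct nh_bucket *b = &grp->buckets[flow_hash % grp->num_buckets];

                b->last_used = time(NULL);      /* lets upkeep spot idle buckets */
                return b->nh_id;
        }
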
/linux/include/trace/events/
bcache.h
68 __field(size_t, bucket )
72 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
75 TP_printk("bucket %zu", __entry->bucket)
267 __field(size_t, bucket )
273 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
279 __entry->bucket, __entry->block, __entry->keys)
370 __field(size_t, bucket )
375 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
379 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
429 TP_PROTO(struct cache *ca, size_t bucket),
[all …]
