/linux/net/mptcp/token.c
     53: /* called with bucket lock held */
     66: /* called with bucket lock held */
    111: struct token_bucket *bucket;  (local in mptcp_token_new_request())
    122: bucket = token_bucket(token);
    123: spin_lock_bh(&bucket->lock);
    124: if (__token_bucket_busy(bucket, token)) {
    125:         spin_unlock_bh(&bucket->lock);
    129: hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);
    130: bucket->chain_len++;
    131: spin_unlock_bh(&bucket->lock);
    [all …]

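The hits above show the token-insertion path: hash to a bucket, take the per-bucket lock, fail on collision, otherwise chain the request. A minimal userspace sketch of that insert-if-absent pattern; a pthread mutex stands in for the kernel's BH-disabling spinlock and a plain singly-linked list for the RCU nulls-hlist, and all names are illustrative:

```c
#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
	unsigned int token;
};

struct bucket {
	pthread_mutex_t lock;
	struct node *chain;
	unsigned int chain_len;
};

/* Insert-if-absent under the per-bucket lock, shaped like mptcp_token_new_request(). */
static int bucket_insert(struct bucket *b, struct node *n)
{
	pthread_mutex_lock(&b->lock);
	for (struct node *p = b->chain; p; p = p->next) {
		if (p->token == n->token) {
			pthread_mutex_unlock(&b->lock);
			return -1;	/* token collision: caller picks a new one */
		}
	}
	n->next = b->chain;
	b->chain = n;
	b->chain_len++;
	pthread_mutex_unlock(&b->lock);
	return 0;
}
```
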
/linux/drivers/md/dm-vdo/priority-table.c
     20: * All the entries with the same priority are queued in a circular list in a bucket for that
     23: struct bucket {  (struct)
     28: /* The priority of all the entries in this bucket */
     34: * of the queue in the appropriate bucket. The dequeue operation finds the highest-priority
     35: * non-empty bucket by searching a bit vector represented as a single 8-byte word, which is very
     44: struct bucket buckets[];
     64: struct bucket, __func__, &table);  (in vdo_make_priority_table())
     69: struct bucket *bucket = &table->buckets[priority];  (local)
     71: bucket->priority = priority;
     72: INIT_LIST_HEAD(&bucket->queue);
    [all …]

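The comments at lines 34-35 describe the dequeue fast path. A sketch of that search, assuming one bit per priority in an 8-byte search vector (names are illustrative; the find-last-set uses a GCC/Clang builtin):

```c
#include <stdint.h>

/*
 * Bit p of search_vector is assumed set iff the bucket for priority p is
 * non-empty, so the highest-priority non-empty bucket falls out of one
 * find-last-set on a single 64-bit word.
 */
static int highest_nonempty_priority(uint64_t search_vector)
{
	if (!search_vector)
		return -1;	/* every bucket is empty */
	return 63 - __builtin_clzll(search_vector);
}
```
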
/linux/net/ceph/crush/mapper.c
     58: * bucket choose methods
     60: * For each bucket algorithm, we have a "choose" method that, given a
     62: * will produce an item in the bucket.
     66: * Choose based on a random permutation of the bucket.
     70: * calculate an actual random permutation of the bucket members.
     74: static int bucket_perm_choose(const struct crush_bucket *bucket,  (argument, in bucket_perm_choose())
     78: unsigned int pr = r % bucket->size;
     83: dprintk("bucket %d new x=%d\n", bucket->id, x);
     88: s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
     89:         bucket->size;
    [all …]

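As the comments say, each bucket algorithm supplies a "choose" method mapping an input x and replica number r to an item. A toy sketch of the general shape only; hash_mix is a stand-in mixer, not the real crush_hash32_3():

```c
#include <stdint.h>

/* Toy mixer standing in for crush_hash32_3(); not the real CRUSH hash. */
static uint32_t hash_mix(uint32_t a, uint32_t b, uint32_t c)
{
	a ^= b * 0x9e3779b9u;
	a = (a << 13) | (a >> 19);
	return (a + c) * 0x85ebca6bu;
}

/*
 * Shape of a "choose" method: deterministically map input x and replica
 * number r to a position in a bucket of 'size' items. The same (x, r)
 * always lands on the same item, which is what makes CRUSH placement
 * reproducible without any lookup table.
 */
static uint32_t bucket_choose_sketch(int bucket_id, uint32_t size,
				     uint32_t x, uint32_t r)
{
	return hash_mix(x, (uint32_t)bucket_id, r) % size;
}
```
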
/linux/block/blk-stat.c
     55: int bucket, cpu;  (local in blk_stat_add())
     66: bucket = cb->bucket_fn(rq);
     67: if (bucket < 0)
     70: stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
     80: unsigned int bucket;  (local in blk_stat_timer_fn())
     83: for (bucket = 0; bucket < cb->buckets; bucket++)
     84:         blk_rq_stat_init(&cb->stat[bucket]);
     90: for (bucket = 0; bucket < cb->buckets; bucket++) {
     91:         blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
     92:         blk_rq_stat_init(&cpu_stat[bucket]);
    [all …]

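blk_stat_timer_fn() above follows a common per-CPU pattern: zero the global buckets, then fold in and reset each CPU's private buckets. A userspace sketch with assumed sizes:

```c
#include <stdint.h>

#define NR_CPUS    4	/* assumed sizes for the sketch */
#define NR_BUCKETS 8

static uint64_t cpu_stat[NR_CPUS][NR_BUCKETS];	/* per-CPU counters */
static uint64_t stat[NR_BUCKETS];		/* aggregated result */

/*
 * Reset the global buckets, then fold each CPU's buckets in and zero
 * them for the next accounting window.
 */
static void fold_stats(void)
{
	for (unsigned int b = 0; b < NR_BUCKETS; b++)
		stat[b] = 0;

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		for (unsigned int b = 0; b < NR_BUCKETS; b++) {
			stat[b] += cpu_stat[cpu][b];
			cpu_stat[cpu][b] = 0;
		}
	}
}
```
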
/linux/drivers/interconnect/qcom/bcm-voter.c
     65: int bucket, i;  (local in bcm_aggregate_mask())
     67: for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
     68:         bcm->vote_x[bucket] = 0;
     69:         bcm->vote_y[bucket] = 0;
     74:         /* If any vote in this bucket exists, keep the BCM enabled */
     75:         if (node->sum_avg[bucket] || node->max_peak[bucket]) {
     76:                 bcm->vote_x[bucket] = 0;
     77:                 bcm->vote_y[bucket] = bcm->enable_mask;
     94: size_t i, bucket;  (local in bcm_aggregate())
     99: for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
    [all …]

/linux/net/sched/sch_hhf.c
     21: * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
     23: * in which the heavy-hitter bucket is served with less weight.
     61: * dispatched to the heavy-hitter bucket accordingly.
     68: * bucket.
     71: * to the non-heavy-hitter bucket.
     74: * send p to the heavy-hitter bucket.
    105: WDRR_BUCKET_FOR_HH = 0,     /* bucket id for heavy-hitters */
    106: WDRR_BUCKET_FOR_NON_HH = 1  /* bucket id for non-heavy-hitters */
    328: /* Removes one skb from head of bucket. */
    329: static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)  (argument, in dequeue_head())
    [all …]

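The scheduler comments describe two WDRR buckets where the heavy-hitter bucket is served with less weight. A simplified deficit-round-robin sketch; the quanta here are assumptions (the real qdisc derives them from the MTU and a configured weight), and the caller is expected to subtract the dequeued packet's length from the chosen bucket's deficit:

```c
enum { BUCKET_HH, BUCKET_NON_HH, NUM_BUCKETS };

struct bucket { int deficit; int backlog; };

/* Assumed weights: the non-HH bucket is refilled with a larger quantum. */
static const int quantum[NUM_BUCKETS] = {
	[BUCKET_HH]     = 1,	/* heavy hitters: less weight */
	[BUCKET_NON_HH] = 7,	/* normal flows: more weight  */
};

/*
 * Pick the next bucket to serve; the caller dequeues one packet from it
 * and decrements that bucket's deficit by the packet length.
 */
static int next_bucket(struct bucket b[NUM_BUCKETS])
{
	if (!b[BUCKET_HH].backlog && !b[BUCKET_NON_HH].backlog)
		return -1;		/* nothing queued */
	for (;;) {
		for (int i = 0; i < NUM_BUCKETS; i++)
			if (b[i].backlog && b[i].deficit > 0)
				return i;
		for (int i = 0; i < NUM_BUCKETS; i++)
			b[i].deficit += quantum[i];	/* refill round */
	}
}
```
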
/linux/tools/tracing/rtla/src/timerlat.bpf.c
     75:                 int bucket)  (argument, in update_main_hist())
     81: if (bucket >= entries)
     85: map_increment(map, bucket);
     90:                 int bucket)  (argument, in update_summary())
     98: if (bucket >= entries)
    127: int bucket;  (local in handle_timerlat_sample())
    134: bucket = latency / bucket_size;
    137: update_main_hist(&hist_irq, bucket);
    138: update_summary(&summary_irq, latency, bucket);
    143: update_main_hist(&hist_thread, bucket);
    [all …]

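handle_timerlat_sample() buckets each latency linearly (bucket = latency / bucket_size) and, per the `bucket >= entries` checks, discards samples past the end. A compact sketch of the same bucketing with an assumed histogram size, clamping overflow into the final bucket instead:

```c
#include <stdint.h>

#define HIST_ENTRIES 256	/* assumed histogram size */

static uint64_t hist[HIST_ENTRIES];

/*
 * Linear bucketing: each bucket covers bucket_size units of latency.
 * Overflow is clamped into the last bucket here; the BPF program
 * instead drops samples past the end.
 */
static void hist_add(uint64_t latency, uint64_t bucket_size)
{
	uint64_t bucket = latency / bucket_size;

	if (bucket >= HIST_ENTRIES)
		bucket = HIST_ENTRIES - 1;
	hist[bucket]++;
}
```
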
/linux/drivers/infiniband/sw/rdmavt/trace_qp.h
     18: TP_PROTO(struct rvt_qp *qp, u32 bucket),
     19: TP_ARGS(qp, bucket),
     23:         __field(u32, bucket)
     28:         __entry->bucket = bucket;
     31:         "[%s] qpn 0x%x bucket %u",
     34:         __entry->bucket
     39: TP_PROTO(struct rvt_qp *qp, u32 bucket),
     40: TP_ARGS(qp, bucket));
     43: TP_PROTO(struct rvt_qp *qp, u32 bucket),
     44: TP_ARGS(qp, bucket));

/linux/fs/ocfs2/xattr.c
     64: /* The actual buffers that make up the bucket */
     67: /* How many blocks make up one bucket for this filesystem */
    125: struct ocfs2_xattr_bucket *bucket;  (member)
    279:                            struct ocfs2_xattr_bucket *bucket,
    301:                              struct ocfs2_xattr_bucket *bucket,
    322: struct ocfs2_xattr_bucket *bucket;  (local in ocfs2_xattr_bucket_new())
    327: bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
    328: if (bucket) {
    329:         bucket->bu_inode = inode;
    330:         bucket->bu_blocks = blks;
    [all …]

/linux/include/linux/crush/crush.h
     99: * A bucket is a named container of other items (either devices or
    100: * other buckets). Items within a bucket are chosen using one of a
    105: * Bucket Alg     Speed    Additions    Removals
    144: * Replacement weights for each item in a bucket. The size of the
    145: * array must be exactly the size of the straw2 bucket, just as the
    157: * Replacement weights and ids for a given straw2 bucket, for
    160: * When crush_do_rule() chooses the Nth item from a straw2 bucket, the
    185: * Replacement weights and ids for each bucket in the crushmap. The
    190: * an item from the bucket __map->buckets[N]__ bucket, provided it
    191: * is a straw2 bucket.
    [all …]

/linux/Documentation/networking/nexthop-group-resilient.rst
     49: to choose a hash table bucket, then reads the next hop that this bucket
     83: cause bucket allocation change, the wants counts for individual next hops
     91: Each bucket maintains a last-used timer. Every time a packet is forwarded
     92: through a bucket, this timer is updated to current jiffies value. One
     94: amount of time that a bucket must not be hit by traffic in order for it to
    104: upkeep changes the next hop that the bucket references to one of the
    135: - Single-bucket notifications of the type
    143: Some single-bucket notifications are forced, as indicated by the "force"
    145: hop associated with the bucket was removed, and the bucket really must be
    150: bucket should be migrated, but the HW discovers that the bucket has in fact
    [all …]

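The documentation excerpts describe the two moving parts: a hash-indexed bucket table that names next hops, and a per-bucket last-used timer that gates migration. A sketch under assumed types and table size:

```c
#include <stdbool.h>
#include <stdint.h>

#define NUM_BUCKETS 128	/* assumption: table size fixed at group creation */

struct bucket {
	uint32_t nh_id;		/* next hop this bucket currently references */
	uint64_t last_used;	/* updated on every forwarded packet */
};

static struct bucket table[NUM_BUCKETS];

/* Forwarding path: the flow hash picks a bucket, the bucket names the next hop. */
static uint32_t select_nexthop(uint32_t flow_hash, uint64_t now)
{
	struct bucket *b = &table[flow_hash % NUM_BUCKETS];

	b->last_used = now;
	return b->nh_id;
}

/* Upkeep: a bucket may migrate only after sitting idle for idle_timer units. */
static bool may_migrate(const struct bucket *b, uint64_t now,
			uint64_t idle_timer)
{
	return now - b->last_used >= idle_timer;
}
```
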
/linux/Documentation/userspace-api/media/v4l/metafmt-vsp1-hgt.rst
     29: The Saturation position **n** (0 - 31) of the bucket in the matrix is
     34: The Hue position **m** (0 - 5) of the bucket in the matrix depends on
    102:     - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
    104:     - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
    108:     - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
    110:     - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
    114:     - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
    118:     - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
    122:     - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
    126:     - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
    [all …]

/linux/net/vmw_vsock/diag.c
     52: unsigned int bucket;  (local in vsock_diag_dump())
     63: bucket = cb->args[1];
     72: while (bucket < ARRAY_SIZE(vsock_bind_table)) {
     73:         struct list_head *head = &vsock_bind_table[bucket];
     94:         bucket++;
     98: bucket = 0;
    102: while (bucket < ARRAY_SIZE(vsock_connected_table)) {
    103:         struct list_head *head = &vsock_connected_table[bucket];
    128:         bucket++;
    135: cb->args[1] = bucket;

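vsock_diag_dump() parks the current bucket index in cb->args[1] so an interrupted dump can resume where it stopped. A userspace sketch of that resumable walk; one entry per bucket for brevity, whereas the kernel walks a list per bucket and also tracks how many entries were already sent:

```c
#include <stddef.h>

#define TABLE_SIZE 256	/* assumed table size */

struct dump_state {
	size_t bucket;	/* parked across calls, like cb->args[1] */
};

/*
 * emit() is an assumed callback returning 0 when the output buffer is
 * full; the saved bucket index lets the next call resume in place.
 */
static void dump_table(void *table[TABLE_SIZE], struct dump_state *st,
		       int (*emit)(void *entry))
{
	while (st->bucket < TABLE_SIZE) {
		void *entry = table[st->bucket];

		if (entry && !emit(entry))
			return;	/* full: resume at this bucket next time */
		st->bucket++;
	}
}
```
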
/linux/drivers/md/bcache/bcache.h
     42: * To do this, we first divide the cache device up into buckets. A bucket is the
     46: * Each bucket has a 16 bit priority, and an 8 bit generation associated with
     51: * The priority is used to implement an LRU. We reset a bucket's priority when
     53: * of each bucket. It could be used to implement something more sophisticated,
     58: * must match the gen of the bucket it points into. Thus, to reuse a bucket all
     62: * Bcache is entirely COW - we never write twice to a bucket, even buckets that
    100: * accomplished by either by invalidating pointers (by incrementing a bucket's
    110: * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
    111: * free smaller than a bucket - so, that's how big our btree nodes are.
    113: * (If buckets are really big we'll only use part of the bucket for a btree node
    [all …]

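The bcache comments explain that every pointer carries a generation which must match its bucket's generation, so bumping the bucket's gen invalidates all pointers into it at once. A sketch with an assumed field layout:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed per-bucket metadata mirroring the comment above. */
struct bucket_meta {
	uint16_t prio;	/* LRU position; reset when the bucket is hit */
	uint8_t  gen;	/* bumped to invalidate pointers into the bucket */
};

struct bucket_ptr {
	size_t  bucket;	/* which bucket this pointer lands in */
	uint8_t gen;	/* generation recorded when the pointer was made */
};

/* A pointer is live only while its saved gen matches the bucket's gen. */
static bool ptr_valid(const struct bucket_meta *meta,
		      const struct bucket_ptr *p)
{
	return p->gen == meta[p->bucket].gen;
}

/* Reusing a bucket: one increment kills every outstanding pointer at once. */
static void bucket_invalidate(struct bucket_meta *meta, size_t bucket)
{
	meta[bucket].gen++;
}
```
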
/linux/fs/afs/dir_search.c
     25: int bucket;  (local in afs_dir_hash_name())
     29: bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1);
     31:         bucket = AFS_DIR_HASHTBL_SIZE - bucket;
     32:         bucket &= (AFS_DIR_HASHTBL_SIZE - 1);
     34: return bucket;
     60: iter->bucket = afs_dir_hash_name(name);  (in afs_dir_init_iter())
    121: * Search through a directory bucket.
    134: entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]);  (in afs_dir_search_bucket())
    135: _enter("%x,%x", iter->bucket, entry);
    148:           iter->bucket, resv, slot, slot + iter->nr_slots - 1);
    [all …]

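afs_dir_hash_name() folds a name hash into a hash-table bucket, with a correction for hashes that would be negative as signed integers. A sketch; the multiply-by-173 string hash is an assumption based on the classic AFS directory format, and the table size here is illustrative:

```c
#include <limits.h>
#include <stddef.h>

#define AFS_DIR_HASHTBL_SIZE 128	/* power of two, per the masking above */

static int afs_dir_hash_sketch(const unsigned char *name, size_t len)
{
	unsigned int hash = 0;
	int bucket;

	/* assumed string hash: the traditional AFS dir hash step */
	for (size_t i = 0; i < len; i++)
		hash = hash * 173 + name[i];

	bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1);
	if (hash > INT_MAX) {	/* would be negative as a signed int */
		bucket = AFS_DIR_HASHTBL_SIZE - bucket;
		bucket &= (AFS_DIR_HASHTBL_SIZE - 1);
	}
	return bucket;
}
```
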
/linux/include/trace/events/bcache.h
     68:         __field(size_t, bucket)
     72:         __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
     75:         TP_printk("bucket %zu", __entry->bucket)
    267:         __field(size_t, bucket)
    273:         __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    278:         TP_printk("bucket %zu written block %u + %u",
    279:                 __entry->bucket, __entry->block, __entry->keys)
    370:         __field(size_t, bucket)
    375:         __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    379:         TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
    [all …]

/linux/fs/nfs/pnfs_nfs.c
     65: pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)  (argument, in pnfs_free_bucket_lseg())
     67: if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {
     68:         struct pnfs_layout_segment *freeme = bucket->lseg;
     69:         bucket->lseg = NULL;
     76: * If this will make the bucket empty, it will need to put the lseg reference.
     83: struct pnfs_commit_bucket *bucket = NULL;  (local in pnfs_generic_clear_request_commit())
     89: bucket = list_first_entry(&req->wb_list,
     93: if (bucket)
     94:         pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
    240: * @bucket->committing.
    [all …]

/linux/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h
     56: * brcmf_pno_find_reqid_by_bucket - find request id for given bucket index.
     59: * @bucket: index of firmware bucket.
     61: u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
     64: * brcmf_pno_get_bucket_map - determine bucket map for given netinfo.
     67: * @netinfo: netinfo to compare with bucket configuration.

/linux/kernel/dma/debug.c
    249: * Request exclusive access to a hash bucket for a given dma_debug_entry.
    264: * Give up exclusive access to the hash bucket
    266: static void put_hash_bucket(struct hash_bucket *bucket,  (argument, in put_hash_bucket())
    268:         __releases(&bucket->lock)
    270:         spin_unlock_irqrestore(&bucket->lock, flags);
    293: * Search a given entry in the hash bucket list
    295: static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,  (argument)
    302:         list_for_each_entry(entry, &bucket->list, list) {
    345: static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,  (argument)
    348:         return __hash_bucket_find(bucket, ref, exact_match);
    [all …]

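The get/put pair above brackets all work on a bucket's entry list with that bucket's lock. A userspace sketch of the pairing, with a pthread mutex standing in for the kernel's IRQ-saving spinlock and an assumed table size:

```c
#include <pthread.h>

#define HASH_SIZE 1024	/* assumed table size */

struct entry;		/* opaque payload for the sketch */

struct hash_bucket {
	pthread_mutex_t lock;
	struct entry   *list;
};

static struct hash_bucket dma_entry_hash[HASH_SIZE];

/*
 * Hash the key to a bucket, take that bucket's lock, and hand the
 * locked bucket back. Every get must be matched by a put.
 */
static struct hash_bucket *get_hash_bucket(unsigned long key)
{
	struct hash_bucket *bucket = &dma_entry_hash[key % HASH_SIZE];

	pthread_mutex_lock(&bucket->lock);
	return bucket;
}

static void put_hash_bucket(struct hash_bucket *bucket)
{
	pthread_mutex_unlock(&bucket->lock);
}
```
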
/linux/include/linux/rhashtable.h
     36: * the hash bucket. This allows us to be sure we've found the end
     38: * The value stored in the hash bucket has BIT(0) used as a lock bit.
     42: * pointer stored in the bucket. This struct needs to be defined so
     68: * @rehash: Current bucket being rehashed
    308: * We lock a bucket by setting BIT(0) in the pointer - this is always
    309: * zero in real pointers. The NULLS mark is never stored in the bucket,
    310: * rather we store NULL if the bucket is empty.
    312: * of the hashtable design is to achieve minimum per-bucket contention.
    313: * A nested hash table might not have a bucket pointer. In that case
    314: * we cannot get a lock. For remove and replace the bucket cannot be
    [all …]

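The rhashtable comments describe locking a bucket by setting BIT(0) of the bucket's head pointer, which is always zero in a real (aligned) pointer, so the lock costs no extra storage. A C11 sketch of that embedded bit lock:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Spin until BIT(0) of the bucket head is ours; acquire on success. */
static void bucket_lock(_Atomic uintptr_t *bucket)
{
	uintptr_t old;

	for (;;) {
		old = atomic_load_explicit(bucket, memory_order_relaxed);
		old &= ~(uintptr_t)1;	/* expect the lock bit clear */
		if (atomic_compare_exchange_weak_explicit(bucket, &old,
							  old | 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return;
	}
}

/* Clear BIT(0), publishing any list updates made under the lock. */
static void bucket_unlock(_Atomic uintptr_t *bucket)
{
	atomic_fetch_and_explicit(bucket, ~(uintptr_t)1,
				  memory_order_release);
}
```
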
/linux/tools/testing/selftests/drivers/net/hw/ethtool_rmon.sh
     35: local bucket=$1; shift
     50:         jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
     58:         jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
     78: while read -r -a bucket; do
     79:         step="$set-pkts${bucket[0]}to${bucket[1]} on $iface"
     82:         if ! ensure_mtu $if ${bucket[0]}; then
     88:         if ! bucket_test $iface $neigh $set $nbuckets ${bucket[0]}; then

/linux/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c
    272: /* Close a socket we've already seen to remove it from the bucket. */  (in remove_seen())
    308: /* Close a socket we've already seen to remove it from the bucket. */  (in remove_seen_established())
    340: /* Close what would be the next socket in the bucket to exercise the  (in remove_unseen())
    383: /* Close what would be the next socket in the bucket to exercise the  (in remove_unseen_established())
    495: /* Double the number of sockets in the bucket. */  (in add_some())
    535: /* Double the number of established sockets in the bucket. */  (in add_some_established())
    565: /* Double the number of sockets in the bucket to force a realloc on the  (in force_realloc())
    646: * bucket's list.
    657: * bucket's list, needing to be added to the next batch to force
    693: * bucket's list.
    [all …]

/linux/net/openvswitch/meter.c
    386: /* Figure out max delta_t that is enough to fill any bucket.  (in dp_meter_create())
    387:  * Keep max_delta_t size to the bucket units:
    390:  * Start with a full bucket.
    392: band->bucket = band->burst_size * 1000ULL;
    393: band_max_delta_t = div_u64(band->bucket, band->rate);
    622: /* Make sure delta_ms will not be too large, so that bucket will not  (in ovs_meter_execute())
    634: /* Bucket rate is either in kilobits per second, or in packets per
    635:  * second. We maintain the bucket in the units of either bits or
    638:  * bucket units:
    642:  * 'cost' is the number of bucket units in this packet.
    [all …]

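The meter comments describe a classic token bucket kept in milli-units (burst_size * 1000) so per-millisecond refills stay integral. A sketch with assumed field names; the real code also caps delta_ms so a refill cannot overflow the arithmetic:

```c
#include <stdbool.h>
#include <stdint.h>

struct band {
	uint64_t rate;		/* units (kbit or packets) per second */
	uint64_t burst_size;	/* bucket capacity, in units */
	uint64_t bucket;	/* current fill, in units * 1000 */
};

/*
 * Refill for the elapsed time, clamp to a full bucket, then try to pay
 * 'cost' bucket units for this packet. Returning false means the band
 * triggers (e.g. the packet is dropped).
 */
static bool band_admit(struct band *b, uint64_t delta_ms, uint64_t cost)
{
	uint64_t max = b->burst_size * 1000ULL;

	b->bucket += delta_ms * b->rate;
	if (b->bucket > max)
		b->bucket = max;	/* never exceed the burst size */

	if (b->bucket < cost)
		return false;		/* over rate: band triggers */
	b->bucket -= cost;
	return true;
}
```
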
/linux/fs/xfs/scrub/agheader_repair.c
    821: /* heads of the unlinked inode bucket lists */
   1009: * Given an @agino, look up the next inode in the iunlink bucket. Returns
   1070: * Walk an AGI unlinked bucket's list to load incore any unlinked inodes that
   1077:                            unsigned int bucket)  (argument, in xrep_iunlink_walk_ondisk_bucket())
   1085: next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
   1092: trace_xrep_iunlink_walk_ondisk_bucket(sc->sa.pag, bucket,
   1095: if (bucket != agino % XFS_AGI_UNLINKED_BUCKETS)
   1138: unsigned int bucket;  (local in xrep_iunlink_visit())
   1145: bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
   1147: trace_xrep_iunlink_visit(ragi->sc->sa.pag, bucket,
    [all …]

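The `bucket != agino % XFS_AGI_UNLINKED_BUCKETS` check above encodes the consistency rule: an unlinked inode must live on the list of the bucket its AG inode number hashes to, so the repair walk can reject inodes chained into the wrong bucket. A one-function sketch of that check (64 buckets being the on-disk constant):

```c
#include <stdbool.h>
#include <stdint.h>

#define XFS_AGI_UNLINKED_BUCKETS 64

/* An unlinked agino belongs in bucket agino % 64; anything else is corrupt. */
static bool iunlink_bucket_ok(uint32_t agino, unsigned int bucket)
{
	return bucket == agino % XFS_AGI_UNLINKED_BUCKETS;
}
```
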
/linux/include/uapi/linux/nexthop.h
     71: /* nested; nexthop bucket attributes */
     98: /* clock_t as u32; nexthop bucket idle timer (per-group) */
    115: /* u16; nexthop bucket index */
    117: /* clock_t as u64; nexthop bucket idle time */
    119: /* u32; nexthop id assigned to the nexthop bucket */