/linux/tools/testing/selftests/drivers/net/hw/
ethtool_rmon.sh
    73   local nbuckets=0
    88   if ! bucket_test $iface $neigh $set $nbuckets ${bucket[0]}; then
    93   nbuckets=$((nbuckets + 1))
    97   if [ $nbuckets -eq 0 ]; then
/linux/fs/omfs/
dir.c
    27   int nbuckets = (dir->i_size - OMFS_DIR_START)/8;   in omfs_get_bucket() local
    28   int bucket = omfs_hash(name, namelen, nbuckets);   in omfs_get_bucket()
    218  int nbuckets = (inode->i_size - OMFS_DIR_START) / 8;   in omfs_dir_is_empty() local
    230  for (i = 0; i < nbuckets; i++, ptr++)   in omfs_dir_is_empty()
    414  int nbuckets;   in omfs_readdir() local
    425  nbuckets = (dir->i_size - OMFS_DIR_START) / 8;   in omfs_readdir()
    437  for (; hchain < nbuckets; hchain++) {   in omfs_readdir()
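The omfs hits above repeat one small calculation: a directory's bucket count is its size minus the fixed OMFS_DIR_START header, divided by the 8 bytes each bucket pointer occupies, and omfs_hash() then reduces a file name into that range. A minimal userspace sketch of the same arithmetic; the header size value and the hash used here are assumptions for illustration, not the kernel's actual omfs_hash().

#include <stdint.h>
#include <stddef.h>

#define OMFS_DIR_START 0x1b8	/* assumed size of the directory block header */

/* Illustrative stand-in for omfs_hash(): any stable hash reduced mod nbuckets. */
static int toy_hash(const char *name, size_t namelen, int nbuckets)
{
	uint32_t h = 0;
	size_t i;

	for (i = 0; i < namelen; i++)
		h = h * 31 + (unsigned char)name[i];
	return (int)(h % (uint32_t)nbuckets);
}

/*
 * Each bucket slot is an 8-byte block pointer, so the bucket count falls out
 * of the directory's size, as in omfs_get_bucket() and omfs_readdir().
 */
static int toy_pick_bucket(uint64_t dir_size, const char *name, size_t namelen)
{
	int nbuckets = (int)((dir_size - OMFS_DIR_START) / 8);

	return toy_hash(name, namelen, nbuckets);
}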
/linux/fs/nfs/
pnfs_nfs.c
    105  p->nbuckets = n;   in pnfs_alloc_commit_array()
    260  unsigned int nbuckets,   in pnfs_bucket_scan_array() argument
    266  for (i = 0; i < nbuckets && max != 0; i++) {   in pnfs_bucket_scan_array()
    289  array->nbuckets, max);   in pnfs_generic_scan_commit_lists()
    305  unsigned int nbuckets,   in pnfs_bucket_recover_commit_reqs() argument
    314  for (i = 0, b = buckets; i < nbuckets; i++, b++) {   in pnfs_bucket_recover_commit_reqs()
    344  array->nbuckets,   in pnfs_generic_recover_commit_reqs()
    384  unsigned int nbuckets,   in pnfs_generic_retry_commit() argument
    392  for (bucket = buckets; idx < nbuckets; bucket++, idx++) {   in pnfs_generic_retry_commit()
    406  unsigned int nbuckets,   in pnfs_bucket_alloc_ds_commits() argument
    [all …]
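In the pNFS commit code above, nbuckets is the length of a bucket array stored next to the buckets themselves, and each helper takes the (buckets, nbuckets) pair and walks it, sometimes bounded by a max count. A hedged sketch of that shape with made-up types, not the real NFS commit structures:

#include <stdlib.h>

/* Hypothetical stand-ins for the pNFS commit bucket structures. */
struct toy_bucket {
	unsigned int nwritten;
};

struct toy_commit_array {
	unsigned int nbuckets;
	struct toy_bucket buckets[];	/* sized when the array is allocated */
};

static struct toy_commit_array *toy_alloc_commit_array(unsigned int n)
{
	struct toy_commit_array *p;

	p = calloc(1, sizeof(*p) + n * sizeof(p->buckets[0]));
	if (p)
		p->nbuckets = n;	/* mirrors "p->nbuckets = n" in the listing */
	return p;
}

/* Walk at most @max buckets, the loop shape of pnfs_bucket_scan_array(). */
static unsigned int toy_scan_buckets(struct toy_bucket *buckets,
				     unsigned int nbuckets, int max)
{
	unsigned int i, found = 0;

	for (i = 0; i < nbuckets && max != 0; i++) {
		if (buckets[i].nwritten) {
			found++;
			max--;
		}
	}
	return found;
}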
internal.h
    623  unsigned int nbuckets)   in pnfs_bucket_clear_pnfs_ds_commit_verifiers() argument
    627  for (i = 0; i < nbuckets; i++)   in pnfs_bucket_clear_pnfs_ds_commit_verifiers()
    638  array->nbuckets);   in nfs_clear_pnfs_ds_commit_verifiers()
/linux/fs/bcachefs/
journal_sb.c
    53   if (b[nr - 1] >= le64_to_cpu(m.nbuckets)) {   in bch2_sb_journal_validate()
    55   b[nr - 1], le64_to_cpu(m.nbuckets));   in bch2_sb_journal_validate()
    147  if (b[nr - 1].end > le64_to_cpu(m.nbuckets)) {   in bch2_sb_journal_v2_validate()
    149  b[nr - 1].end - 1, le64_to_cpu(m.nbuckets));   in bch2_sb_journal_v2_validate()
sb-members.c
    139  if (le64_to_cpu(m.nbuckets) > BCH_MEMBER_NBUCKETS_MAX) {   in validate_member()
    141  i, le64_to_cpu(m.nbuckets), BCH_MEMBER_NBUCKETS_MAX);   in validate_member()
    145  if (le64_to_cpu(m.nbuckets) -   in validate_member()
    148  i, le64_to_cpu(m.nbuckets), BCH_MIN_NR_NBUCKETS);   in validate_member()
    182  u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;   in member_to_text()
    224  prt_printf(out, "Buckets:\t%llu\n", le64_to_cpu(m.nbuckets));   in member_to_text()
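validate_member() bounds-checks the on-disk bucket count in both directions, and member_to_text() reports the device size as nbuckets times the bucket size. A sketch of those checks with placeholder limits standing in for BCH_MEMBER_NBUCKETS_MAX and BCH_MIN_NR_NBUCKETS (the real values live in the bcachefs format headers):

#include <stdint.h>
#include <stdbool.h>

/* Placeholder limits, not the real bcachefs constants. */
#define TOY_MEMBER_NBUCKETS_MAX	(1ULL << 30)
#define TOY_MIN_NR_BUCKETS	64

struct toy_member {
	uint64_t nbuckets;
	uint64_t first_bucket;
	uint32_t bucket_size;	/* in sectors */
};

static bool toy_validate_member(const struct toy_member *m)
{
	if (m->nbuckets > TOY_MEMBER_NBUCKETS_MAX)
		return false;	/* too many buckets for the on-disk format */
	if (m->nbuckets - m->first_bucket < TOY_MIN_NR_BUCKETS)
		return false;	/* not enough usable buckets */
	return true;
}

static uint64_t toy_device_size(const struct toy_member *m)
{
	/* member_to_text() reports: device size = nbuckets * bucket_size */
	return m->nbuckets * (uint64_t)m->bucket_size;
}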
buckets.c
    97    prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);   in bch2_dev_usage_to_text()
    1042  if (b >= ca->mi.nbuckets)   in bch2_trans_mark_metadata_bucket()
    1241  ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *   in bch2_buckets_nouse_alloc()
    1261  int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)   in bch2_dev_buckets_resize() argument
    1269  if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets,   in bch2_dev_buckets_resize()
    1276  bucket_gens->nbuckets = nbuckets;   in bch2_dev_buckets_resize()
    1278  bucket_gens->nbuckets - bucket_gens->first_bucket;   in bch2_dev_buckets_resize()
    1288  size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);   in bch2_dev_buckets_resize()
    1298  nbuckets = ca->mi.nbuckets;   in bch2_dev_buckets_resize()
    1326  return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);   in bch2_dev_buckets_alloc()
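bch2_dev_buckets_resize() allocates a bucket_gens array sized for the requested nbuckets, copies min(old, new) generation entries across, and falls back to the member's current nbuckets if allocation fails. A simplified grow/shrink-and-copy sketch with toy types rather than the real bucket_gens layout:

#include <stdlib.h>
#include <string.h>

struct toy_bucket_gens {
	size_t nbuckets;
	size_t first_bucket;
	unsigned char gens[];	/* one generation byte per bucket */
};

static struct toy_bucket_gens *toy_buckets_resize(struct toy_bucket_gens *old_gens,
						  size_t nbuckets)
{
	struct toy_bucket_gens *new_gens;

	new_gens = calloc(1, sizeof(*new_gens) + nbuckets);
	if (!new_gens)
		return NULL;	/* caller keeps the old array on failure */

	new_gens->nbuckets = nbuckets;
	new_gens->first_bucket = old_gens ? old_gens->first_bucket : 0;

	if (old_gens) {
		/* copy the overlap, like the min(old, new) copy in the listing */
		size_t n = old_gens->nbuckets < nbuckets ? old_gens->nbuckets : nbuckets;

		memcpy(new_gens->gens, old_gens->gens, n);
		free(old_gens);
	}
	return new_gens;
}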
super.c
    1399  ca->mi.bucket_size * ca->mi.nbuckets) {   in __bch2_dev_attach_bdev()
    1873  ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);   in bch2_dev_online()
    1922  int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)   in bch2_dev_resize() argument
    1929  old_nbuckets = ca->mi.nbuckets;   in bch2_dev_resize()
    1931  if (nbuckets < ca->mi.nbuckets) {   in bch2_dev_resize()
    1937  if (nbuckets > BCH_MEMBER_NBUCKETS_MAX) {   in bch2_dev_resize()
    1939  nbuckets, BCH_MEMBER_NBUCKETS_MAX);   in bch2_dev_resize()
    1946  ca->mi.bucket_size * nbuckets) {   in bch2_dev_resize()
    1952  ret = bch2_dev_buckets_resize(c, ca, nbuckets);   in bch2_dev_resize()
    1963  m->nbuckets = cpu_to_le64(nbuckets);   in bch2_dev_resize()
    [all …]
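bch2_dev_resize() only grows a device: it rejects an nbuckets below the current count or above BCH_MEMBER_NBUCKETS_MAX, checks that the block device really holds bucket_size * nbuckets sectors, resizes the in-memory bucket arrays, and finally writes the new count into the superblock member. A condensed sketch of just the gatekeeping checks, with a placeholder limit:

#include <stdint.h>

#define TOY_NBUCKETS_MAX (1ULL << 30)	/* placeholder for BCH_MEMBER_NBUCKETS_MAX */

/* Returns 0 if the resize may proceed, -1 otherwise. */
static int toy_dev_resize_checks(uint64_t cur_nbuckets, uint64_t new_nbuckets,
				 uint64_t bucket_size, uint64_t dev_sectors)
{
	if (new_nbuckets < cur_nbuckets)
		return -1;	/* shrinking is refused in this path */
	if (new_nbuckets > TOY_NBUCKETS_MAX)
		return -1;	/* beyond the format's per-member limit */
	if (dev_sectors < bucket_size * new_nbuckets)
		return -1;	/* backing device is too small */
	return 0;
}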
bcachefs_ioctl.h
    374  __u64 nbuckets;   member
    387  __u64 nbuckets;   member
sb-members.h
    323  .nbuckets = le64_to_cpu(mi->nbuckets),   in bch2_mi_to_cpu()
    324  .nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -   in bch2_mi_to_cpu()
buckets.h
    40   _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
    215  reserved += ca->mi.nbuckets >> 6;   in bch2_dev_buckets_reserved()
    218  reserved += ca->mi.nbuckets >> 6;   in bch2_dev_buckets_reserved()
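In bch2_dev_buckets_reserved(), each watermark tier that must stay allocatable adds ca->mi.nbuckets >> 6, i.e. one sixty-fourth of the device's buckets. A tiny sketch with the tier logic reduced to a plain count, which is an assumption made to keep the example short:

#include <stdint.h>

/* Each tier that must stay allocatable reserves 1/64 of the device's buckets. */
static uint64_t toy_buckets_reserved(uint64_t nbuckets, int tiers_above)
{
	uint64_t reserved = 0;
	int i;

	for (i = 0; i < tiers_above; i++)
		reserved += nbuckets >> 6;
	return reserved;
}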
buckets_types.h
    25   size_t nbuckets;   member
alloc_background.c
    624   b < min_t(u64, ca->mi.nbuckets, end);   in bch2_alloc_read()
    647   if (k.k->p.offset >= ca->mi.nbuckets) {   in bch2_alloc_read()
    1071  if (bucket->offset < (*ca)->mi.nbuckets)   in next_bucket()
    1114  if (k.k->p.offset > (*ca)->mi.nbuckets)   in bch2_get_key_or_real_bucket_hole()
    1115  bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);   in bch2_get_key_or_real_bucket_hole()
    1462  start >= ca->mi.nbuckets,   in bch2_check_bucket_gens_key()
    1478  for (b = ca->mi.nbuckets; b < end; b++)   in bch2_check_bucket_gens_key()
    2198  BUG_ON(bucket_end > ca->mi.nbuckets);   in bch2_dev_freespace_init()
    2210  __func__, iter.pos.offset, ca->mi.nbuckets);   in bch2_dev_freespace_init()
    2304  ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);   in bch2_fs_freespace_init()
    [all …]
chardev.c
    604  arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;   in bch2_ioctl_dev_usage()
    646  arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;   in bch2_ioctl_dev_usage_v2()
    746  ret = bch2_dev_resize(c, ca, arg.nbuckets);   in bch2_ioctl_disk_resize()
    765  if (arg.nbuckets > U32_MAX)   in bch2_ioctl_disk_resize_journal()
    772  ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);   in bch2_ioctl_disk_resize_journal()
sb-members_format.h
    45   __le64 nbuckets;   /* device size */   member
sysfs.c
    156  read_attribute(nbuckets);
    778  sysfs_print(nbuckets, ca->mi.nbuckets);   in SHOW()
btree_gc.c
    900   POS(ca->dev_idx, ca->mi.nbuckets - 1),   in bch2_gc_alloc_done()
    919   ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);   in bch2_gc_alloc_start()
    1257  ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);   in bch2_gc_gens()
    1265  b < gens->nbuckets; b++)   in bch2_gc_gens()
btree_node_scan.c
    223  for (u64 bucket = ca->mi.first_bucket; bucket < ca->mi.nbuckets; bucket++)   in read_btree_nodes_worker()
    229  u64 end_sector = ca->mi.nbuckets * ca->mi.bucket_size;   in read_btree_nodes_worker()
/linux/drivers/md/bcache/
alloc.c
    90   unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024;   in bch_rescale_priorities()
    244  ca->fifo_last_bucket >= ca->sb.nbuckets)   in invalidate_buckets_fifo()
    252  if (++checked >= ca->sb.nbuckets) {   in invalidate_buckets_fifo()
    270  n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);   in invalidate_buckets_random()
    278  if (++checked >= ca->sb.nbuckets / 2) {   in invalidate_buckets_random()
    494  if (ca->set->avail_nbuckets < ca->set->nbuckets) {   in __bch_bucket_free()
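invalidate_buckets_fifo() and invalidate_buckets_random() both scan the device's bucket range and give up after checking on the order of nbuckets candidates; the random variant reduces a random value into [first_bucket, nbuckets). A one-function sketch of that reduction, with rand() standing in for the kernel's random source:

#include <stdlib.h>

/*
 * Reduce a random draw into the usable bucket range, the way
 * invalidate_buckets_random() does with "n %= nbuckets - first_bucket".
 */
static size_t toy_pick_random_bucket(size_t first_bucket, size_t nbuckets)
{
	size_t n = (size_t)rand();	/* illustrative randomness only */

	n %= nbuckets - first_bucket;
	return first_bucket + n;
}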
sysfs.c
    68    read_attribute(nbuckets);
    1040  sysfs_print(nbuckets, ca->sb.nbuckets);   in SHOW()
    1059  size_t n = ca->sb.nbuckets, i;   in SHOW()
    1067  ca->sb.nbuckets));   in SHOW()
    1119  unused * 100 / (size_t) ca->sb.nbuckets,   in SHOW()
    1120  available * 100 / (size_t) ca->sb.nbuckets,   in SHOW()
    1121  dirty * 100 / (size_t) ca->sb.nbuckets,   in SHOW()
    1122  meta * 100 / (size_t) ca->sb.nbuckets, sum,   in SHOW()
super.c
    98    sb->nbuckets = le64_to_cpu(s->nbuckets);   in read_super_common()
    109   if (sb->nbuckets > LONG_MAX)   in read_super_common()
    113   if (sb->nbuckets < 1 << 7)   in read_super_common()
    134   sb->bucket_size * sb->nbuckets)   in read_super_common()
    153   if (sb->first_bucket + sb->keys > sb->nbuckets)   in read_super_common()
    649   b < ca->buckets + ca->sb.nbuckets && d < end;   in bch_prio_write()
    700   b < ca->buckets + ca->sb.nbuckets;   in prio_read()
    1975  c->nbuckets = ca->sb.nbuckets;   in run_cache_set()
    2063  ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,   in run_cache_set()
    2245  free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;   in cache_alloc()
    [all …]
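read_super_common() sanity-checks the nbuckets it reads from the cache superblock: it must fit in a long, be at least 1 << 7, the backing device must hold bucket_size * nbuckets sectors, and first_bucket plus the journal key count must not run past nbuckets. A compact sketch of the same checks on a toy superblock; the field layout and the error strings here are illustrative, not the on-disk format:

#include <stdint.h>
#include <limits.h>

struct toy_cache_sb {
	uint64_t nbuckets;
	uint16_t first_bucket;
	uint16_t keys;		/* journal buckets recorded in the superblock */
	uint16_t bucket_size;	/* in sectors */
};

/* Returns NULL when valid, otherwise a short reason. */
static const char *toy_validate_sb(const struct toy_cache_sb *sb, uint64_t dev_sectors)
{
	if (sb->nbuckets > LONG_MAX)
		return "too many buckets";
	if (sb->nbuckets < 1 << 7)
		return "not enough buckets";
	if (dev_sectors < (uint64_t)sb->bucket_size * sb->nbuckets)
		return "device too small for bucket_size * nbuckets";
	if ((uint64_t)sb->first_bucket + sb->keys > sb->nbuckets)
		return "first_bucket + keys runs past nbuckets";
	return NULL;
}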
bcache_ondisk.h
    189  __le64 nbuckets;   /* device size */   member
    249  __u64 nbuckets;   /* device size */   member
bcache.h
    634  size_t nbuckets;   member
    796  DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
    893  b < (ca)->buckets + (ca)->sb.nbuckets; b++)
extents.c
    60   bucket >= ca->sb.nbuckets)   in __ptr_invalid()
    83   if (bucket >= ca->sb.nbuckets)   in bch_ptr_status()
    140  if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)   in bch_bkey_dump()
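__ptr_invalid() and bch_ptr_status() reject extent pointers whose bucket number is at or past sb.nbuckets, and bch_bkey_dump() also checks the lower bound against first_bucket. A one-function sketch of that range test:

#include <stdint.h>
#include <stdbool.h>

/* A bucket referenced by an extent pointer must lie in [first_bucket, nbuckets). */
static bool toy_bucket_in_range(uint64_t bucket, uint64_t first_bucket,
				uint64_t nbuckets)
{
	return bucket >= first_bucket && bucket < nbuckets;
}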
/linux/lib/
rhashtable.c
    149  size_t nbuckets,   in nested_bucket_table_alloc() argument
    156  if (nbuckets < (1 << (shift + 1)))   in nested_bucket_table_alloc()
    172  tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;   in nested_bucket_table_alloc()
    178  size_t nbuckets,   in bucket_table_alloc() argument
    187  kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),   in bucket_table_alloc()
    190  size = nbuckets;   in bucket_table_alloc()
    193  tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);   in bucket_table_alloc()
    194  nbuckets = 0;   in bucket_table_alloc()
    209  for (i = 0; i < nbuckets; i++)   in bucket_table_alloc()
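bucket_table_alloc() first tries a flat allocation sized with struct_size(tbl, buckets, nbuckets); if that fails it falls back to nested_bucket_table_alloc(), which derives the nesting depth from ilog2(nbuckets) and a per-level shift. A hedged userspace sketch of that flat-then-nested fallback shape, not the kernel API:

#include <stdlib.h>
#include <stddef.h>

struct toy_bucket_table {
	size_t size;		/* number of buckets the table represents */
	unsigned int nest;	/* 0 = flat table, >0 = nested */
	void *buckets[];
};

static struct toy_bucket_table *toy_table_alloc(size_t nbuckets)
{
	struct toy_bucket_table *tbl;

	/* flat attempt, analogous to struct_size(tbl, buckets, nbuckets) */
	tbl = calloc(1, sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]));
	if (tbl) {
		tbl->size = nbuckets;
		tbl->nest = 0;
		return tbl;
	}

	/*
	 * Fallback: the kernel builds a nested table and records
	 * nest = (ilog2(nbuckets) - 1) % shift + 1; this sketch only keeps
	 * a single top-level slot so the example stays short.
	 */
	tbl = calloc(1, sizeof(*tbl) + sizeof(tbl->buckets[0]));
	if (!tbl)
		return NULL;
	tbl->size = nbuckets;
	tbl->nest = 1;
	return tbl;
}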