| /linux/drivers/net/wireless/broadcom/b43legacy/ |
| H A D | main.h |
    25  #define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]   argument
    26  #define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)   argument
    28  #define PAD_BYTES(nr_bytes) P4D_BYTES(__LINE__ , (nr_bytes))   argument
|
| /linux/drivers/net/wireless/broadcom/b43/ |
| H A D | main.h |
    23  #define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]   argument
    24  #define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)   argument
    26  #define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes))   argument
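The two-level expansion in both b43 drivers exists so that __LINE__ is replaced by its numeric value before ## pastes it into the field name; pasting __LINE__ directly would produce the literal identifier __p4dding__LINE__ for every use. A minimal user-space re-creation of the idiom (not the driver source; the u8 type is swapped for unsigned char so it compiles standalone):

    /* Illustrative re-creation of the b43 padding idiom: the extra expansion
     * level makes __LINE__ expand to a number before ## pastes it. */
    #include <stdio.h>

    #define P4D_BYT3S(magic, nr_bytes)  unsigned char __p4dding##magic[nr_bytes]
    #define P4D_BYTES(line, nr_bytes)   P4D_BYT3S(line, nr_bytes)
    #define PAD_BYTES(nr_bytes)         P4D_BYTES(__LINE__, (nr_bytes))

    struct demo {
        unsigned int flags;
        PAD_BYTES(2);        /* becomes: unsigned char __p4dding<thisline>[2]; */
        unsigned short seq;
    };

    int main(void)
    {
        printf("sizeof(struct demo) = %zu\n", sizeof(struct demo));
        return 0;
    }

Each PAD_BYTES() use on a distinct source line therefore produces a uniquely named padding member.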
|
| /linux/drivers/md/ |
| H A D | dm-path-selector.h |
    70  struct dm_path *(*select_path)(struct path_selector *ps, size_t nr_bytes);
    90  size_t nr_bytes);
    92  size_t nr_bytes, u64 start_time);
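The select_path() hook at line 70 is the heart of the path-selector interface: device-mapper multipath asks the active selector for a path, passing the size of the I/O it is about to issue. Below is a user-space model of that callback shape; the struct layouts are simplified stand-ins, not the kernel's struct path_selector and struct dm_path:

    #include <stddef.h>
    #include <stdio.h>

    struct dm_path { const char *name; };

    struct path_selector {
        void *context;                       /* selector-private state */
    };

    struct path_selector_type {
        const char *name;
        struct dm_path *(*select_path)(struct path_selector *ps, size_t nr_bytes);
    };

    /* Trivial policy: always return the first path, ignoring nr_bytes. */
    static struct dm_path *first_select_path(struct path_selector *ps, size_t nr_bytes)
    {
        (void)nr_bytes;
        return ((struct dm_path **)ps->context)[0];
    }

    int main(void)
    {
        struct dm_path a = { "8:16" }, b = { "8:32" };
        struct dm_path *paths[] = { &a, &b };
        struct path_selector ps = { .context = paths };
        struct path_selector_type type = { "first-path", first_select_path };

        /* The caller passes the size of the I/O about to be issued. */
        struct dm_path *p = type.select_path(&ps, 4096);
        printf("selected %s\n", p->name);
        return 0;
    }

The real selectors below use nr_bytes to weigh how much each candidate path already has queued.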
|
| H A D | dm-ps-service-time.c |
    277  static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)   in st_select_path() argument
    289  if (!best || (st_compare_load(pi, best, nr_bytes) < 0))   in st_select_path()
    305  size_t nr_bytes)   in st_start_io() argument
    309  atomic_add(nr_bytes, &pi->in_flight_size);   in st_start_io()
    315  size_t nr_bytes, u64 start_time)   in st_end_io() argument
    319  atomic_sub(nr_bytes, &pi->in_flight_size);   in st_end_io()
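The service-time selector tracks bytes in flight per path (atomic_add/atomic_sub at lines 309 and 319) and, in st_select_path(), prefers the path whose estimated service time for the incoming nr_bytes is lowest. The sketch below is a rough user-space approximation of that comparison, assuming a weighted-load heuristic in the spirit of st_compare_load(); the kernel version also special-cases a zero relative_throughput:

    #include <stddef.h>
    #include <stdio.h>

    struct path_info {
        const char *name;
        size_t in_flight_size;         /* bytes currently outstanding */
        unsigned relative_throughput;  /* bigger = faster path */
    };

    /* <0 means p1 looks better, >0 means p2 looks better. */
    static long compare_load(const struct path_info *p1, const struct path_info *p2,
                             size_t nr_bytes)
    {
        /* Cross-multiply to compare (size1 / tput1) vs (size2 / tput2) without division. */
        unsigned long long s1 = (p1->in_flight_size + nr_bytes) * (unsigned long long)p2->relative_throughput;
        unsigned long long s2 = (p2->in_flight_size + nr_bytes) * (unsigned long long)p1->relative_throughput;
        return s1 < s2 ? -1 : (s1 > s2 ? 1 : 0);
    }

    int main(void)
    {
        struct path_info fast = { "fast", 1 << 20, 100 };  /* 1 MiB in flight  */
        struct path_info slow = { "slow", 64 << 10, 10 };  /* 64 KiB in flight */
        size_t nr_bytes = 128 << 10;

        printf("pick %s\n", compare_load(&fast, &slow, nr_bytes) < 0 ? fast.name : slow.name);
        return 0;
    }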
|
| H A D | dm-ps-queue-length.c |
    191  static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)   in ql_select_path() argument
    224  size_t nr_bytes)   in ql_start_io() argument
    234  size_t nr_bytes, u64 start_time)   in ql_end_io() argument
|
| H A D | dm-mpath.c |
    113  size_t nr_bytes;   member
    303  mpio->nr_bytes = bio->bi_iter.bi_size;   in multipath_init_per_bio_data()
    379  size_t nr_bytes)   in choose_path_in_pg() argument
    385  path = pg->ps.type->select_path(&pg->ps, nr_bytes);   in choose_path_in_pg()
    402  static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)   in choose_pgpath() argument
    419  pgpath = choose_path_in_pg(m, pg, nr_bytes);   in choose_pgpath()
    434  pgpath = choose_path_in_pg(m, pg, nr_bytes);   in choose_pgpath()
    449  pgpath = choose_path_in_pg(m, pg, nr_bytes);   in choose_pgpath()
    511  size_t nr_bytes = blk_rq_bytes(rq);   in multipath_clone_and_map() local
    521  pgpath = choose_pgpath(m, nr_bytes);   in multipath_clone_and_map()
    [all …]
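dm-mpath derives nr_bytes from the I/O itself (bio->bi_iter.bi_size for bio-based maps at line 303, blk_rq_bytes() for request-based clones at line 511) and choose_pgpath() tries priority groups in a fallback order, which is why choose_path_in_pg() appears three times. The sketch below is a simplified model of that fallback idea: prefer groups that are not bypassed, then retry including bypassed groups as a last resort. The real code also honors an explicit group-switch request first:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pgpath { const char *name; bool usable; };

    struct priority_group {
        bool bypassed;
        struct pgpath *path;
    };

    static struct pgpath *choose_path_in_pg(struct priority_group *pg, size_t nr_bytes)
    {
        (void)nr_bytes;                       /* a real selector would weigh this */
        return pg->path->usable ? pg->path : NULL;
    }

    static struct pgpath *choose_pgpath(struct priority_group *pgs, int nr_pgs, size_t nr_bytes)
    {
        for (int pass = 0; pass < 2; pass++) {            /* pass 0: skip bypassed PGs */
            for (int i = 0; i < nr_pgs; i++) {
                if (pass == 0 && pgs[i].bypassed)
                    continue;
                struct pgpath *p = choose_path_in_pg(&pgs[i], nr_bytes);
                if (p)
                    return p;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct pgpath a = { "sda", false }, b = { "sdb", true };
        struct priority_group pgs[] = { { false, &a }, { true, &b } };

        struct pgpath *p = choose_pgpath(pgs, 2, 4096);
        printf("%s\n", p ? p->name : "no usable path");   /* falls back to the bypassed group */
        return 0;
    }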
|
| H A D | dm-ps-historical-service-time.c |
    431  size_t nr_bytes)   in hst_select_path() argument
    462  size_t nr_bytes)   in hst_start_io() argument
    493  size_t nr_bytes, u64 start_time)   in hst_end_io() argument
|
| H A D | dm-rq.c |
    83  unsigned int nr_bytes = info->orig->bi_iter.bi_size;   in end_clone_bio() local
    110  tio->completed += nr_bytes;   in end_clone_bio()
|
| H A D | dm-ps-round-robin.c | 189 static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) in rr_select_path() argument
|
| H A D | dm-ps-io-affinity.c | 197 size_t nr_bytes) in ioa_select_path() argument
|
| /linux/drivers/acpi/ |
| H A D | nvs.c |
    103  unsigned int nr_bytes;   in suspend_nvs_register() local
    111  nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);   in suspend_nvs_register()
    112  entry->size = (size < nr_bytes) ? size : nr_bytes;   in suspend_nvs_register()
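suspend_nvs_register() splits the NVS region into entries that never cross a page boundary: line 111 computes the room left in the current page and line 112 caps the entry size at that. A small worked example of the same clamping, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long start = 0x1ff0, size = 0x30;

        while (size > 0) {
            unsigned long nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK); /* room left in this page */
            unsigned long chunk = size < nr_bytes ? size : nr_bytes;

            printf("chunk at 0x%lx, %lu bytes\n", start, chunk);       /* 16 bytes, then 32 bytes */
            start += chunk;
            size -= chunk;
        }
        return 0;
    }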
|
| /linux/block/ |
| H A D | t10-pi.c |
    176  static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)   in t10_pi_type1_complete() argument
    179  unsigned intervals = nr_bytes >> bi->interval_exp;   in t10_pi_type1_complete()
    339  static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)   in ext_pi_type1_complete() argument
    342  unsigned intervals = nr_bytes >> bi->interval_exp;   in ext_pi_type1_complete()
    462  void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)   in blk_integrity_complete() argument
    470  ext_pi_type1_complete(rq, nr_bytes);   in blk_integrity_complete()
    472  t10_pi_type1_complete(rq, nr_bytes);   in blk_integrity_complete()
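On partial request completion the remaining protection information has to be adjusted, so both completion paths convert the completed byte count into protection intervals with a shift by interval_exp, the log2 of the interval size. A quick worked example of that conversion, assuming 512-byte intervals:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_bytes = 64 * 1024;   /* 64 KiB completed so far */
        unsigned int interval_exp = 9;       /* 512-byte intervals      */

        unsigned int intervals = nr_bytes >> interval_exp;
        printf("%u bytes -> %u protection intervals\n", nr_bytes, intervals);  /* 128 */
        return 0;
    }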
|
| /linux/drivers/platform/olpc/ |
| H A D | olpc-xo175-ec.c |
    479  size_t nr_bytes;   in olpc_xo175_ec_cmd() local
    507  nr_bytes = resp_len;   in olpc_xo175_ec_cmd()
    509  nr_bytes = (size_t)ret;   in olpc_xo175_ec_cmd()
    512  resp_len = min(resp_len, nr_bytes);   in olpc_xo175_ec_cmd()
    525  priv->expected_resp_len = nr_bytes;   in olpc_xo175_ec_cmd()
    552  } else if (priv->resp_len != nr_bytes) {   in olpc_xo175_ec_cmd()
    554  cmd, priv->resp_len, nr_bytes);   in olpc_xo175_ec_cmd()
|
| /linux/tools/testing/selftests/kvm/ |
| H A D | mmu_stress_test.c |
    209  uint64_t gpa, nr_bytes;   in spawn_workers() local
    219  nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &   in spawn_workers()
    221  TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);   in spawn_workers()
    223  for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {   in spawn_workers()
    226  info[i].end_gpa = gpa + nr_bytes;   in spawn_workers()
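spawn_workers() hands each vCPU an equal, alignment-rounded slice of the guest physical range: line 219 divides the span by nr_vcpus and masks the result down, and line 223 steps through the range in nr_bytes increments. The same partitioning in a standalone sketch; the 2 MiB alignment below is an assumption for illustration:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start_gpa = 0x100000000ULL, end_gpa = 0x140000000ULL; /* 1 GiB range */
        int nr_vcpus = 3;
        uint64_t align = 2ULL << 20;

        uint64_t nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) & ~(align - 1);

        for (int i = 0; i < nr_vcpus; i++) {
            uint64_t gpa = start_gpa + i * nr_bytes;
            printf("worker %d: [%#" PRIx64 ", %#" PRIx64 ")\n", i, gpa, gpa + nr_bytes);
        }
        return 0;
    }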
|
| /linux/include/trace/events/ |
| H A D | block.h |
    120  TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
    122  TP_ARGS(rq, error, nr_bytes),
    137  __entry->nr_sector = nr_bytes >> 9;
    169  TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
    171  TP_ARGS(rq, error, nr_bytes)
    185  TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
    187  TP_ARGS(rq, error, nr_bytes)
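The block completion tracepoints report sizes in 512-byte sectors, so line 137 converts the completed byte count with a right shift by 9. For example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int nr_bytes = 1 << 20;            /* 1 MiB completed  */
        unsigned int nr_sector = nr_bytes >> 9;     /* = nr_bytes / 512 */

        printf("%u bytes = %u sectors\n", nr_bytes, nr_sector);  /* 2048 sectors */
        return 0;
    }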
|
| /linux/mm/ |
| H A D | memcontrol.c |
    141  unsigned int nr_bytes;   in obj_cgroup_release() local
    165  nr_bytes = atomic_read(&objcg->nr_charged_bytes);   in obj_cgroup_release()
    166  WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));   in obj_cgroup_release()
    167  nr_pages = nr_bytes >> PAGE_SHIFT;   in obj_cgroup_release()
    1797  unsigned int nr_bytes;   member
    2944  static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,   in consume_obj_stock() argument
    2954  if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {   in consume_obj_stock()
    2955  stock->nr_bytes -= nr_bytes;   in consume_obj_stock()
    2959  __account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);   in consume_obj_stock()
    2974  if (stock->nr_bytes) {   in drain_obj_stock()
    [all …]
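consume_obj_stock() serves small sub-page charges from a cached byte reserve: if the requesting objcg matches the cached one and enough bytes remain (line 2954), the charge is taken locally (line 2955); otherwise the caller falls back to the page-granular slow path. obj_cgroup_release() likewise converts any leftover bytes to pages with nr_bytes >> PAGE_SHIFT after checking page alignment. A simplified, single-threaded model of the stock check (the kernel version is per-CPU and interrupt-safe):

    #include <stdbool.h>
    #include <stdio.h>

    struct obj_cgroup { const char *name; };

    struct obj_stock {
        struct obj_cgroup *cached_objcg;
        unsigned int nr_bytes;            /* bytes pre-charged to cached_objcg */
    };

    static bool consume_obj_stock(struct obj_stock *stock, struct obj_cgroup *objcg,
                                  unsigned int nr_bytes)
    {
        if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
            stock->nr_bytes -= nr_bytes;
            return true;                  /* charge satisfied from the local stock */
        }
        return false;                     /* caller must take the slow path */
    }

    int main(void)
    {
        struct obj_cgroup cg = { "demo" };
        struct obj_stock stock = { &cg, 4096 };

        printf("256-byte charge from stock: %s\n", consume_obj_stock(&stock, &cg, 256) ? "yes" : "no");
        printf("remaining stock: %u bytes\n", stock.nr_bytes);   /* 3840 */
        return 0;
    }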
|
| /linux/net/rds/ |
| H A D | rdma.c |
    618  unsigned int nr_bytes;   in rds_cmsg_rdma_args() local
    703  nr_bytes = 0;   in rds_cmsg_rdma_args()
    762  nr_bytes, nr, iov->bytes, iov->addr);   in rds_cmsg_rdma_args()
    764  nr_bytes += iov->bytes;   in rds_cmsg_rdma_args()
    786  if (nr_bytes > args->remote_vec.bytes) {   in rds_cmsg_rdma_args()
    788  nr_bytes,   in rds_cmsg_rdma_args()
    793  op->op_bytes = nr_bytes;   in rds_cmsg_rdma_args()
|
| /linux/fs/proc/ |
| H A D | vmcore.c |
    163  ssize_t nr_bytes;   in read_from_oldmem() local
    176  nr_bytes = PAGE_SIZE - offset;   in read_from_oldmem()
    178  nr_bytes = count;   in read_from_oldmem()
    182  tmp = iov_iter_zero(nr_bytes, iter);   in read_from_oldmem()
    186  nr_bytes,   in read_from_oldmem()
    189  tmp = copy_oldmem_page(iter, pfn, nr_bytes,   in read_from_oldmem()
    192  if (tmp < nr_bytes) {   in read_from_oldmem()
    197  *ppos += nr_bytes;   in read_from_oldmem()
    198  count -= nr_bytes;   in read_from_oldmem()
    199  read += nr_bytes;   in read_from_oldmem()
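read_from_oldmem() serves the read in page-sized pieces: each pass clamps nr_bytes to what is left of the current page or of the request (lines 176-178), copies or zero-fills that much, and advances the position and byte counters (lines 197-199). A standalone sketch of that loop shape, with a stand-in for the copy routine:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static size_t copy_one_page(size_t pfn, size_t offset, size_t nr_bytes)
    {
        printf("pfn %zu: copy %zu bytes at offset %zu\n", pfn, nr_bytes, offset);
        return nr_bytes;                         /* pretend the copy succeeded */
    }

    int main(void)
    {
        size_t ppos = 4000, count = 9000, read = 0;

        while (count) {
            size_t pfn = ppos / PAGE_SIZE;
            size_t offset = ppos % PAGE_SIZE;
            size_t nr_bytes = PAGE_SIZE - offset;   /* stay within this page */

            if (count < nr_bytes)
                nr_bytes = count;

            if (copy_one_page(pfn, offset, nr_bytes) < nr_bytes)
                break;                              /* short copy: give up */

            ppos += nr_bytes;
            count -= nr_bytes;
            read += nr_bytes;
        }
        printf("read %zu bytes total\n", read);
        return 0;
    }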
|
| /linux/tools/testing/selftests/kvm/x86/ |
| H A D | private_mem_conversions_test.c |
    349  size_t nr_bytes = min_t(size_t, vm->page_size, size - i);   in __test_mem_conversions() local
    353  memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);   in __test_mem_conversions()
    357  memset(hva, uc.args[4], nr_bytes);   in __test_mem_conversions()
|
| /linux/fs/xfs/scrub/ |
| H A D | xfarray.c |
    447  size_t nr_bytes = sizeof(struct xfarray_sortinfo);   in xfarray_sortinfo_alloc() local
    470  nr_bytes += max_stack_depth * sizeof(xfarray_idx_t) * 2;   in xfarray_sortinfo_alloc()
    473  nr_bytes += max_t(size_t,   in xfarray_sortinfo_alloc()
    477  si = kvzalloc(nr_bytes, XCHK_GFP_FLAGS);   in xfarray_sortinfo_alloc()
    493  trace_xfarray_sort(si, nr_bytes);   in xfarray_sortinfo_alloc()
|
| /linux/fs/fuse/ |
| H A D | dev.c |
    1807  unsigned int nr_bytes;   in fuse_notify_store() local
    1816  nr_bytes = min_t(unsigned, num, folio_size(folio) - folio_offset);   in fuse_notify_store()
    1817  nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;   in fuse_notify_store()
    1819  err = fuse_copy_folio(cs, &folio, folio_offset, nr_bytes, 0);   in fuse_notify_store()
    1821  (nr_bytes == folio_size(folio) || file_size == end)) {   in fuse_notify_store()
    1822  folio_zero_segment(folio, nr_bytes, folio_size(folio));   in fuse_notify_store()
    1831  num -= nr_bytes;   in fuse_notify_store()
    1912  unsigned int nr_bytes;   in fuse_retrieve() local
    1920  nr_bytes = min(folio_size(folio) - folio_offset, num);   in fuse_retrieve()
    1921  nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;   in fuse_retrieve()
    [all …]
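Both fuse_notify_store() and fuse_retrieve() cap nr_bytes to what fits in the current folio and then compute how many pages the copy spans by rounding the end offset up (lines 1817 and 1921). A worked example of that round-up, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long offset = 100, nr_bytes = 5000;

        /* bytes 100..5099 touch page 0 and page 1 */
        unsigned long nr_pages = (offset + nr_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
        printf("%lu bytes at offset %lu span %lu pages\n", nr_bytes, offset, nr_pages);  /* 2 */
        return 0;
    }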
|
| H A D | file.c |
    869  unsigned int nr_bytes;   member
    890  if (ia && fuse_folios_need_send(fc, pos, len, &ia->ap, data->nr_bytes,   in fuse_handle_readahead()
    892  fuse_send_readpages(ia, data->file, data->nr_bytes,   in fuse_handle_readahead()
    894  data->nr_bytes = 0;   in fuse_handle_readahead()
    918  data->nr_bytes += len;   in fuse_handle_readahead()
    955  fuse_send_readpages(data->ia, data->file, data->nr_bytes,   in fuse_iomap_read_submit()
    2121  unsigned int nr_bytes;   member
    2213  data->nr_bytes, true);   in fuse_iomap_writeback_range()
    2227  data->nr_bytes = 0;   in fuse_iomap_writeback_range()
    2242  data->nr_bytes += len;   in fuse_iomap_writeback_range()
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| H A D | en_tx.c | 502 bytes += ring->tx_info[ring_index].nr_bytes; in mlx4_en_process_tx_cq() 1052 tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; in mlx4_en_xmit() 1059 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); in mlx4_en_xmit() 1062 ring->bytes += tx_info->nr_bytes; in mlx4_en_xmit() 1101 tx_info->nr_bytes, in mlx4_en_xmit() 1199 tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); in mlx4_en_xmit_frame()
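tx_info->nr_bytes records what the frame costs on the wire: for TSO the header is counted once per segment (line 1052), and undersized frames are counted as the 60-byte Ethernet minimum, ETH_ZLEN (lines 1059 and 1199). A worked example of both cases; the segment count below is illustrative, not the driver's exact calculation:

    #include <stdio.h>

    #define ETH_ZLEN 60u

    int main(void)
    {
        /* TSO case: 64 KiB payload plus one 54-byte header in skb->len, sent as 45 segments. */
        unsigned int skb_len = 65536 + 54, lso_header_size = 54, nr_segments = 45;
        unsigned int tso_bytes = skb_len + (nr_segments - 1) * lso_header_size;
        printf("TSO wire bytes counted: %u\n", tso_bytes);

        /* Non-TSO runt: a 42-byte ARP frame still counts as ETH_ZLEN. */
        unsigned int small_len = 42;
        unsigned int nr_bytes = small_len > ETH_ZLEN ? small_len : ETH_ZLEN;
        printf("runt frame counted as: %u bytes\n", nr_bytes);
        return 0;
    }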
|
| /linux/tools/testing/selftests/kvm/lib/x86/ |
| H A D | processor.c |
    277  uint64_t nr_bytes, int level)   in virt_map_level() argument
    280  uint64_t nr_pages = nr_bytes / pg_size;   in virt_map_level()
    283  TEST_ASSERT(nr_bytes % pg_size == 0,   in virt_map_level()
    285  nr_bytes, pg_size);   in virt_map_level()
    290  nr_bytes / PAGE_SIZE);   in virt_map_level()
|
| /linux/drivers/block/ |
| H A D | swim3.c |
    256  static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)   in swim3_end_request() argument
    261  err, nr_bytes, req);   in swim3_end_request()
    264  nr_bytes = blk_rq_cur_bytes(req);   in swim3_end_request()
    265  if (blk_update_request(req, err, nr_bytes))   in swim3_end_request()
|