/linux/drivers/net/ethernet/intel/idpf/

idpf_virtchnl.c
  1065  u16 num_chunks = le16_to_cpu(chunks->num_chunks);  in idpf_vport_get_q_reg() local
  1069  while (num_chunks--) {  in idpf_vport_get_q_reg()
  1073  chunk = &chunks->chunks[num_chunks];  in idpf_vport_get_q_reg()
  1444  int totqs, num_msgs, num_chunks;  in idpf_send_config_tx_queues_msg() local
  1516  num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),  in idpf_send_config_tx_queues_msg()
  1518  num_msgs = DIV_ROUND_UP(totqs, num_chunks);  in idpf_send_config_tx_queues_msg()
  1520  buf_sz = struct_size(ctq, qinfo, num_chunks);  in idpf_send_config_tx_queues_msg()
  1531  ctq->num_qinfo = cpu_to_le16(num_chunks);  in idpf_send_config_tx_queues_msg()
  1532  memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);  in idpf_send_config_tx_queues_msg()
  1540  k += num_chunks;  in idpf_send_config_tx_queues_msg()
  [all …]
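
The idpf matches above show the usual mailbox pattern for large virtchnl payloads: cap how many per-queue qinfo chunks fit in a single message, derive the message count with DIV_ROUND_UP(), size each buffer with struct_size(), and advance the source offset by num_chunks after every send. Below is a minimal userspace sketch of that arithmetic; the 4096-byte buffer limit, the cfg_msg/qinfo layouts, and printing instead of sending are illustrative assumptions, not the driver's real definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define MAILBOX_BUF_SZ      4096u           /* assumed per-message limit */

struct qinfo { unsigned int qid; unsigned int ring_len; };

struct cfg_msg {                            /* hypothetical message layout */
	unsigned short num_qinfo;
	struct qinfo qinfo[];               /* num_qinfo entries follow */
};

int main(void)
{
	static struct qinfo qi[1000];       /* per-queue config to send */
	unsigned int totqs = 1000, k = 0;

	/* How many qinfo chunks fit in one message buffer? */
	unsigned int num_chunks = (MAILBOX_BUF_SZ - sizeof(struct cfg_msg)) /
				  sizeof(struct qinfo);
	if (num_chunks > totqs)
		num_chunks = totqs;

	unsigned int num_msgs = DIV_ROUND_UP(totqs, num_chunks);

	for (unsigned int m = 0; m < num_msgs; m++) {
		/* The last message may carry fewer chunks than the maximum. */
		if (num_chunks > totqs - k)
			num_chunks = totqs - k;

		size_t buf_sz = sizeof(struct cfg_msg) +
				num_chunks * sizeof(struct qinfo);
		struct cfg_msg *msg = calloc(1, buf_sz);

		if (!msg)
			return 1;
		msg->num_qinfo = (unsigned short)num_chunks;
		memcpy(msg->qinfo, &qi[k], num_chunks * sizeof(struct qinfo));
		printf("msg %u: %u queues, %zu bytes\n", m, num_chunks, buf_sz);
		free(msg);                  /* a real driver would send it here */
		k += num_chunks;
	}
	return 0;
}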

virtchnl2.h
  556   __le16 num_chunks;  member
  558   struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
  1132  __le16 num_chunks;  member
  1134  struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
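
Both virtchnl2 structures end in a flexible array whose element count is carried in the little-endian num_chunks field just above it; the __counted_by_le() annotation tells the compiler which member bounds the array so out-of-range accesses can be flagged. A plain-C sketch of the same layout and of how such a structure is typically sized in one allocation (struct and field names here are simplified stand-ins):

#include <stdlib.h>

struct reg_chunk { unsigned int type; unsigned long long start_offset; };

struct queue_reg_chunks {
	unsigned short num_chunks;      /* bounds the array below */
	struct reg_chunk chunks[];      /* num_chunks entries follow */
};

/* Allocate header plus n trailing chunks in one block (struct_size() analogue). */
static struct queue_reg_chunks *chunks_alloc(unsigned short n)
{
	struct queue_reg_chunks *c;

	c = calloc(1, sizeof(*c) + (size_t)n * sizeof(c->chunks[0]));
	if (c)
		c->num_chunks = n;
	return c;
}

int main(void)
{
	struct queue_reg_chunks *c = chunks_alloc(4);

	if (!c)
		return 1;
	for (unsigned short i = 0; i < c->num_chunks; i++)
		c->chunks[i].type = i;
	free(c);
	return 0;
}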

/linux/drivers/gpu/drm/xe/

xe_guc_log.c
  83   snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);  in xe_guc_log_snapshot_alloc()
  85   snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),  in xe_guc_log_snapshot_alloc()
  91   for (i = 0; i < snapshot->num_chunks; i++) {  in xe_guc_log_snapshot_alloc()
  103  for (i = 0; i < snapshot->num_chunks; i++)  in xe_guc_log_snapshot_alloc()
  126  for (i = 0; i < snapshot->num_chunks; i++)  in xe_guc_log_snapshot_free()
  164  for (i = 0; i < snapshot->num_chunks; i++) {  in xe_guc_log_snapshot_capture()
  213  for (i = 0; i < snapshot->num_chunks; i++) {  in xe_guc_log_snapshot_print()
  216  char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;  in xe_guc_log_snapshot_print()
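
xe_guc_log.c sizes a log snapshot as DIV_ROUND_UP(size, GUC_LOG_CHUNK_SIZE) chunks, allocates an array of per-chunk buffers with kcalloc(), and then walks the same num_chunks bound in the alloc, capture, print, and free paths. A userspace sketch of that allocate-per-chunk pattern, with an arbitrary 16 KiB chunk size standing in for GUC_LOG_CHUNK_SIZE:

#include <stdlib.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define CHUNK_SIZE (16 * 1024)          /* stand-in for GUC_LOG_CHUNK_SIZE */

struct snapshot {
	size_t size;
	size_t num_chunks;
	void **copy;                    /* one buffer per chunk */
};

static int snapshot_alloc(struct snapshot *s, size_t size)
{
	s->size = size;
	s->num_chunks = DIV_ROUND_UP(size, CHUNK_SIZE);
	s->copy = calloc(s->num_chunks, sizeof(*s->copy));
	if (!s->copy)
		return -1;
	for (size_t i = 0; i < s->num_chunks; i++) {
		s->copy[i] = malloc(CHUNK_SIZE);
		if (!s->copy[i])
			return -1;      /* caller cleans up via snapshot_free() */
	}
	return 0;
}

static void snapshot_free(struct snapshot *s)
{
	if (!s->copy)
		return;
	for (size_t i = 0; i < s->num_chunks; i++)
		free(s->copy[i]);
	free(s->copy);
	s->copy = NULL;
}

int main(void)
{
	struct snapshot s = { 0 };
	int ret = snapshot_alloc(&s, 100 * 1024);   /* 100 KiB -> 7 chunks */

	snapshot_free(&s);
	return ret;
}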

/linux/drivers/platform/x86/intel/ifs/

load.c
  122  int i, num_chunks, chunk_size;  in copy_hashes_authenticate_chunks() local
  134  num_chunks = hashes_status.num_chunks;  in copy_hashes_authenticate_chunks()
  148  for (i = 0; i < num_chunks; i++) {  in copy_hashes_authenticate_chunks()
  170  return gen >= IFS_GEN_STRIDE_AWARE ? status.chunks_in_stride : status.num_chunks;  in get_num_chunks()
  186  int i, num_chunks, chunk_size;  in copy_hashes_authenticate_chunks_gen2() local
  205  num_chunks = get_num_chunks(ifsd->generation, hashes_status);  in copy_hashes_authenticate_chunks_gen2()
  214  num_chunks = ifsd->valid_chunks;  in copy_hashes_authenticate_chunks_gen2()
  233  for (i = 0; i < num_chunks; i++) {  in copy_hashes_authenticate_chunks_gen2()

ifs.h
  189  u32 num_chunks :8;  member
  202  u16 num_chunks;  member

/linux/tools/testing/selftests/mm/

mremap_test.c
  392  unsigned long num_chunks;  in remap_region() local
  466  num_chunks = get_sqrt(threshold);  in remap_region()
  467  for (unsigned long i = 0; i < num_chunks; ++i) {  in remap_region()
  468  size_t chunk_size = threshold / num_chunks;  in remap_region()
  488  * if threshold is not divisible by num_chunks, then check the  in remap_region()
  491  for (t = num_chunks * (threshold / num_chunks); t < threshold; ++t) {  in remap_region()
  506  num_chunks = get_sqrt(c.dest_preamble_size);  in remap_region()
  508  for (unsigned long i = 0; i < num_chunks; ++i) {  in remap_region()
  509  size_t chunk_size = c.dest_preamble_size / num_chunks;  in remap_region()
  [all …]
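
The mremap selftest does not verify every byte of the remapped region: it samples roughly sqrt(threshold) chunks of size threshold / num_chunks and then walks the leftover tail bytes when threshold is not divisible by num_chunks. A small sketch of that sampling split, with a simple integer square root standing in for the test's get_sqrt():

#include <stdio.h>

/* Simple integer square root, standing in for the selftest's get_sqrt(). */
static unsigned long isqrt(unsigned long n)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= n)
		r++;
	return r;
}

int main(void)
{
	unsigned long threshold = 3 * 1024 * 1024;      /* bytes to verify */
	unsigned long num_chunks = isqrt(threshold);    /* ~1773 samples */
	unsigned long chunk_size = threshold / num_chunks;
	unsigned long covered = num_chunks * chunk_size;

	/* Check one representative range per chunk... */
	for (unsigned long i = 0; i < num_chunks; i++) {
		unsigned long start = i * chunk_size;

		(void)start;            /* check [start, start + chunk_size) */
	}

	/* ...then the tail left over when threshold isn't evenly divisible. */
	for (unsigned long t = covered; t < threshold; t++)
		;                       /* check byte t */

	printf("%lu chunks of %lu bytes, %lu tail bytes\n",
	       num_chunks, chunk_size, threshold - covered);
	return 0;
}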

/linux/drivers/crypto/intel/qat/qat_common/

icp_qat_uclo.h
  252  unsigned short num_chunks;  member
  267  short num_chunks;  member
  507  unsigned short num_chunks;  member
  533  unsigned short num_chunks;  member
  549  unsigned short num_chunks;  member

qat_uclo.c
  112   if (suof_hdr->num_chunks <= 0x1) {  in qat_uclo_check_suof_format()
  455   for (i = 0; i < obj_hdr->num_chunks; i++) {  in qat_uclo_find_chunk()
  502   for (i = 0; i < file_hdr->num_chunks; i++) {  in qat_uclo_map_chunk()
  1181  suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;  in qat_uclo_map_suof()
  1745  uobj_chunk_num = uobj_hdr->num_chunks;  in qat_uclo_map_objs_from_mof()
  1747  sobj_chunk_num = sobj_hdr->num_chunks;  in qat_uclo_map_objs_from_mof()
  1827  if (mof_hdr->num_chunks <= 0x1) {  in qat_uclo_check_mof_format()
  1871  chunks_num = mof_ptr->num_chunks;  in qat_uclo_map_mof_obj()

/linux/drivers/accel/habanalabs/common/

command_submission.c
  1351  u32 cs_type_flags, num_chunks;  in hl_cs_sanity_checks() local
  1382  num_chunks = args->in.num_chunks_execute;  in hl_cs_sanity_checks()
  1393  if (!num_chunks) {  in hl_cs_sanity_checks()
  1397  } else if (is_sync_stream && num_chunks != 1) {  in hl_cs_sanity_checks()
  1409  void __user *chunks, u32 num_chunks,  in hl_cs_copy_chunk_array() argument
  1414  if (num_chunks > HL_MAX_JOBS_PER_CS) {  in hl_cs_copy_chunk_array()
  1423  *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),  in hl_cs_copy_chunk_array()
  1426  *cs_chunk_array = kmalloc_array(num_chunks,  in hl_cs_copy_chunk_array()
  1434  size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);  in hl_cs_copy_chunk_array()
  1490  u32 num_chunks, u64 *cs_seq, u32 flags,  in cs_ioctl_default() argument
  [all …]

/linux/drivers/infiniband/sw/siw/

siw_mem.c
  365  int num_pages, num_chunks, i, rv = 0;  in siw_umem_get() local
  372  num_chunks = (num_pages >> CHUNK_SHIFT) + 1;  in siw_umem_get()
  379  kcalloc(num_chunks, sizeof(struct siw_page_chunk), GFP_KERNEL);  in siw_umem_get()

/linux/drivers/gpu/drm/lima/

lima_sched.c
  334  dt->num_chunks++;  in lima_sched_build_error_task_list()
  341  dt->num_chunks++;  in lima_sched_build_error_task_list()
  347  dt->num_chunks++;  in lima_sched_build_error_task_list()
  386  dt->num_chunks++;  in lima_sched_build_error_task_list()

lima_dump.h
  47  __u32 num_chunks;  member

/linux/drivers/crypto/marvell/octeontx/

otx_cptvf.h
  37  u32 num_chunks; /* Number of command chunks */  member

otx_cptvf_main.c
  179  queue->num_chunks = 0;  in free_command_queues()
  226  i, queue->num_chunks);  in alloc_command_queues()
  231  if (queue->num_chunks == 0) {  in alloc_command_queues()
  238  queue->num_chunks++;  in alloc_command_queues()

/linux/drivers/gpu/drm/radeon/

radeon_cs.c
  277  if (!cs->num_chunks) {  in radeon_cs_parser_init()
  289  p->chunks_array = kvmalloc_array(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);  in radeon_cs_parser_init()
  295  sizeof(uint64_t)*cs->num_chunks)) {  in radeon_cs_parser_init()
  299  p->nchunks = cs->num_chunks;  in radeon_cs_parser_init()

/linux/drivers/net/dsa/sja1105/

sja1105_spi.c
  44  int num_chunks;  in sja1105_xfer() local
  47  num_chunks = DIV_ROUND_UP(len, priv->max_xfer_len);  in sja1105_xfer()
  56  for (i = 0; i < num_chunks; i++) {  in sja1105_xfer()
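
sja1105_xfer() is the plain chunked-transfer idiom: the controller caps a single SPI transfer at priv->max_xfer_len, so the buffer is cut into DIV_ROUND_UP(len, max_xfer_len) pieces and sent in a loop, with the last piece possibly shorter than the rest. A sketch of the same loop, with a dummy send routine standing in for the real SPI transfer:

#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Stand-in for the hardware transfer; just reports what would be sent. */
static int xfer_one(const unsigned char *buf, size_t len)
{
	(void)buf;
	printf("xfer %zu bytes\n", len);
	return 0;
}

static int xfer_chunked(const unsigned char *buf, size_t len,
			size_t max_xfer_len)
{
	size_t num_chunks = DIV_ROUND_UP(len, max_xfer_len);

	for (size_t i = 0; i < num_chunks; i++) {
		size_t offset = i * max_xfer_len;
		size_t this_len = len - offset;

		if (this_len > max_xfer_len)
			this_len = max_xfer_len;
		if (xfer_one(buf + offset, this_len))
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[1000];

	memset(buf, 0xab, sizeof(buf));
	/* 4 transfers: 256 + 256 + 256 + 232 bytes */
	return xfer_chunked(buf, sizeof(buf), 256);
}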

/linux/drivers/vfio/pci/mlx5/

main.c
  415  int num_chunks;  in mlx5vf_prep_stop_copy() local
  438  num_chunks = mvdev->chunk_mode ? MAX_NUM_CHUNKS : 1;  in mlx5vf_prep_stop_copy()
  439  for (i = 0; i < num_chunks; i++) {  in mlx5vf_prep_stop_copy()
  469  for (i = 0; i < num_chunks; i++) {  in mlx5vf_prep_stop_copy()

/linux/drivers/mtd/nand/raw/

mxc_nand.c
  254  u16 num_chunks = mtd->writesize / 512;  in copy_spare() local
  261  oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;  in copy_spare()
  264  for (i = 0; i < num_chunks - 1; i++)  in copy_spare()
  274  for (i = 0; i < num_chunks - 1; i++)  in copy_spare()
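
copy_spare() splits the controller's usable OOB area evenly across the page's 512-byte chunks, rounding each share down to an even byte count; the loops above cover the first num_chunks - 1 chunks, and the last chunk is handled separately with whatever bytes remain. A sketch of that split, with made-up stand-ins for writesize and used_oobsize:

#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048;          /* assumed NAND page size */
	unsigned int used_oobsize = 38;         /* assumed usable OOB bytes */
	unsigned int num_chunks = writesize / 512;                        /* 4 */
	unsigned int oob_chunk_size = (used_oobsize / num_chunks) & ~1u;  /* 8 */
	unsigned int i, offset = 0;

	/* First num_chunks - 1 chunks each get the even-sized share... */
	for (i = 0; i < num_chunks - 1; i++) {
		printf("chunk %u: OOB bytes [%u, %u)\n",
		       i, offset, offset + oob_chunk_size);
		offset += oob_chunk_size;
	}
	/* ...and the last chunk takes the remainder (14 bytes here). */
	printf("chunk %u: OOB bytes [%u, %u)\n", i, offset, used_oobsize);
	return 0;
}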

/linux/drivers/gpu/drm/amd/amdgpu/

amdgpu_cs.c
  51   if (cs->in.num_chunks == 0)  in amdgpu_cs_parser_init()
  188  chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),  in amdgpu_cs_pass1()
  196  sizeof(uint64_t)*cs->in.num_chunks)) {  in amdgpu_cs_pass1()
  201  p->nchunks = cs->in.num_chunks;  in amdgpu_cs_pass1()

/linux/drivers/media/i2c/

thp7312.c
  1669  unsigned int num_chunks;  in thp7312_fw_load_to_ram() local
  1677  num_chunks = DIV_ROUND_UP(bank_size, THP7312_FW_DOWNLOAD_UNIT);  in thp7312_fw_load_to_ram()
  1680  __func__, bank_size, i, num_chunks);  in thp7312_fw_load_to_ram()
  1682  for (j = 0 ; j < num_chunks; j++) {  in thp7312_fw_load_to_ram()

/linux/net/sctp/

socket.c
  6972  u32 num_chunks = 0;  in sctp_getsockopt_peer_auth_chunks() local
  6994  num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);  in sctp_getsockopt_peer_auth_chunks()
  6995  if (len < num_chunks)  in sctp_getsockopt_peer_auth_chunks()
  6998  if (copy_to_user(to, ch->chunks, num_chunks))  in sctp_getsockopt_peer_auth_chunks()
  7001  len = sizeof(struct sctp_authchunks) + num_chunks;  in sctp_getsockopt_peer_auth_chunks()
  7004  if (put_user(num_chunks, &p->gauth_number_of_chunks))  in sctp_getsockopt_peer_auth_chunks()
  7017  u32 num_chunks = 0;  in sctp_getsockopt_local_auth_chunks() local
  7044  num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);  in sctp_getsockopt_local_auth_chunks()
  7045  if (len < sizeof(struct sctp_authchunks) + num_chunks)  in sctp_getsockopt_local_auth_chunks()
  7048  if (copy_to_user(to, ch->chunks, num_chunks))  in sctp_getsockopt_local_auth_chunks()
  [all …]
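
Both SCTP getsockopt handlers compute num_chunks as the AUTH parameter's TLV length minus its header, i.e. the number of chunk-type bytes that follow, then check that the caller's buffer can hold them before copying the list out and reporting the count. A sketch of just the length arithmetic on a hand-built parameter header (the structure here is a simplified stand-in, not the kernel's sctp_paramhdr):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* htons()/ntohs() */

struct paramhdr {               /* simplified parameter header: type + length */
	uint16_t type;
	uint16_t length;        /* network order, covers header plus value */
};

int main(void)
{
	/* A chunk-list parameter carrying three chunk-type bytes. */
	struct paramhdr hdr = {
		.type   = htons(0x8003),
		.length = htons(sizeof(struct paramhdr) + 3),
	};

	/* The chunk-type list is whatever follows the header, so the count
	 * of listed chunks is the TLV length minus the header size. */
	uint32_t num_chunks = ntohs(hdr.length) - sizeof(struct paramhdr);

	/* Analogue of len = sizeof(struct sctp_authchunks) + num_chunks. */
	size_t reply_len = sizeof(struct paramhdr) + num_chunks;

	printf("num_chunks = %u, reply_len = %zu\n",
	       (unsigned int)num_chunks, reply_len);
	return 0;
}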

/linux/include/uapi/drm/

radeon_drm.h
  984  __u32 num_chunks;  member

amdgpu_drm.h
  644  __u32 num_chunks;  member

/linux/fs/btrfs/

inode.c
  753   atomic_t num_chunks;  member
  1582  if (atomic_dec_and_test(&async_cow->num_chunks))  in submit_compressed_extents()
  1612  u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);  in run_delalloc_compressed() local
  1618  ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);  in run_delalloc_compressed()
  1626  atomic_set(&ctx->num_chunks, num_chunks);  in run_delalloc_compressed()
  1628  for (i = 0; i < num_chunks; i++) {  in run_delalloc_compressed()
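
run_delalloc_compressed() cuts the dirty range into 512 KiB chunks, allocates the context and all chunk descriptors in one struct_size() block, and initializes an atomic counter to num_chunks; each finished chunk decrements it, and the decrement that reaches zero (atomic_dec_and_test()) releases the shared context. A userspace sketch of that last-one-out pattern using C11 atomics; the worker threads are omitted and the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SZ_512K (512 * 1024ULL)

struct async_chunk { unsigned long long start, end; };

struct async_ctx {
	atomic_uint num_chunks;         /* chunks still outstanding */
	struct async_chunk chunks[];
};

/* Called once per finished chunk; the caller that drops the count to zero
 * is the last one out and frees the shared context. */
static void chunk_done(struct async_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->num_chunks, 1) == 1) {
		printf("all chunks done, freeing context\n");
		free(ctx);
	}
}

int main(void)
{
	unsigned long long start = 0, end = 3 * SZ_512K + 1234;
	unsigned long long num_chunks = DIV_ROUND_UP(end - start, SZ_512K); /* 4 */
	struct async_ctx *ctx;

	ctx = malloc(sizeof(*ctx) + num_chunks * sizeof(ctx->chunks[0]));
	if (!ctx)
		return 1;
	atomic_init(&ctx->num_chunks, (unsigned int)num_chunks);

	for (unsigned long long i = 0; i < num_chunks; i++) {
		ctx->chunks[i].start = start + i * SZ_512K;
		ctx->chunks[i].end = ctx->chunks[i].start + SZ_512K - 1;
		if (ctx->chunks[i].end > end - 1)
			ctx->chunks[i].end = end - 1;
		chunk_done(ctx);        /* normally called by the async worker */
	}
	return 0;
}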

/linux/drivers/staging/media/atomisp/pci/runtime/binary/src/

binary.c
  96  info->output.num_chunks, info->pipeline.pipelining);  in ia_css_binary_internal_res()