/linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
   505  } *chunks; in nfp_nsp_command_buf_dma_sg() local
   517  chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL); in nfp_nsp_command_buf_dma_sg()
   518  if (!chunks) in nfp_nsp_command_buf_dma_sg()
   526  chunks[i].chunk = kmalloc(chunk_size, in nfp_nsp_command_buf_dma_sg()
   528  if (!chunks[i].chunk) in nfp_nsp_command_buf_dma_sg()
   531  chunks[i].len = min_t(u64, chunk_size, max_size - off); in nfp_nsp_command_buf_dma_sg()
   536  memcpy(chunks[i].chunk, arg->in_buf + off, coff); in nfp_nsp_command_buf_dma_sg()
   538  memset(chunks[i].chunk + coff, 0, chunk_size - coff); in nfp_nsp_command_buf_dma_sg()
   540  off += chunks[i].len; in nfp_nsp_command_buf_dma_sg()
   548  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, in nfp_nsp_command_buf_dma_sg()
   [all …]
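The nfp_nsp_command_buf_dma_sg() hits above show a common bounce-buffer
pattern: the input buffer is scattered across fixed-size kmalloc()'d
chunks, and any part of a chunk the input does not reach is zero-filled
before the chunk is DMA-mapped. A minimal userspace sketch of the same
copy-and-pad loop (the struct and function names here are illustrative,
not the nfp driver's):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct chunk {
        void   *buf;
        size_t  len;
    };

    /* Scatter in_len bytes of `in` across n chunks of chunk_size bytes,
     * zero-padding whatever part of each chunk the input does not
     * cover - the memcpy()/memset() pair seen in the hits above. */
    static int scatter_into_chunks(struct chunk *chunks, size_t n,
                                   size_t chunk_size,
                                   const uint8_t *in, size_t in_len)
    {
        size_t off = 0;

        for (size_t i = 0; i < n; i++) {
            size_t coff = 0;

            chunks[i].buf = malloc(chunk_size);
            if (!chunks[i].buf)
                return -1;
            chunks[i].len = chunk_size;

            if (off < in_len)
                coff = (in_len - off < chunk_size) ? in_len - off
                                                   : chunk_size;
            memcpy(chunks[i].buf, in + off, coff);
            memset((uint8_t *)chunks[i].buf + coff, 0, chunk_size - coff);
            off += chunk_size;
        }
        return 0;
    }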
/linux/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py
   228  chunks = [ self.output_file_top,
   244  chunks.append('\t&{},'.format(dev_table_name))
   273  chunks.append('\tNULL,') # terminate list
   274  chunks.append('};')
   275  return '\n'.join(chunks)
   416  chunks = [ self.output_file_top,
   432  chunks.append('\t&{},'.format(fam_table_name))
   462  chunks.append('\tNULL,') # terminate list
   463  chunks.append('};')
   464  return '\n'.join(chunks)
/linux/drivers/net/mctp/mctp-serial.c
   534  u8 chunks[MAX_CHUNKS]; member
   552  KUNIT_EXPECT_EQ(test, next, params->chunks[i]); in test_next_chunk_len()
   567  .chunks = { 3, 1, 1, 0},
   572  .chunks = { 3, 1, 1, 0},
   577  .chunks = { 1, 2, 0},
   582  .chunks = { 1, 1, 1, 0},
   587  .chunks = { 1, 1, 1, 1, 0},
   592  .chunks = { 1, 1, 1, 1, 2, 0},
   597  .chunks = { 1, 0 },
   602  .chunks = { 1, 0 },
   [all …]
/linux/arch/x86/kernel/cpu/resctrl/monitor.c
   214  u64 shift = 64 - width, chunks; in mbm_overflow_count() local
   216  chunks = (cur_msr << shift) - (prev_msr << shift); in mbm_overflow_count()
   217  return chunks >> shift; in mbm_overflow_count()
   226  u64 chunks; in get_corrected_val() local
   230  am->chunks += mbm_overflow_count(am->prev_msr, msr_val, in get_corrected_val()
   232  chunks = get_corrected_mbm_count(rmid, am->chunks); in get_corrected_val()
   235  chunks = msr_val; in get_corrected_val()
   238  return chunks * hw_res->mon_scale; in get_corrected_val()
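The mbm_overflow_count() hits illustrate the standard trick for taking
the difference of two samples of a hardware counter that is only
`width` bits wide: shift both samples up so the counter's top bit lands
at bit 63, let the subtraction wrap modulo 2^64, then shift the result
back down. A standalone sketch of the same idea (names are mine, not
the kernel's; assumes 0 < width <= 64):

    #include <stdint.h>

    /* Modulo-2^width distance from prev to cur. Shifting left by
     * (64 - width) discards the unused high bits, so a single counter
     * wraparound between the two samples is still counted correctly. */
    static uint64_t counter_delta(uint64_t prev, uint64_t cur,
                                  unsigned int width)
    {
        unsigned int shift = 64 - width;

        return ((cur << shift) - (prev << shift)) >> shift;
    }

    /* Example: a 24-bit counter that wrapped once between samples:
     * counter_delta(0xFFFFF0, 0x000010, 24) == 0x20. */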
/linux/drivers/infiniband/hw/usnic/usnic_vnic.c
    44  struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; member
   117  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { in usnic_vnic_dump()
   118  chunk = &vnic->chunks[i]; in usnic_vnic_dump()
   222  return vnic->chunks[type].cnt; in usnic_vnic_res_cnt()
   228  return vnic->chunks[type].free_cnt; in usnic_vnic_res_free_cnt()
   254  src = &vnic->chunks[type]; in usnic_vnic_get_resources()
   286  vnic->chunks[res->type].free_cnt++; in usnic_vnic_put_resources()
   382  &vnic->chunks[res_type]); in usnic_vnic_discover_resources()
   391  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_discover_resources()
   427  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); in usnic_vnic_release_resources()
/linux/net/xdp/xdp_umem.c
   163  u64 chunks, npgs; in xdp_umem_reg() local
   198  chunks = div_u64_rem(size, chunk_size, &chunks_rem); in xdp_umem_reg()
   199  if (!chunks || chunks > U32_MAX) in xdp_umem_reg()
   217  umem->chunks = chunks; in xdp_umem_reg()
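The xdp_umem_reg() hits show the registration-time sanity check: the
UMEM area must divide into at least one chunk, and the chunk count must
fit the driver's 32-bit bookkeeping. A hedged userspace-style sketch of
that validation (the function name and the strict no-remainder rule are
mine; the kernel tolerates a remainder only in unaligned-chunks mode):

    #include <stdbool.h>
    #include <stdint.h>

    static bool umem_chunks_valid(uint64_t size, uint32_t chunk_size,
                                  uint64_t *out_chunks)
    {
        uint64_t chunks, rem;

        if (!chunk_size)
            return false;

        chunks = size / chunk_size;
        rem = size % chunk_size;

        /* Mirrors the `!chunks || chunks > U32_MAX` test above. */
        if (!chunks || chunks > UINT32_MAX || rem)
            return false;

        *out_chunks = chunks;
        return true;
    }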
/linux/scripts/gdb/linux/timerlist.py
   162  chunks = []
   168  chunks.append(buf[start:end])
   170  chunks.append(',')
   174  chunks[0] = chunks[0][0] # Cut off the first 0
   176  return "".join(str(chunks))
/linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
   724  const void *chunks; member
   764  const void *pos = params->chunks; in idpf_send_chunked_msg()
  1275  struct virtchnl2_vector_chunks *chunks; in idpf_get_reg_intr_vecs() local
  1280  chunks = &vport->adapter->req_vec_chunks->vchunks; in idpf_get_reg_intr_vecs()
  1281  num_vchunks = le16_to_cpu(chunks->num_vchunks); in idpf_get_reg_intr_vecs()
  1288  chunk = &chunks->vchunks[j]; in idpf_get_reg_intr_vecs()
  1325  struct virtchnl2_queue_reg_chunks *chunks) in idpf_vport_get_q_reg() argument
  1327  u16 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_vport_get_q_reg()
  1335  chunk = &chunks->chunks[num_chunks]; in idpf_vport_get_q_reg()
  1419  struct virtchnl2_queue_reg_chunks *chunks; in idpf_queue_reg_init() local
  [all …]
/linux/lib/genalloc.c
   160  INIT_LIST_HEAD(&pool->chunks); in gen_pool_create()
   203  list_add_rcu(&chunk->next_chunk, &pool->chunks); in gen_pool_add_owner()
   223  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_virt_to_phys()
   249  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { in gen_pool_destroy()
   297  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_alloc_algo_owner()
   503  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { in gen_pool_free_owner()
   538  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) in gen_pool_for_each_chunk()
   561  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { in gen_pool_has_addr()
   586  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_avail()
   605  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) in gen_pool_size()
/linux/lib/bitmap-str.c
   477  int chunks = BITS_TO_U32(nmaskbits); in bitmap_parse() local
   487  if (!chunks--) in bitmap_parse()
   499  unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32; in bitmap_parse()
/linux/Documentation/networking/oa-tc6-framework.rst
    49  each chunk. Ethernet frames are transferred over one or more data chunks.
    59  receive (RX) chunks. Chunks in both transmit and receive directions may
    69  In parallel, receive data chunks are received on MISO. Each receive data
   160  the MAC-PHY will be converted into multiple transmit data chunks. Each
   177  transaction. For TX data chunks, this bit shall be ’1’.
   242  received from the MAC-PHY. The SPI host should not write more data chunks
   248  chunks, the MAC-PHY interrupt is asserted to SPI host. On reception of the
   254  host will be sent as multiple receive data chunks. Each receive data
   286  data chunks of frame data that are available for
   340  of transmit data chunks of frame data that the SPI host
   [all …]
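The recurring theme in these documentation hits is that every Ethernet
frame is carried over one or more fixed-size SPI data chunks, with the
last chunk padded out. A small sketch of the resulting bookkeeping (the
64-byte payload is an assumption for illustration; OA-TC6 configures
the chunk payload size, so consult the spec and your MAC-PHY):

    #include <stdint.h>

    #define CHUNK_PAYLOAD 64u   /* assumed chunk payload size in bytes */

    /* Number of data chunks needed for a frame of len bytes, i.e.
     * DIV_ROUND_UP(len, CHUNK_PAYLOAD); the unused tail of the final
     * chunk is padding. */
    static uint32_t frame_to_chunks(uint32_t len)
    {
        return (len + CHUNK_PAYLOAD - 1) / CHUNK_PAYLOAD;
    }

For example, a 1514-byte frame needs frame_to_chunks(1514) == 24
chunks, with 22 bytes of padding in the last one.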
/linux/tools/testing/selftests/bpf/progs/dynptr_success.c
   672  int i, chunks = 200; in test_dynptr_copy_xdp()
   677  bpf_ringbuf_reserve_dynptr(&ringbuf, len * chunks, 0, &ptr_buf); in test_dynptr_copy_xdp()
   680  bpf_for(i, 0, chunks) { in test_dynptr_copy_xdp()
   686  err = bpf_dynptr_copy(&ptr_xdp, 0, &ptr_buf, 0, len * chunks);
   690  bpf_for(i, 0, chunks) { in test_dynptr_memset_zero()
   701  bpf_for(i, 0, chunks) { in test_dynptr_memset_zero()
   707  err = bpf_dynptr_copy(&ptr_buf, 0, &ptr_xdp, 0, len * chunks);
   711  bpf_for(i, 0, chunks) { in test_dynptr_memset_notzero()
   721  err = bpf_dynptr_copy(&ptr_xdp, 2, &ptr_xdp, len, len * (chunks - 1)); in test_dynptr_memset_notzero()
   725  bpf_for(i, 0, chunks
   617  int i, chunks = 200; in test_dynptr_copy_xdp() local
   [all …]
/linux/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
    90  devlink_resource_size_set 32000 kvd linear chunks
    99  devlink_resource_size_set 32000 kvd linear chunks
   108  devlink_resource_size_set 49152 kvd linear chunks
/linux/kernel/audit_tree.c
    17  struct list_head chunks; member
   103  INIT_LIST_HEAD(&tree->chunks); in alloc_tree()
   437  list_add(&chunk->owners[0].list, &tree->chunks); in create_chunk()
   509  list_add(&p->list, &tree->chunks); in tag_chunk()
   574  while (!list_empty(&victim->chunks)) { in prune_tree_chunks()
   579  p = list_first_entry(&victim->chunks, struct audit_node, list); in prune_tree_chunks()
   620  for (p = tree->chunks.next; p != &tree->chunks; p = q) { in trim_marked()
   625  list_add(p, &tree->chunks); in trim_marked()
   702  list_for_each_entry(node, &tree->chunks, list) { in audit_trim_trees()
   851  list_for_each_entry(node, &tree->chunks, list) in audit_add_tree_rule()
   [all …]
/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
   188  chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks), in amdgpu_cs_pass1()
   195  p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), in amdgpu_cs_pass1()
   197  if (!p->chunks) { in amdgpu_cs_pass1()
   213  p->chunks[i].chunk_id = user_chunk.chunk_id; in amdgpu_cs_pass1()
   214  p->chunks[i].length_dw = user_chunk.length_dw; in amdgpu_cs_pass1()
   216  size = p->chunks[i].length_dw; in amdgpu_cs_pass1()
   218  p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data), in amdgpu_cs_pass1()
   221  if (IS_ERR(p->chunks[i].kdata)) { in amdgpu_cs_pass1()
   222  ret = PTR_ERR(p->chunks[i].kdata); in amdgpu_cs_pass1()
   230  switch (p->chunks[i].chunk_id) { in amdgpu_cs_pass1()
   [all …]
/linux/drivers/infiniband/ulp/rtrs/README
    28  session. A session is associated with a set of memory chunks reserved on the
    36  chunks reserved for him on the server side. Their number, size and addresses
    45  which of the memory chunks has been accessed and at which offset the message
    80  the server (number of memory chunks which are going to be allocated for that
   122  1. When processing a write request client selects one of the memory chunks
   139  1. When processing a write request client selects one of the memory chunks
   144  using the IMM field, Server invalidate rkey associated to the memory chunks
   162  1. When processing a read request client selects one of the memory chunks
   181  1. When processing a read request client selects one of the memory chunks
   186  Server invalidate rkey associated to the memory chunks first, when it finishes,
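Several of these README lines describe one mechanism: the client picks
one of the server-side memory chunks, RDMA-writes its message there,
and uses the 32-bit IMM field to tell the server which chunk was used
and at which offset the message starts. A hedged sketch of such an
encoding (the bit layout below is purely illustrative, not RTRS's
actual IMM format):

    #include <stdint.h>

    #define IMM_OFFSET_BITS 16u   /* assumed split: 16 bits of offset */

    static uint32_t imm_encode(uint32_t chunk_id, uint32_t offset)
    {
        return (chunk_id << IMM_OFFSET_BITS) |
               (offset & ((1u << IMM_OFFSET_BITS) - 1));
    }

    static void imm_decode(uint32_t imm, uint32_t *chunk_id,
                           uint32_t *offset)
    {
        *chunk_id = imm >> IMM_OFFSET_BITS;
        *offset   = imm & ((1u << IMM_OFFSET_BITS) - 1);
    }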
/linux/drivers/infiniband/ulp/rtrs/rtrs-srv.c
   604  int nr, nr_sgt, chunks; in map_cont_bufs() local
   607  chunks = chunks_per_mr * srv_path->mrs_num; in map_cont_bufs()
   610  srv->queue_depth - chunks); in map_cont_bufs()
   617  sg_set_page(s, srv->chunks[chunks + i], in map_cont_bufs()
   652  srv_path->dma_addr[chunks + i] = sg_dma_address(s); in map_cont_bufs()
  1051  data = page_address(srv->chunks[buf_id]); in process_read()
  1104  data = page_address(srv->chunks[buf_id]); in process_write()
  1179  data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_inv_rkey_done()
  1282  data = page_address(srv->chunks[msg_id]) + off; in rtrs_srv_rdma_done()
  1382  __free_pages(srv->chunks[i], get_order(max_chunk_size)); in free_srv()
  [all …]
/linux/drivers/infiniband/hw/efa/efa_verbs.c
   116  struct pbl_chunk *chunks; member
  1373  /* allocate a chunk list that consists of 4KB chunks */ in pbl_chunk_list_create()
  1377  chunk_list->chunks = kcalloc(chunk_list_size, in pbl_chunk_list_create()
  1378  sizeof(*chunk_list->chunks), in pbl_chunk_list_create()
  1380  if (!chunk_list->chunks) in pbl_chunk_list_create()
  1389  chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL); in pbl_chunk_list_create()
  1390  if (!chunk_list->chunks[i].buf) in pbl_chunk_list_create()
  1393  chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE; in pbl_chunk_list_create()
  1395  chunk_list->chunks[chunk_list_size - 1].length = in pbl_chunk_list_create()
  1399  /* fill the dma addresses of sg list pages to chunks in pbl_chunk_list_create()
  [all …]
/linux/drivers/net/wireless/ti/wlcore/boot.c
   237  u32 chunks, addr, len; in wlcore_boot_upload_firmware() local
   242  chunks = be32_to_cpup((__be32 *) fw); in wlcore_boot_upload_firmware()
   245  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); in wlcore_boot_upload_firmware()
   247  while (chunks--) { in wlcore_boot_upload_firmware()
   258  chunks, addr, len); in wlcore_boot_upload_firmware()
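The wlcore hits show the usual shape of a chunked firmware image: a
big-endian chunk count up front, then one record per chunk carrying a
load address and a length. A hedged parser sketch under that assumed
layout (wlcore's real image format may carry additional fields; the
record layout and names below are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
    }

    /* Walk an image laid out as: be32 count, then `count` records of
     * { be32 addr, be32 len, len payload bytes }, bounds-checking as
     * we go. */
    static int walk_fw_chunks(const uint8_t *fw, size_t size,
                              void (*upload)(uint32_t addr,
                                             const uint8_t *data,
                                             uint32_t len))
    {
        size_t off = 4;
        uint32_t chunks;

        if (size < 4)
            return -1;
        chunks = be32(fw);

        while (chunks--) {
            uint32_t addr, len;

            if (off + 8 > size)
                return -1;
            addr = be32(fw + off);
            len = be32(fw + off + 4);
            off += 8;
            if (len > size - off)
                return -1;
            upload(addr, fw + off, len);
            off += len;
        }
        return 0;
    }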
/linux/tools/testing/selftests/bpf/generate_udp_fragments.py
    46  chunks = [frag[i : i + 10] for i in range(0, len(frag), 10)]
    47  chunks_fmted = [", ".join([str(hex(b)) for b in chunk]) for chunk in chunks]
/linux/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
   365  len0 = pnvm_data->chunks[0].len; in iwl_pcie_load_payloads_contig()
   366  len1 = pnvm_data->chunks[1].len; in iwl_pcie_load_payloads_contig()
   381  memcpy(dram->block, pnvm_data->chunks[0].data, len0); in iwl_pcie_load_payloads_contig()
   382  memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1); in iwl_pcie_load_payloads_contig()
   415  len = pnvm_data->chunks[i].len; in iwl_pcie_load_payloads_segments()
   416  data = pnvm_data->chunks[i].data; in iwl_pcie_load_payloads_segments()
/linux/drivers/virt/vboxguest/vboxguest_core.c
   361  u32 i, chunks; in vbg_balloon_work() local
   389  chunks = req->balloon_chunks; in vbg_balloon_work()
   390  if (chunks > gdev->mem_balloon.max_chunks) { in vbg_balloon_work()
   392  __func__, chunks, gdev->mem_balloon.max_chunks); in vbg_balloon_work()
   396  if (chunks > gdev->mem_balloon.chunks) { in vbg_balloon_work()
   398  for (i = gdev->mem_balloon.chunks; i < chunks; i++) { in vbg_balloon_work()
   403  gdev->mem_balloon.chunks++; in vbg_balloon_work()
   407  for (i = gdev->mem_balloon.chunks; i-- > chunks;) { in vbg_balloon_work()
   412  gdev->mem_balloon.chunks--; in vbg_balloon_work()
  1667  balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks; in vbg_ioctl_check_balloon()
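The vbg_balloon_work() hits follow the classic balloon pattern: compare
the host-requested chunk count against the current one, then inflate or
deflate one chunk at a time until they meet. A minimal sketch of that
reconcile loop (the callback-based API is illustrative, not the
driver's):

    #include <stdbool.h>
    #include <stdint.h>

    static void balloon_reconcile(uint32_t *cur, uint32_t target,
                                  uint32_t max,
                                  bool (*inflate_one)(void),
                                  bool (*deflate_one)(void))
    {
        if (target > max)
            return;   /* the driver logs and rejects such requests */

        while (*cur < target && inflate_one())   /* grow the balloon */
            (*cur)++;
        while (*cur > target && deflate_one())   /* shrink it again */
            (*cur)--;
    }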
/linux/drivers/dma/sh/shdma-base.c
    97  if (chunk->chunks == 1) { in shdma_tx_submit()
   369  if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { in __ld_cleanup()
   385  BUG_ON(desc->chunks != 1); in __ld_cleanup()
   580  int chunks = 0; in shdma_prep_sg() local
   585  chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); in shdma_prep_sg()
   625  new->chunks = 1; in shdma_prep_sg()
   627  new->chunks = chunks--; in shdma_prep_sg()
/linux/drivers/dma/sh/rcar-dmac.c
    79  struct list_head chunks; member
   107  DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks);
   115  ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
   389  list_first_entry(&desc->chunks, in rcar_dmac_chan_start_xfer()
   513  desc->running = list_first_entry(&desc->chunks, in rcar_dmac_tx_submit()
   546  INIT_LIST_HEAD(&desc->chunks); in rcar_dmac_desc_alloc()
   577  list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); in rcar_dmac_desc_put()
   673  struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; in rcar_dmac_xfer_chunk_alloc()
   771  list_for_each_entry(chunk, &desc->chunks, node) { in rcar_dmac_fill_hwdesc()
  1025  list_add_tail(&chunk->node, &desc->chunks); in rcar_dmac_chan_prep_sg()
  [all …]
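Hit 115 above is the tail of an arithmetic macro worth spelling out:
rcar-dmac carves page-sized allocations into chunk records, and the
number of records per page is whatever is left after a small header,
divided by the record size. A self-contained sketch of the same
computation (struct layout and names are illustrative):

    #include <stddef.h>

    #define POOL_PAGE_SIZE 4096u   /* assumed page size */

    struct record {
        unsigned int src, dst, size;   /* stand-in chunk fields */
    };

    /* Page-sized pool: header fields first, then a flexible array of
     * records filling the rest of the page. */
    struct record_page {
        struct record_page *next;      /* header */
        struct record records[];       /* flexible array member */
    };

    enum {
        RECORDS_PER_PAGE =
            (POOL_PAGE_SIZE - offsetof(struct record_page, records)) /
            sizeof(struct record),
    };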
/linux/Documentation/admin-guide/device-mapper/striped.rst
     6  device across one or more underlying devices. Data is written in "chunks",
     7  with consecutive chunks rotating among the underlying devices. This can
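The two lines quoted from striped.rst pin down the mapping: logical
chunk k lives on device (k mod N), at chunk slot (k div N) on that
device. A small sketch of the sector arithmetic (names are mine, not
dm-stripe's internals; assumes chunk_sectors and ndevs are non-zero):

    #include <stdint.h>

    static void stripe_map(uint64_t sector, uint32_t chunk_sectors,
                           uint32_t ndevs,
                           uint32_t *dev, uint64_t *dev_sector)
    {
        uint64_t chunk  = sector / chunk_sectors;  /* global chunk no. */
        uint64_t offset = sector % chunk_sectors;  /* within the chunk */

        *dev = chunk % ndevs;   /* consecutive chunks rotate devices */
        *dev_sector = (chunk / ndevs) * chunk_sectors + offset;
    }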