| /linux/kernel/dma/ |
| debug.h |
     24  int nelems, int dir);
     43  int nelems, int direction);
     47  int nelems, int direction);
     75  int nelems, int dir)  in debug_dma_unmap_sg() argument
    104  int nelems, int direction)  in debug_dma_sync_sg_for_cpu() argument
    110  int nelems, int direction)  in debug_dma_sync_sg_for_device() argument
|
| dummy.c |
     30  int nelems, enum dma_data_direction dir,  in dma_dummy_map_sg() argument
     37  int nelems, enum dma_data_direction dir,  in dma_dummy_unmap_sg() argument
|
| mapping.c |
    404  int nelems, enum dma_data_direction dir)  in __dma_sync_sg_for_cpu() argument
    410  dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);  in __dma_sync_sg_for_cpu()
    412  iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);  in __dma_sync_sg_for_cpu()
    414  ops->sync_sg_for_cpu(dev, sg, nelems, dir);  in __dma_sync_sg_for_cpu()
    415  trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);  in __dma_sync_sg_for_cpu()
    416  debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);  in __dma_sync_sg_for_cpu()
    421  int nelems, enum dma_data_direction dir)  in __dma_sync_sg_for_device() argument
    427  dma_direct_sync_sg_for_device(dev, sg, nelems, dir);  in __dma_sync_sg_for_device()
    429  iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);  in __dma_sync_sg_for_device()
    431  ops->sync_sg_for_device(dev, sg, nelems, dir);  in __dma_sync_sg_for_device()
|
| debug.c |
   1355  int nelems, int dir)  in debug_dma_unmap_sg() argument
   1363  for_each_sg(sglist, s, nelems, i) {  in debug_dma_unmap_sg()
   1372  .sg_call_ents = nelems,  in debug_dma_unmap_sg()
   1488  int nelems, int direction)  in debug_dma_sync_sg_for_cpu() argument
   1496  for_each_sg(sg, s, nelems, i) {  in debug_dma_sync_sg_for_cpu()
   1505  .sg_call_ents = nelems,  in debug_dma_sync_sg_for_cpu()
   1519  int nelems, int direction)  in debug_dma_sync_sg_for_device() argument
   1527  for_each_sg(sg, s, nelems, i) {  in debug_dma_sync_sg_for_device()
   1536  .sg_call_ents = nelems,  in debug_dma_sync_sg_for_device()
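Every implementation indexed in this directory treats nelems the same way: it is the number of scatterlist entries handed to the original map call, and the per-entry work is done with for_each_sg(). A minimal kernel-style sketch of that shared walk; example_sync_sg() is a hypothetical helper, not one of the functions listed above:

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper illustrating the for_each_sg() pattern above. */
static void example_sync_sg(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/*
	 * nelems must be the count passed to the original map_sg() call,
	 * not the (possibly smaller) count that map_sg() returned.
	 */
	for_each_sg(sgl, sg, nelems, i)
		dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					sg_dma_len(sg), dir);
}
```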
|
| /linux/drivers/xen/ |
| swiotlb-xen.c |
    346  xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,  in xen_swiotlb_unmap_sg() argument
    354  for_each_sg(sgl, sg, nelems, i)  in xen_swiotlb_unmap_sg()
    361  xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,  in xen_swiotlb_map_sg() argument
    369  for_each_sg(sgl, sg, nelems, i) {  in xen_swiotlb_map_sg()
    377  return nelems;  in xen_swiotlb_map_sg()
    386  int nelems, enum dma_data_direction dir)  in xen_swiotlb_sync_sg_for_cpu() argument
    391  for_each_sg(sgl, sg, nelems, i) {  in xen_swiotlb_sync_sg_for_cpu()
    399  int nelems, enum dma_data_direction dir)  in xen_swiotlb_sync_sg_for_device() argument
    404  for_each_sg(sgl, sg, nelems, i) {  in xen_swiotlb_sync_sg_for_device()
|
| /linux/arch/powerpc/kernel/ |
| dma-iommu.c |
    117  int nelems, enum dma_data_direction direction,  in dma_iommu_map_sg() argument
    120  return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,  in dma_iommu_map_sg()
    125  int nelems, enum dma_data_direction direction,  in dma_iommu_unmap_sg() argument
    128  ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,  in dma_iommu_unmap_sg()
|
| iommu.c |
    473  struct scatterlist *sglist, int nelems,  in ppc_iommu_map_sg() argument
    486  if ((nelems == 0) || !tbl)  in ppc_iommu_map_sg()
    491  incount = nelems;  in ppc_iommu_map_sg()
    497  DBG("sg mapping %d elements:\n", nelems);  in ppc_iommu_map_sg()
    500  for_each_sg(sglist, s, nelems, i) {  in ppc_iommu_map_sg()
    598  for_each_sg(sglist, s, nelems, i) {  in ppc_iommu_map_sg()
    616  int nelems, enum dma_data_direction direction,  in ppc_iommu_unmap_sg() argument
    627  while (nelems--) {  in ppc_iommu_unmap_sg()
|
| /linux/arch/sparc/kernel/ |
| iommu.c |
    441  int nelems, enum dma_data_direction direction,  in dma_4u_map_sg() argument
    458  if (nelems == 0 || !iommu)  in dma_4u_map_sg()
    476  incount = nelems;  in dma_4u_map_sg()
    485  for_each_sg(sglist, s, nelems, i) {  in dma_4u_map_sg()
    563  for_each_sg(sglist, s, nelems, i) {  in dma_4u_map_sg()
    614  int nelems, enum dma_data_direction direction,  in dma_4u_unmap_sg() argument
    632  while (nelems--) {  in dma_4u_unmap_sg()
    704  struct scatterlist *sglist, int nelems,  in dma_4u_sync_sg_for_cpu() argument
    736  for_each_sg(sglist, sg, nelems, i) {  in dma_4u_sync_sg_for_cpu()
|
| pci_sun4v.c |
    477  int nelems, enum dma_data_direction direction,  in dma_4v_map_sg() argument
    496  if (nelems == 0 || !iommu)  in dma_4v_map_sg()
    509  incount = nelems;  in dma_4v_map_sg()
    530  for_each_sg(sglist, s, nelems, i) {  in dma_4v_map_sg()
    611  for_each_sg(sglist, s, nelems, i) {  in dma_4v_map_sg()
    632  int nelems, enum dma_data_direction direction,  in dma_4v_unmap_sg() argument
    653  while (nelems--) {  in dma_4v_unmap_sg()
|
| /linux/include/linux/ |
| dma-mapping.h |
    399  int nelems, enum dma_data_direction dir);
    401  int nelems, enum dma_data_direction dir);
    425  struct scatterlist *sg, int nelems, enum dma_data_direction dir)  in dma_sync_sg_for_cpu() argument
    428  __dma_sync_sg_for_cpu(dev, sg, nelems, dir);  in dma_sync_sg_for_cpu()
    432  struct scatterlist *sg, int nelems, enum dma_data_direction dir)  in dma_sync_sg_for_device() argument
    435  __dma_sync_sg_for_device(dev, sg, nelems, dir);  in dma_sync_sg_for_device()
    457  struct scatterlist *sg, int nelems, enum dma_data_direction dir)  in dma_sync_sg_for_cpu() argument
    461  struct scatterlist *sg, int nelems, enum dma_data_direction dir)  in dma_sync_sg_for_device() argument
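On the caller side, dma-mapping.h fixes the nelems contract: dma_map_sg() may return fewer DMA segments than it was given (an IOMMU can merge entries), the returned count is what the device gets programmed with, but dma_sync_sg_for_*() and dma_unmap_sg() must still be passed the original nelems. A hedged driver-side sketch; example_rx_dma() is a hypothetical function with error handling trimmed:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_rx_dma(struct device *dev, struct scatterlist *sgl,
			  int nelems)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nelems, DMA_FROM_DEVICE);
	if (mapped == 0)
		return -ENOMEM;

	/* ... program the device with 'mapped' segments and wait ... */

	/* Sync and unmap take the ORIGINAL nelems, not 'mapped'. */
	dma_sync_sg_for_cpu(dev, sgl, nelems, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, sgl, nelems, DMA_FROM_DEVICE);
	return 0;
}
```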
|
| rhashtable.h |
    186  return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&  in rht_grow_above_75()
    199  return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&  in rht_shrink_below_30()
    211  return atomic_read(&ht->nelems) > tbl->size &&  in rht_grow_above_100()
    223  return atomic_read(&ht->nelems) >= ht->max_elems;  in rht_grow_above_max()
    842  atomic_inc(&ht->nelems);  in __rhashtable_insert_fast()
   1112  atomic_dec(&ht->nelems);  in __rhashtable_remove_fast_one()
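Here nelems is a live element counter driving the resize policy: grow when the table is more than 75% full, shrink when it falls below 30%. A standalone sketch of just that integer arithmetic (userspace C for illustration, not kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

static bool grow_above_75(unsigned int nelems, unsigned int size)
{
	return nelems > (size / 4 * 3);	/* more than 75% full */
}

static bool shrink_below_30(unsigned int nelems, unsigned int size)
{
	return nelems < (size * 3 / 10);	/* less than 30% full */
}

int main(void)
{
	unsigned int size = 1024;

	/* Thresholds for a 1024-bucket table: grow above 768, shrink below 307. */
	printf("grow>%u shrink<%u\n", size / 4 * 3, size * 3 / 10);
	printf("nelems=800: grow=%d\n", grow_above_75(800, size));	/* 1 */
	printf("nelems=300: shrink=%d\n", shrink_below_30(300, size));	/* 1 */
	return 0;
}
```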
|
| /linux/arch/x86/kernel/ |
| amd_gart_64.c |
    324  int nelems, struct scatterlist *sout,  in __dma_map_cont() argument
    335  for_each_sg(start, s, nelems, i) {  in __dma_map_cont()
    362  dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,  in dma_map_cont() argument
    366  BUG_ON(nelems != 1);  in dma_map_cont()
    371  return __dma_map_cont(dev, start, nelems, sout, pages);  in dma_map_cont()
|
| /linux/lib/ |
| rhashtable.c |
    397  unsigned int nelems = atomic_read(&ht->nelems);  in rhashtable_shrink() local
    400  if (nelems)  in rhashtable_shrink()
    401  size = roundup_pow_of_two(nelems * 3 / 2);  in rhashtable_shrink()
    623  atomic_inc(&ht->nelems);  in rhashtable_try_insert()
   1081  atomic_set(&ht->nelems, 0);  in rhashtable_init_noprof()
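rhashtable_shrink() sizes the new table from nelems: the smallest power of two at or above nelems * 3/2, which keeps the shrunken table's load at roughly 66% or less, safely under the 75% grow threshold. A worked standalone example; roundup_pow_of_two() is reimplemented here only for illustration (the kernel's is a macro in <linux/log2.h>):

```c
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int nelems = 500;

	/* 500 * 3 / 2 = 750, rounded up to 1024; 500/1024 is ~49% full. */
	printf("new size = %u\n", roundup_pow_of_two(nelems * 3 / 2));
	return 0;
}
```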
|
| test_rhashtable.c |
    202  pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",  in test_bucket_stats()
    203  total, atomic_read(&ht->nelems), entries, chain_len);  in test_bucket_stats()
    205  if (total != atomic_read(&ht->nelems) || total != entries)  in test_bucket_stats()
|
| /linux/arch/powerpc/include/asm/ |
| iommu.h |
    262  struct scatterlist *sglist, int nelems,
    268  int nelems,
|
| /linux/include/uapi/linux/ |
| btf.h |
    115  __u32 nelems;  member
|
| /linux/tools/include/uapi/linux/ |
| btf.h |
    115  __u32 nelems;  member
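In both copies of the BTF UAPI header, nelems is the element count of a BTF_KIND_ARRAY: a struct btf_array { type, index_type, nelems } record placed immediately after the struct btf_type header. A small userspace sketch of reading it, matching what the btf_helpers.c and bpftool hits below print; example_print_array() is hypothetical and assumes the caller already verified the kind:

```c
#include <stdio.h>
#include <linux/btf.h>	/* UAPI: struct btf_type, struct btf_array */

static void example_print_array(const struct btf_type *t)
{
	/*
	 * For BTF_KIND_ARRAY, the btf_array record follows the btf_type
	 * header directly; check BTF_INFO_KIND(t->info) before this.
	 */
	const struct btf_array *arr = (const struct btf_array *)(t + 1);

	printf("type=%u index_type=%u nelems=%u\n",
	       arr->type, arr->index_type, arr->nelems);
}
```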
|
| /linux/arch/powerpc/platforms/pseries/ |
| vio.c |
    555  int nelems, enum dma_data_direction direction,  in vio_dma_iommu_map_sg() argument
    564  for_each_sg(sglist, sgl, nelems, count)  in vio_dma_iommu_map_sg()
    570  ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),  in vio_dma_iommu_map_sg()
    589  struct scatterlist *sglist, int nelems,  in vio_dma_iommu_unmap_sg() argument
    599  for_each_sg(sglist, sgl, nelems, count)  in vio_dma_iommu_unmap_sg()
    602  ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);  in vio_dma_iommu_unmap_sg()
|
| /linux/tools/lib/bpf/ |
| btf_relocate.c |
    104  int nelems)  in search_btf_name_size() argument
    107  int high = nelems - 1;  in search_btf_name_size()
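The `high = nelems - 1` hit in search_btf_name_size() is the standard inclusive-bounds binary search over a sorted array of nelems entries. A generic standalone version of the pattern (illustrative only, not libbpf's code):

```c
#include <stdio.h>

static int search(const int *a, int nelems, int key)
{
	int low = 0, high = nelems - 1;	/* inclusive bounds */

	while (low <= high) {
		int mid = low + (high - low) / 2;	/* avoids overflow */

		if (a[mid] == key)
			return mid;
		if (a[mid] < key)
			low = mid + 1;
		else
			high = mid - 1;
	}
	return -1;	/* not found */
}

int main(void)
{
	int a[] = { 1, 3, 5, 7, 9 };

	printf("%d\n", search(a, 5, 7));	/* prints 3 */
	return 0;
}
```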
|
| /linux/net/netfilter/ |
| nft_dynset.c |
     56  if (!atomic_add_unless(&set->nelems, 1, set->size))  in nft_dynset_new()
     77  atomic_dec(&set->nelems);  in nft_dynset_new()
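nft_dynset uses nelems as a capacity-bounded counter: atomic_add_unless() refuses the increment once the set already holds set->size elements, and atomic_dec() rolls the reservation back if the insert later fails. A kernel-style sketch of the pattern with hypothetical names; returning -ENOSPC is an assumption for illustration, not what nft_dynset_new() does:

```c
#include <linux/atomic.h>
#include <linux/errno.h>

static int example_reserve_slot(atomic_t *nelems, int limit)
{
	/* Fails (returns 0) without incrementing once *nelems == limit. */
	if (!atomic_add_unless(nelems, 1, limit))
		return -ENOSPC;
	return 0;
}

static void example_unreserve_slot(atomic_t *nelems)
{
	atomic_dec(nelems);	/* insert failed: release the reserved slot */
}
```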
|
| nf_tables_api.c |
    457  return a->set == b->set && a->bound == b->bound && a->nelems < NFT_MAX_SET_NELEMS;  in nft_trans_collapse_set_elem()
    464  unsigned int nelems, old_nelems = tail->nelems;  in nft_trans_collapse_set_elem() local
    476  if (WARN_ON_ONCE(trans->nelems != 1))  in nft_trans_collapse_set_elem()
    479  if (check_add_overflow(old_nelems, trans->nelems, &nelems))  in nft_trans_collapse_set_elem()
    485  new_trans = krealloc(tail, struct_size(tail, elems, nelems),  in nft_trans_try_collapse()
    497  new_trans->nelems = nelems;  in nft_trans_try_collapse()
   4914  unsigned int nelems;  in nf_tables_fill_set_info() local
   6588  int rem, err = 0, nelems = 0;  in nf_tables_getsetelem_reset() local
| /linux/drivers/iommu/ |
| dma-iommu.c |
   1120  int nelems, enum dma_data_direction dir)  in iommu_dma_sync_sg_for_cpu() argument
   1126  for_each_sg(sgl, sg, nelems, i)  in iommu_dma_sync_sg_for_cpu()
   1130  for_each_sg(sgl, sg, nelems, i)  in iommu_dma_sync_sg_for_cpu()
   1135  int nelems, enum dma_data_direction dir)  in iommu_dma_sync_sg_for_device() argument
   1141  for_each_sg(sgl, sg, nelems, i)  in iommu_dma_sync_sg_for_device()
   1146  for_each_sg(sgl, sg, nelems, i)  in iommu_dma_sync_sg_for_device()
|
| /linux/tools/testing/selftests/bpf/ |
| btf_helpers.c |
    121  arr->type, arr->index_type, arr->nelems);  in fprintf_btf_type_raw()
|
| /linux/tools/bpf/bpftool/ |
| btf.c |
    171  jsonw_uint_field(w, "nr_elems", arr->nelems);  in dump_btf_type()
    174  arr->type, arr->index_type, arr->nelems);  in dump_btf_type()
    717  hash = hasher(hash, arr->nelems);  in btf_type_disambig_hash()
|
| /linux/include/net/netfilter/ |
| nf_tables.h |
    561  * @nelems: number of elements
    596  atomic_t nelems;  member
   1801  unsigned int nelems;  member
   1802  struct nft_trans_one_elem elems[] __counted_by(nelems);
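These nf_tables.h hits show the modern flexible-array idiom: the trailing elems[] is annotated __counted_by(nelems) so compiler and fortify bounds checks can use the count, and the nf_tables_api.c hits above grow it with check_add_overflow() plus struct_size()/krealloc(). A kernel-style sketch of that growth step with hypothetical type and function names:

```c
#include <linux/overflow.h>	/* check_add_overflow(), struct_size() */
#include <linux/slab.h>		/* krealloc() */

struct example_trans {
	unsigned int nelems;
	int elems[] __counted_by(nelems);
};

static struct example_trans *example_grow(struct example_trans *t,
					  unsigned int extra)
{
	struct example_trans *new;
	unsigned int nelems;

	if (check_add_overflow(t->nelems, extra, &nelems))
		return NULL;	/* count would overflow */

	/*
	 * struct_size() computes sizeof(*t) + nelems * sizeof(t->elems[0])
	 * with overflow checking, matching the krealloc() hit above.
	 */
	new = krealloc(t, struct_size(t, elems, nelems), GFP_KERNEL);
	if (!new)
		return NULL;	/* the old buffer 't' is still valid here */

	new->nelems = nelems;
	return new;
}
```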
|