/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
rqt.c
    145  rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);  in mlx5e_rqt_init_indir()
    150  rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids), ...  in mlx5e_rqt_init_indir()
    243  rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);  in mlx5e_rqt_redirect_indir()
    248  rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids), ...  in mlx5e_rqt_redirect_indir()
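All of the call sites collected in this listing use the same contract: kvmalloc_array(n, size, gfp) multiplies the element count by the element size with overflow checking, tries a physically contiguous kmalloc first, and falls back to vmalloc when the request is large or kmalloc fails; the returned buffer must be released with kvfree(), which handles both cases. A minimal sketch of the pattern, with hypothetical names (nothing below is taken from the files in this listing):

#include <linux/types.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate a table of n queue indices. */
static u32 *demo_alloc_table(size_t n)
{
	u32 *table;

	/*
	 * The n * sizeof(*table) multiplication is overflow-checked
	 * inside kvmalloc_array(); on overflow it returns NULL rather
	 * than allocating a too-short buffer.
	 */
	table = kvmalloc_array(n, sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;
	return table;
}

static void demo_free_table(u32 *table)
{
	kvfree(table);	/* correct for both the kmalloc and vmalloc cases */
}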
|
/linux/scripts/coccinelle/api/
kfree_mismatch.cocci
    58   kvmalloc_array\)(...)
    124  * kvmalloc_array\)(...)@a
    134  kvmalloc_array\)(...)
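kfree_mismatch.cocci matches kvmalloc_array() as one of the kvmalloc-family allocators and warns when the resulting pointer reaches kfree(): because the allocation may have come from the vmalloc fallback, only kvfree() is safe. A hedged illustration of the mismatch the script reports (buf and n are made-up names):

	/* WRONG: flagged by kfree_mismatch.cocci */
	buf = kvmalloc_array(n, sizeof(*buf), GFP_KERNEL);
	...
	kfree(buf);	/* undefined if the vmalloc fallback was taken */

	/* RIGHT */
	buf = kvmalloc_array(n, sizeof(*buf), GFP_KERNEL);
	...
	kvfree(buf);	/* handles both kmalloc- and vmalloc-backed memory */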
|
/linux/drivers/dma-buf/
udmabuf.c
    118  pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);  in vmap_udmabuf()
    210  ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);  in init_udmabuf()
    218  ubuf->pinned_folios = kvmalloc_array(pgcnt, ...  in init_udmabuf()
    411  folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);  in udmabuf_create()
|
st-dma-fence-chain.c
    119  fc->chains = kvmalloc_array(count, sizeof(*fc->chains), ...  in fence_chains_init()
    124  fc->fences = kvmalloc_array(count, sizeof(*fc->fences), ...  in fence_chains_init()
|
/linux/io_uring/
alloc_cache.c
    25  cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);  in io_alloc_cache_init()
|
memmap.c
    59   pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);  in io_pin_pages()
    164  pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);  in io_region_allocate_pages()
|
zcrx.c
    431  area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]), ...  in io_zcrx_create_area()
    436  area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]), ...  in io_zcrx_create_area()
    441  area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]), ...  in io_zcrx_create_area()
|
/linux/lib/
bucket_locks.c
    34  tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);  in __alloc_bucket_spinlocks()
|
test_kho.c
    193  folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL);  in kho_test_save()
|
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
mem.c
    194  if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL)))  in nvkm_mem_new_host()
    196  if (!(mem->dma = kvmalloc_array(size, sizeof(*mem->dma), GFP_KERNEL)))  in nvkm_mem_new_host()
|
/linux/kernel/dma/
remap.c
    48  pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);  in dma_common_contiguous_remap()
|
/linux/drivers/gpu/drm/nouveau/dispnv50/
lut.c
    41  in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);  in nv50_lut_load()
|
/linux/drivers/xen/
gntdev.c
    148  add->grants = kvmalloc_array(count, sizeof(add->grants[0]), ...  in gntdev_alloc_map()
    150  add->map_ops = kvmalloc_array(count, sizeof(add->map_ops[0]), ...  in gntdev_alloc_map()
    152  add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]), ...  in gntdev_alloc_map()
    164  add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]), ...  in gntdev_alloc_map()
    166  add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]), ...  in gntdev_alloc_map()
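gntdev_alloc_map() above shows a common multi-array shape: several parallel kvmalloc_array() calls followed by one combined NULL check, with the error path kvfree()ing everything because kvfree(NULL) is a no-op. A condensed sketch of that structure with hypothetical fields (the real gntdev map allocates more arrays and takes extra arguments):

#include <linux/types.h>
#include <linux/slab.h>

struct demo_map {
	u32 *grants;
	u32 *map_ops;
};

static struct demo_map *demo_alloc_map(int count)
{
	struct demo_map *add;

	add = kzalloc(sizeof(*add), GFP_KERNEL);
	if (!add)
		return NULL;

	add->grants = kvmalloc_array(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops = kvmalloc_array(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	if (!add->grants || !add->map_ops)
		goto err;

	return add;

err:
	/* kvfree(NULL) is a no-op, so a partial failure unwinds safely. */
	kvfree(add->grants);
	kvfree(add->map_ops);
	kfree(add);
	return NULL;
}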
|
/linux/drivers/gpu/drm/
drm_gpusvm.c
    711   pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);  in drm_gpusvm_check_pages()
    1295  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);  in drm_gpusvm_get_pages()
    1349  ... kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL);  in drm_gpusvm_get_pages()
    1562  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);  in drm_gpusvm_range_evict()
|
drm_gem.c
    647  pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in drm_gem_get_pages()
    795  objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), ...  in drm_gem_objects_lookup()
    802  handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);  in drm_gem_objects_lookup()
|
/linux/drivers/gpu/drm/etnaviv/
etnaviv_gem_prime.c
    129  etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in etnaviv_gem_prime_import_sg_table()
|
/linux/tools/virtio/ringtest/
ptr_ring.c
    61  #define kvmalloc_array kmalloc_array  (macro)
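This is the only non-kernel hit: the virtio ring test harness compiles kernel ring buffer code in userspace, so it aliases kvmalloc_array to its kmalloc_array stand-in; an ordinary process has no vmalloc fallback worth emulating. A rough sketch of what such a shim layer can look like on top of libc (illustrative only, not the actual harness code):

#include <stdint.h>
#include <stdlib.h>

/* Userspace stand-ins for the kernel allocator API (sketch). */
#define GFP_KERNEL 0	/* gfp flags carry no meaning in a process */

static inline void *kmalloc_array(size_t n, size_t size, int gfp)
{
	(void)gfp;
	if (size && n > SIZE_MAX / size)
		return NULL;	/* mirror the kernel's overflow check */
	return malloc(n * size);
}

#define kvmalloc_array kmalloc_array	/* no vmalloc fallback needed */
#define kvfree free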
|
/linux/drivers/net/ethernet/wangxun/txgbe/
txgbe_hw.c
    89  eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16), ...  in txgbe_calc_eeprom_checksum()
|
/linux/net/core/
devmem.c
    234  binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE, ...  in net_devmem_bind_dmabuf()
    282  owner->area.niovs = kvmalloc_array(owner->area.num_niovs, ...  in net_devmem_bind_dmabuf()
|
/linux/drivers/staging/media/ipu3/
ipu3-dmamap.c
    42  pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);  in imgu_dmamap_alloc_buffer()
|
/linux/drivers/gpu/drm/i915/gem/
i915_gem_pages.c
    315  pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);  in i915_gem_object_map_page()
    345  pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);  in i915_gem_object_map_pfn()
|
/linux/kernel/module/
decompress.c
    21  new_pages = kvmalloc_array(info->max_pages + extent, ...  in module_extend_max_pages()
|
/linux/drivers/gpu/drm/virtio/
virtgpu_prime.c
    167  *ents = kvmalloc_array(sgt->nents, ...  in virtgpu_dma_buf_import_sgt()
|
/linux/drivers/gpu/drm/radeon/
radeon_ring.c
    325  *data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);  in radeon_ring_backup()
|
/linux/drivers/infiniband/hw/mlx4/
srq.c
    172  srq->wrid = kvmalloc_array(srq->msrq.max, ...  in mlx4_ib_create_srq()
|