Lines matching the identifier "owner" (full-word match) in net/core/devmem.c. Each hit shows the source line number, the matching source line, and the enclosing function; "local" flags the lines where owner is declared as a local variable.
42 struct dmabuf_genpool_chunk_owner *owner = chunk->owner; in net_devmem_dmabuf_free_chunk_owner() local
44 kvfree(owner->area.niovs); in net_devmem_dmabuf_free_chunk_owner()
45 kfree(owner); in net_devmem_dmabuf_free_chunk_owner()
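The three hits above (lines 42-45) all sit in the genpool chunk destructor. For context, here is a minimal sketch of the surrounding function, reconstructed from the matched lines; the signature follows the gen_pool chunk-callback convention, so treat it as a reading aid rather than the verbatim source:

    /* gen_pool chunk destructor: runs once per chunk when the pool is
     * destroyed. Each chunk's private data is its dmabuf_genpool_chunk_owner,
     * which in turn owns the net_iov array backing that chunk.
     */
    static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
                                                   struct gen_pool_chunk *chunk,
                                                   void *not_used)
    {
            struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

            kvfree(owner->area.niovs);      /* array came from kvmalloc_array() */
            kfree(owner);                   /* the owner struct itself */
    }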
50 struct dmabuf_genpool_chunk_owner *owner; in net_devmem_get_dma_addr() local
52 owner = net_devmem_iov_to_chunk_owner(niov); in net_devmem_get_dma_addr()
53 return owner->base_dma_addr + in net_devmem_get_dma_addr()
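Lines 50-53 are the reverse mapping: given a net_iov, recover its device DMA address from the chunk owner. The continuation of the return statement contains no "owner" and is therefore not listed; in the sketch below the page-shifted index term is filled in as an assumption:

    static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
    {
            struct dmabuf_genpool_chunk_owner *owner;

            owner = net_devmem_iov_to_chunk_owner(niov);
            /* chunk base address plus the niov's page offset within the
             * chunk; the shift term is reconstructed, not matched above */
            return owner->base_dma_addr +
                   ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
    }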
85 struct dmabuf_genpool_chunk_owner *owner; in net_devmem_alloc_dmabuf() local
92 (void **)&owner); in net_devmem_alloc_dmabuf()
96 offset = dma_addr - owner->base_dma_addr; in net_devmem_alloc_dmabuf()
98 niov = &owner->area.niovs[index]; in net_devmem_alloc_dmabuf()
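Lines 85-98 are the allocation path: gen_pool_alloc_owner() hands back the chunk's private owner pointer alongside the allocated DMA address, and the offset from the chunk's base DMA address pins down which net_iov backs the allocation. A sketch with the non-matching lines filled in under that assumption (any per-niov state resets the real function performs are elided):

    struct net_iov *
    net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
    {
            struct dmabuf_genpool_chunk_owner *owner;
            unsigned long dma_addr;
            struct net_iov *niov;
            ssize_t offset;
            ssize_t index;

            /* PAGE_SIZE granularity: one allocation == one net_iov */
            dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
                                            (void **)&owner);
            if (!dma_addr)
                    return NULL;

            offset = dma_addr - owner->base_dma_addr;
            index = offset / PAGE_SIZE;
            niov = &owner->area.niovs[index];

            return niov;
    }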
258 struct dmabuf_genpool_chunk_owner *owner; in net_devmem_bind_dmabuf() local
262 owner = kzalloc_node(sizeof(*owner), GFP_KERNEL, in net_devmem_bind_dmabuf()
264 if (!owner) { in net_devmem_bind_dmabuf()
269 owner->area.base_virtual = virtual; in net_devmem_bind_dmabuf()
270 owner->base_dma_addr = dma_addr; in net_devmem_bind_dmabuf()
271 owner->area.num_niovs = len / PAGE_SIZE; in net_devmem_bind_dmabuf()
272 owner->binding = binding; in net_devmem_bind_dmabuf()
276 owner); in net_devmem_bind_dmabuf()
278 kfree(owner); in net_devmem_bind_dmabuf()
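Lines 258-278 are the first half of the per-chunk setup in net_devmem_bind_dmabuf(): one owner is allocated per DMA-mapped scatterlist entry and registered with the genpool, keyed by the entry's DMA range. The sketch below bridges the matched lines; identifiers such as sg, dev, virtual, err and the err_free_chunks label are assumed from context, as is the surrounding for_each_sgtable_dma_sg() loop:

    for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
            dma_addr_t dma_addr = sg_dma_address(sg);
            size_t len = sg_dma_len(sg);

            owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
                                 dev_to_node(&dev->dev));
            if (!owner) {
                    err = -ENOMEM;
                    goto err_free_chunks;
            }

            /* record where this chunk sits in the flat dmabuf layout */
            owner->area.base_virtual = virtual;
            owner->base_dma_addr = dma_addr;
            owner->area.num_niovs = len / PAGE_SIZE;
            owner->binding = binding;

            err = gen_pool_add_owner(binding->chunk_pool, dma_addr, dma_addr,
                                     len, dev_to_node(&dev->dev), owner);
            if (err) {
                    kfree(owner);   /* the pool never took ownership */
                    goto err_free_chunks;
            }

            /* net_iov array setup continues below (lines 283-298) */

            virtual += len;
    }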
283 owner->area.niovs = kvmalloc_array(owner->area.num_niovs, in net_devmem_bind_dmabuf()
284 sizeof(*owner->area.niovs), in net_devmem_bind_dmabuf()
286 if (!owner->area.niovs) { in net_devmem_bind_dmabuf()
291 for (i = 0; i < owner->area.num_niovs; i++) { in net_devmem_bind_dmabuf()
292 niov = &owner->area.niovs[i]; in net_devmem_bind_dmabuf()
294 niov->owner = &owner->area; in net_devmem_bind_dmabuf()
298 binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov; in net_devmem_bind_dmabuf()
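Lines 283-298 finish the per-chunk setup: the owner gets its array of net_iov descriptors, each descriptor is pointed back at its owning area, and TX bindings additionally publish the niov in a binding-wide lookup vector indexed by dmabuf page offset. In this sketch the DMA_TO_DEVICE guard around the tx_vec store is an assumption from context; the match list shows only the assignment:

    owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
                                       sizeof(*owner->area.niovs),
                                       GFP_KERNEL);
    if (!owner->area.niovs) {
            err = -ENOMEM;
            goto err_free_chunks;
    }

    for (i = 0; i < owner->area.num_niovs; i++) {
            niov = &owner->area.niovs[i];
            /* back-pointer consumed by net_devmem_iov_to_chunk_owner() */
            niov->owner = &owner->area;
            /* assumed guard: only TX bindings keep the flat lookup table */
            if (direction == DMA_TO_DEVICE)
                    binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
    }

Note how the two allocations here (kzalloc_node() for the owner, kvmalloc_array() for the niov array) pair exactly with the kfree()/kvfree() in the chunk destructor at lines 42-45.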