Lines matching refs: binding (one entry per matching source line, annotated with the enclosing function and, where the match is a declaration, whether it is a local or an argument)
59 struct net_devmem_dmabuf_binding *binding = container_of(wq, typeof(*binding), unbind_w); in __net_devmem_dmabuf_binding_free() local
63 gen_pool_for_each_chunk(binding->chunk_pool, in __net_devmem_dmabuf_binding_free()
66 size = gen_pool_size(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
67 avail = gen_pool_avail(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
71 gen_pool_destroy(binding->chunk_pool); in __net_devmem_dmabuf_binding_free()
73 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in __net_devmem_dmabuf_binding_free()
74 binding->direction); in __net_devmem_dmabuf_binding_free()
75 dma_buf_detach(binding->dmabuf, binding->attachment); in __net_devmem_dmabuf_binding_free()
76 dma_buf_put(binding->dmabuf); in __net_devmem_dmabuf_binding_free()
77 xa_destroy(&binding->bound_rxqs); in __net_devmem_dmabuf_binding_free()
78 kvfree(binding->tx_vec); in __net_devmem_dmabuf_binding_free()
79 kfree(binding); in __net_devmem_dmabuf_binding_free()
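
The matches above cover the deferred free path: the binding is recovered from its work item with container_of(), the per-chunk owners are released via gen_pool_for_each_chunk(), the pool is checked for leaks (gen_pool_size() vs gen_pool_avail()) before gen_pool_destroy(), then the dma-buf attachment is unmapped, detached and dropped, and finally the xarray, the TX vector and the binding itself are freed. Below is a minimal sketch of that leak-check-then-destroy ordering; the cut-down struct and the demo_* names are assumptions that mirror the field names seen in this listing, not the real net_devmem_dmabuf_binding definition.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>

/* Hypothetical, cut-down binding; field names follow the listing above. */
struct demo_binding {
	struct work_struct unbind_w;
	struct gen_pool *chunk_pool;
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	enum dma_data_direction direction;
	struct xarray bound_rxqs;
	void **tx_vec;
};

/* Callback shape expected by gen_pool_for_each_chunk(): the owner pointer
 * registered via gen_pool_add_owner() lives in the chunk and is not freed
 * by gen_pool_destroy(), so it has to be released here. */
static void demo_free_chunk_owner(struct gen_pool *genpool,
				  struct gen_pool_chunk *chunk, void *not_used)
{
	kfree(chunk->owner);
}

static void demo_binding_free(struct work_struct *wq)
{
	struct demo_binding *b = container_of(wq, struct demo_binding, unbind_w);
	size_t size, avail;

	gen_pool_for_each_chunk(b->chunk_pool, demo_free_chunk_owner, NULL);

	/* Every slice must be back in the pool; gen_pool_destroy() BUGs on
	 * outstanding allocations, so warn and skip it instead. */
	size = gen_pool_size(b->chunk_pool);
	avail = gen_pool_avail(b->chunk_pool);
	if (!WARN(size != avail, "chunk pool still has outstanding allocations"))
		gen_pool_destroy(b->chunk_pool);

	/* Undo the dma-buf setup in strict reverse order of the bind path. */
	dma_buf_unmap_attachment_unlocked(b->attachment, b->sgt, b->direction);
	dma_buf_detach(b->dmabuf, b->attachment);
	dma_buf_put(b->dmabuf);

	xa_destroy(&b->bound_rxqs);
	kvfree(b->tx_vec);
	kfree(b);
}
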
83 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_alloc_dmabuf() argument
91 dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE, in net_devmem_alloc_dmabuf()
109 struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov); in net_devmem_free_dmabuf() local
112 if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, in net_devmem_free_dmabuf()
116 gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE); in net_devmem_free_dmabuf()
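
net_devmem_alloc_dmabuf() and net_devmem_free_dmabuf() reduce to the genalloc owner API: carve a PAGE_SIZE slice out of the chunk pool and get back the per-chunk owner pointer registered at bind time; on free, sanity-check the address against the pool before returning it. A hedged sketch of that pattern follows; the owner type is a stand-in for the real chunk-owner bookkeeping.

#include <linux/bug.h>
#include <linux/genalloc.h>
#include <linux/mm.h>

/* Hypothetical per-chunk owner handed back by gen_pool_alloc_owner(). */
struct demo_chunk_owner {
	unsigned long base_virtual;	/* offset of this chunk in the dma-buf */
};

static unsigned long demo_alloc_slice(struct gen_pool *pool, void **ownerp)
{
	/* PAGE_SIZE granularity matches the PAGE_SHIFT min order used at
	 * gen_pool_create() time; the owner pointer set via
	 * gen_pool_add_owner() comes back through @ownerp. */
	return gen_pool_alloc_owner(pool, PAGE_SIZE, ownerp);
}

static void demo_free_slice(struct gen_pool *pool, unsigned long dma_addr)
{
	/* Catch double frees / stray addresses before handing the slice back. */
	if (WARN_ON(!gen_pool_has_addr(pool, dma_addr, PAGE_SIZE)))
		return;

	gen_pool_free(pool, dma_addr, PAGE_SIZE);
}
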
119 void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) in net_devmem_unbind_dmabuf() argument
125 xa_erase(&net_devmem_dmabuf_bindings, binding->id); in net_devmem_unbind_dmabuf()
132 if (binding->list.next) in net_devmem_unbind_dmabuf()
133 list_del(&binding->list); in net_devmem_unbind_dmabuf()
135 xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { in net_devmem_unbind_dmabuf()
137 .mp_priv = binding, in net_devmem_unbind_dmabuf()
143 __net_mp_close_rxq(binding->dev, rxq_idx, &mp_params); in net_devmem_unbind_dmabuf()
146 net_devmem_dmabuf_binding_put(binding); in net_devmem_unbind_dmabuf()
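
net_devmem_unbind_dmabuf() erases the binding from the global xarray, unlinks it from the bindings list it joined at bind time (the binding->list.next check guards the never-listed case), closes each still-bound RX queue, and then drops its reference with net_devmem_dmabuf_binding_put(). The get/put helpers themselves are not part of this listing; the sketch below shows a plausible shape for them, with the deferred free from the first group. This is an assumption about how such helpers are typically built, not their actual definition.

#include <linux/refcount.h>
#include <linux/workqueue.h>

/* Hypothetical: just the refcounting side of the binding. */
struct demo_binding_ref {
	refcount_t ref;
	struct work_struct unbind_w;
};

static void demo_binding_free_work(struct work_struct *wq)
{
	/* container_of(wq, ...) plus the teardown from the first sketch. */
}

static bool demo_binding_get(struct demo_binding_ref *b)
{
	/* Lookups must not resurrect a binding that already hit zero. */
	return refcount_inc_not_zero(&b->ref);
}

static void demo_binding_put(struct demo_binding_ref *b)
{
	if (!refcount_dec_and_test(&b->ref))
		return;

	/* Last reference: defer the dma-buf teardown to process context. */
	INIT_WORK(&b->unbind_w, demo_binding_free_work);
	schedule_work(&b->unbind_w);
}
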
150 struct net_devmem_dmabuf_binding *binding, in net_devmem_bind_dmabuf_to_queue() argument
154 .mp_priv = binding, in net_devmem_bind_dmabuf_to_queue()
166 err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, in net_devmem_bind_dmabuf_to_queue()
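
Binding to a queue stores the rx-queue pointer in binding->bound_rxqs, an allocating xarray, so the uninstall path can later walk and erase it. A minimal sketch of that xa_alloc() usage with 32-bit index limits is below; the rxq argument is just a placeholder and the xarray is assumed to have been initialized with XA_FLAGS_ALLOC.

#include <linux/gfp.h>
#include <linux/xarray.h>

static int demo_track_rxq(struct xarray *bound_rxqs, void *rxq)
{
	u32 xa_idx;

	/* xa_limit_32b keeps indices in u32 range; GFP_KERNEL because this
	 * runs from the netlink bind request, in process context. */
	return xa_alloc(bound_rxqs, &xa_idx, rxq, xa_limit_32b, GFP_KERNEL);
}

static void demo_untrack_rxq(struct xarray *bound_rxqs, u32 xa_idx)
{
	xa_erase(bound_rxqs, xa_idx);
}
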
185 struct net_devmem_dmabuf_binding *binding; in net_devmem_bind_dmabuf() local
202 binding = kzalloc_node(sizeof(*binding), GFP_KERNEL, in net_devmem_bind_dmabuf()
204 if (!binding) { in net_devmem_bind_dmabuf()
209 binding->dev = dev; in net_devmem_bind_dmabuf()
210 xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC); in net_devmem_bind_dmabuf()
212 refcount_set(&binding->ref, 1); in net_devmem_bind_dmabuf()
214 mutex_init(&binding->lock); in net_devmem_bind_dmabuf()
216 binding->dmabuf = dmabuf; in net_devmem_bind_dmabuf()
217 binding->direction = direction; in net_devmem_bind_dmabuf()
219 binding->attachment = dma_buf_attach(binding->dmabuf, dma_dev); in net_devmem_bind_dmabuf()
220 if (IS_ERR(binding->attachment)) { in net_devmem_bind_dmabuf()
221 err = PTR_ERR(binding->attachment); in net_devmem_bind_dmabuf()
226 binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment, in net_devmem_bind_dmabuf()
228 if (IS_ERR(binding->sgt)) { in net_devmem_bind_dmabuf()
229 err = PTR_ERR(binding->sgt); in net_devmem_bind_dmabuf()
235 binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE, in net_devmem_bind_dmabuf()
238 if (!binding->tx_vec) { in net_devmem_bind_dmabuf()
248 binding->chunk_pool = gen_pool_create(PAGE_SHIFT, in net_devmem_bind_dmabuf()
250 if (!binding->chunk_pool) { in net_devmem_bind_dmabuf()
256 for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) { in net_devmem_bind_dmabuf()
272 owner->binding = binding; in net_devmem_bind_dmabuf()
274 err = gen_pool_add_owner(binding->chunk_pool, dma_addr, in net_devmem_bind_dmabuf()
298 binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov; in net_devmem_bind_dmabuf()
304 err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id, in net_devmem_bind_dmabuf()
305 binding, xa_limit_32b, &id_alloc_next, in net_devmem_bind_dmabuf()
310 list_add(&binding->list, &priv->bindings); in net_devmem_bind_dmabuf()
312 return binding; in net_devmem_bind_dmabuf()
315 gen_pool_for_each_chunk(binding->chunk_pool, in net_devmem_bind_dmabuf()
317 gen_pool_destroy(binding->chunk_pool); in net_devmem_bind_dmabuf()
319 kvfree(binding->tx_vec); in net_devmem_bind_dmabuf()
321 dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, in net_devmem_bind_dmabuf()
324 dma_buf_detach(dmabuf, binding->attachment); in net_devmem_bind_dmabuf()
326 kfree(binding); in net_devmem_bind_dmabuf()
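
The net_devmem_bind_dmabuf() matches trace the whole setup sequence: allocate the binding on the device's NUMA node, initialize the rxq xarray, refcount and lock, attach and map the dma-buf for DMA, size a tx_vec lookup table with one slot per PAGE_SIZE of dma-buf, build a genpool keyed by DMA address, register each scatterlist entry as a pool chunk with a per-chunk owner, publish the binding under a cyclically allocated 32-bit id (xa_alloc_cyclic), and unwind all of it in reverse order on error. Below is a condensed, hedged sketch of the attach/map/genpool portion of that sequence; the owner struct, function name and error labels are illustrative, not the real ones.

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct demo_chunk_owner {
	unsigned long base_virtual;	/* offset of this chunk in the dma-buf */
};

static int demo_map_dmabuf(struct dma_buf *dmabuf, struct device *dma_dev,
			   enum dma_data_direction dir,
			   struct dma_buf_attachment **attachp,
			   struct sg_table **sgtp, struct gen_pool **poolp)
{
	struct dma_buf_attachment *attach;
	struct gen_pool *pool;
	struct scatterlist *sg;
	struct sg_table *sgt;
	unsigned long virtual = 0;
	unsigned int i;
	int err;

	attach = dma_buf_attach(dmabuf, dma_dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	sgt = dma_buf_map_attachment_unlocked(attach, dir);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto err_detach;
	}

	/* PAGE_SHIFT min order: allocations come out in page-sized slices. */
	pool = gen_pool_create(PAGE_SHIFT, dev_to_node(dma_dev));
	if (!pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	/* One genpool chunk per DMA-mapped scatterlist entry, with an owner
	 * recording where the chunk sits inside the dma-buf. */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		struct demo_chunk_owner *owner;

		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			err = -ENOMEM;
			goto err_pool;
		}
		owner->base_virtual = virtual;

		err = gen_pool_add_owner(pool, sg_dma_address(sg),
					 sg_dma_address(sg), sg_dma_len(sg),
					 dev_to_node(dma_dev), owner);
		if (err) {
			kfree(owner);
			goto err_pool;
		}
		virtual += sg_dma_len(sg);
	}

	*attachp = attach;
	*sgtp = sgt;
	*poolp = pool;
	return 0;

err_pool:
	/* The listing's error path frees the per-chunk owners via
	 * gen_pool_for_each_chunk() before destroying; elided here. */
	gen_pool_destroy(pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, dir);
err_detach:
	dma_buf_detach(dmabuf, attach);
	return err;
}
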
334 struct net_devmem_dmabuf_binding *binding; in net_devmem_lookup_dmabuf() local
337 binding = xa_load(&net_devmem_dmabuf_bindings, id); in net_devmem_lookup_dmabuf()
338 if (binding) { in net_devmem_lookup_dmabuf()
339 if (!net_devmem_dmabuf_binding_get(binding)) in net_devmem_lookup_dmabuf()
340 binding = NULL; in net_devmem_lookup_dmabuf()
344 return binding; in net_devmem_lookup_dmabuf()
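
net_devmem_lookup_dmabuf() is the classic look-up-then-try-get pattern: hold the xarray lock so the entry cannot be erased and freed underneath the lookup, and only hand the binding out if the refcount could be raised from non-zero. A small sketch of that pattern, reusing the hypothetical get helper assumed above:

#include <linux/xarray.h>

static struct demo_binding_ref *
demo_lookup_binding(struct xarray *bindings, u32 id)
{
	struct demo_binding_ref *b;

	xa_lock(bindings);
	b = xa_load(bindings, id);
	if (b && !demo_binding_get(b))
		b = NULL;	/* found, but already on its way to being freed */
	xa_unlock(bindings);

	return b;
}
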
360 struct net_devmem_dmabuf_binding *binding; in net_devmem_get_binding() local
365 binding = net_devmem_lookup_dmabuf(dmabuf_id); in net_devmem_get_binding()
366 if (!binding || !binding->tx_vec) { in net_devmem_get_binding()
390 if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) { in net_devmem_get_binding()
396 return binding; in net_devmem_get_binding()
401 if (binding) in net_devmem_get_binding()
402 net_devmem_dmabuf_binding_put(binding); in net_devmem_get_binding()
408 net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, in net_devmem_get_niov_at() argument
411 if (virt_addr >= binding->dmabuf->size) in net_devmem_get_niov_at()
417 return binding->tx_vec[virt_addr / PAGE_SIZE]; in net_devmem_get_niov_at()
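
net_devmem_get_niov_at() is a bounds check plus index arithmetic into the tx_vec built at bind time: one slot per PAGE_SIZE of dma-buf, indexed by the virtual offset into the buffer. A trivial sketch of that lookup; the element type is a placeholder for the real net_iov pointer.

#include <linux/mm.h>
#include <linux/types.h>

static void *demo_get_slot_at(void **tx_vec, size_t dmabuf_size,
			      unsigned long virt_addr)
{
	/* Offsets past the end of the dma-buf have no backing slot. */
	if (virt_addr >= dmabuf_size)
		return NULL;

	return tx_vec[virt_addr / PAGE_SIZE];
}
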
424 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_init() local
426 if (!binding) in mp_dmabuf_devmem_init()
438 net_devmem_dmabuf_binding_get(binding); in mp_dmabuf_devmem_init()
444 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_alloc_netmems() local
448 niov = net_devmem_alloc_dmabuf(binding); in mp_dmabuf_devmem_alloc_netmems()
463 struct net_devmem_dmabuf_binding *binding = pool->mp_priv; in mp_dmabuf_devmem_destroy() local
465 net_devmem_dmabuf_binding_put(binding); in mp_dmabuf_devmem_destroy()
489 const struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_nl_fill() local
492 return nla_put_u32(rsp, type, binding->id); in mp_dmabuf_devmem_nl_fill()
498 struct net_devmem_dmabuf_binding *binding = mp_priv; in mp_dmabuf_devmem_uninstall() local
502 xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) { in mp_dmabuf_devmem_uninstall()
504 xa_erase(&binding->bound_rxqs, xa_idx); in mp_dmabuf_devmem_uninstall()
505 if (xa_empty(&binding->bound_rxqs)) { in mp_dmabuf_devmem_uninstall()
506 mutex_lock(&binding->lock); in mp_dmabuf_devmem_uninstall()
507 binding->dev = NULL; in mp_dmabuf_devmem_uninstall()
508 mutex_unlock(&binding->lock); in mp_dmabuf_devmem_uninstall()
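
mp_dmabuf_devmem_uninstall() walks bound_rxqs looking for the queue being torn down, erases it, and once no queues remain clears binding->dev under binding->lock so that a concurrent unbind no longer tries to close queues on a device that is going away. The sketch below shows that search-erase-and-clear pattern; the comparison against the passed-in rxq is implied by the listing rather than shown in it, and the struct is a placeholder.

#include <linux/mutex.h>
#include <linux/xarray.h>

struct demo_uninstall_binding {
	struct xarray bound_rxqs;
	struct mutex lock;
	void *dev;	/* placeholder for struct net_device * */
};

static void demo_uninstall_rxq(struct demo_uninstall_binding *b, void *rxq)
{
	unsigned long xa_idx;
	void *bound_rxq;

	xa_for_each(&b->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq != rxq)
			continue;

		xa_erase(&b->bound_rxqs, xa_idx);
		if (xa_empty(&b->bound_rxqs)) {
			/* Last queue gone: detach from the device so the
			 * unbind path skips closing queues on it. */
			mutex_lock(&b->lock);
			b->dev = NULL;
			mutex_unlock(&b->lock);
		}
		break;
	}
}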