Lines Matching defs:binding

28 * the binding to remain alive. Each page pool using this binding holds
29 * a ref to keep the binding alive. The page_pool does not release the
30 * ref until all the net_iovs allocated from this binding are released
33 * The binding undoes itself and unmaps the underlying dmabuf once all
34 * those refs are dropped and the binding is no longer desired or in
38 * reference, making sure that the binding remains alive until all the
39 * net_iovs are no longer used. net_iovs allocated from this binding
41 * retransmits) hold a reference to the binding until the skb holding
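The comment block above (lines 28-41) describes the lifetime rule: every object that can still produce or hold a net_iov from the binding must pin the binding with a reference. Below is a minimal sketch of that rule from a consumer's point of view, using the net_devmem_dmabuf_binding_get()/_put() helpers matched further down; the my_consumer type and its attach/detach functions are illustrative and not part of the header.

/* Sketch only; assumes the declarations from net/core/devmem.h. */
#include <linux/errno.h>

struct my_consumer {
	struct net_devmem_dmabuf_binding *binding;
};

static int my_consumer_attach(struct my_consumer *c,
			      struct net_devmem_dmabuf_binding *binding)
{
	/* Fails once the binding has already started tearing down. */
	if (!net_devmem_dmabuf_binding_get(binding))
		return -ENODEV;

	c->binding = binding;
	return 0;
}

static void my_consumer_detach(struct my_consumer *c)
{
	/* Dropping the final reference triggers the deferred unbind. */
	net_devmem_dmabuf_binding_put(c->binding);
	c->binding = NULL;
}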
51 /* rxq's this binding is active on. */
54 /* ID of this binding. Globally unique to all bindings currently
59 /* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx. */
62 /* Array of net_iov pointers for this binding, sorted by virtual
79 struct net_devmem_dmabuf_binding *binding;
93 void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
95 struct net_devmem_dmabuf_binding *binding,
110 return net_devmem_iov_to_chunk_owner(niov)->binding;
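Line 110 is the body of the accessor that walks from a net_iov back to the binding it was carved out of, via the chunk owner that embeds the binding pointer declared at line 79. A hedged sketch of how a caller might resolve and pin that binding follows; the accessor name net_devmem_iov_binding() and the wrapper itself are assumptions drawn from context, not shown in this listing.

/* Illustrative helper: look up the binding behind a net_iov and take a
 * reference so it stays alive while the net_iov is in use.
 */
static struct net_devmem_dmabuf_binding *
my_pin_iov_binding(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding;

	binding = net_devmem_iov_binding(niov); /* assumed accessor name */
	if (!net_devmem_dmabuf_binding_get(binding))
		return NULL;

	return binding;
}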
127 net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
129 return refcount_inc_not_zero(&binding->ref);
133 net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
135 if (!refcount_dec_and_test(&binding->ref))
138 INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
139 schedule_work(&binding->unbind_w);
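Lines 133-139 show that the final put does not free anything inline: it queues the unbind work instead, the usual pattern when the teardown path (here, unmapping the dmabuf) can sleep but the last reference may be dropped from a context that cannot. Below is a self-contained sketch of that "last put schedules a work item" pattern using the generic refcount_t and workqueue APIs; my_obj and its functions are illustrative, not the devmem implementation.

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	refcount_t ref;
	struct work_struct free_w;
};

static void my_obj_free_work(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, free_w);

	/* Heavy teardown runs here, in process context. */
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	refcount_set(&obj->ref, 1);
	return obj;
}

static void my_obj_put(struct my_obj *obj)
{
	if (!refcount_dec_and_test(&obj->ref))
		return;

	/* Last reference gone: defer the actual free to a workqueue. */
	INIT_WORK(&obj->free_w, my_obj_free_work);
	schedule_work(&obj->free_w);
}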
146 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
153 net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
160 net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
189 net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
195 struct net_devmem_dmabuf_binding *binding,
203 net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
234 net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
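The second group of matches (lines 160 through 234) repeats the same function names because the header carries a stub for each helper when devmem support is compiled out. A minimal sketch of that pattern follows, assuming the gate is CONFIG_NET_DEVMEM and that net_devmem_alloc_dmabuf() returns a struct net_iov pointer (the return type is not visible in this listing).

#if defined(CONFIG_NET_DEVMEM)
/* Real definitions are provided by net/core/devmem.c. */
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
#else
/* Static inline stubs keep callers building when the feature is off. */
static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}
#endif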