/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <net/netmem.h>

struct netlink_ext_ack;
struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* The list of currently active bindings. Used so netlink can notify
	 * us when the user drops the binding.
	 */
	struct list_head list;

	/* RX queues this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique across all currently active
	 * bindings.
	 */
	u32 id;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to track the metadata necessary to create allocations
 * from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;

	/* Array of net_iovs for this chunk. */
	struct net_iov *niovs;
	size_t num_niovs;

	struct net_devmem_dmabuf_binding *binding;
};

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void dev_dmabuf_uninstall(struct net_device *dev);
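
/* Illustrative sketch only, not part of the upstream header: one plausible
 * caller-side flow for the declarations above, assuming a netlink handler
 * that binds a dmabuf fd to a device, attaches the binding to a single RX
 * queue, and drops the user's reference on failure. The function name is
 * hypothetical.
 */
static inline int net_devmem_bind_one_rxq_sketch(struct net_device *dev,
						 unsigned int dmabuf_fd,
						 u32 rxq_idx,
						 struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	int err;

	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
	if (IS_ERR(binding))
		return PTR_ERR(binding);

	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
	if (err) {
		/* Drop the user's ref; the binding frees itself once all
		 * page pool and net_iov refs are gone as well.
		 */
		net_devmem_unbind_dmabuf(binding);
		return err;
	}

	return 0;
}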

/* Return the chunk owner that backs this net_iov. */
static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

/* Index of this net_iov within its owner's niovs array. */
static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}
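
/* Illustrative sketch only, not part of the upstream header: given the
 * chunk owner metadata above, the DMA address backing a net_iov can be
 * derived from the chunk's base_dma_addr plus the net_iov's index within
 * the chunk, assuming each net_iov covers one PAGE_SIZE slice. The function
 * name is hypothetical.
 */
static inline dma_addr_t net_iov_dma_addr_sketch(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}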

/* Binding that this net_iov's chunk owner belongs to. */
static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding;
}

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}
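
/* Hedged usage sketch: a component that needs the binding to outlive the
 * user's netlink ref pairs a get with a put; the final put is what calls
 * __net_devmem_dmabuf_binding_free() and unmaps the dmabuf.
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	... use the binding ...
 *	net_devmem_dmabuf_binding_put(binding);
 */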

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);
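
/* Hedged usage sketch: allocation hands out one net_iov backed by the
 * dmabuf binding; a NULL return (assumed here from the !CONFIG_NET_DEVMEM
 * stub below) signals that no chunk space is available.
 *
 *	struct net_iov *niov = net_devmem_alloc_dmabuf(binding);
 *
 *	if (!niov)
 *		return -ENOMEM;
 *	...
 *	net_devmem_free_dmabuf(niov);
 */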

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

#endif

#endif /* _NET_DEVMEM_H */