/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * when the user drops the binding.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all bindings currently
	 * active.
	 */
	u32 id;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array makes it convenient to map virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	struct work_struct unbind_w;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};
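
/* Illustrative sketch, not part of this header's API: given the chunk owner
 * above, a net_iov's device address can be derived from base_dma_addr plus
 * the net_iov's page-sized offset within the owner's area, mirroring how
 * net_iov_virtual_addr() below derives the virtual address. The helper name
 * is hypothetical; net_devmem_iov_to_chunk_owner() and net_iov_idx() are the
 * accessors already used in this file.
 *
 *	static inline dma_addr_t example_niov_dma_addr(const struct net_iov *niov)
 *	{
 *		struct dmabuf_genpool_chunk_owner *owner;
 *
 *		owner = net_devmem_iov_to_chunk_owner(niov);
 *		return owner->base_dma_addr +
 *		       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
 *	}
 */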

void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void net_devmem_bind_tx_release(struct sock *sk);

static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_to_chunk_owner(niov)->binding;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
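
/* Worked example (illustrative numbers only): with PAGE_SHIFT == 12, the
 * net_iov at index 3 of its owning area maps to owner->base_virtual +
 * (3 << 12), i.e. base_virtual + 0x3000. Each net_iov thus occupies a
 * contiguous page-sized slot starting at base_virtual, which is what lets
 * tx_vec above be kept sorted and indexed by virtual address in the TX path.
 */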

static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return refcount_inc_not_zero(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}
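
/* Usage sketch (illustrative only; a consumer-side example, not a helper
 * defined here): code that finds a binding via net_devmem_lookup_dmabuf()
 * and wants it to stay alive must take a reference before use and drop it
 * afterwards. The final put schedules unbind_w, whose work function
 * __net_devmem_dmabuf_binding_free() unmaps the underlying dmabuf.
 *
 *	binding = net_devmem_lookup_dmabuf(id);
 *	if (binding && net_devmem_dmabuf_binding_get(binding)) {
 *		...use the binding...
 *		net_devmem_dmabuf_binding_put(binding);
 *	}
 */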

void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

bool net_is_devmem_iov(struct net_iov *niov);
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);
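
/* TX-path sketch (illustrative only; locking and error handling trimmed):
 * a sender first resolves the binding referenced by a dmabuf id, then maps a
 * virtual address within the dmabuf to the net_iov backing it. "dmabuf_id"
 * and "addr" stand in for caller-supplied values.
 *
 *	binding = net_devmem_get_binding(sk, dmabuf_id);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *
 *	niov = net_devmem_get_niov_at(binding, addr, &off, &size);
 *
 * where, per the declaration above, "off" and "size" are presumably the
 * offset into the returned net_iov and the bytes addressable from it.
 */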

#else
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline bool net_is_devmem_iov(struct net_iov *niov)
{
	return false;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif

#endif /* _NET_DEVMEM_H */