xref: /linux/net/core/devmem.c (revision de5ca699bc3f7fe9f90ba927d8a6e7783cd7311d)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Devmem TCP
 *
 *      Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/ethtool_netlink.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->pp->mp_ops == &dmabuf_devmem_ops;
}

static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}
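
/* Illustrative arithmetic, not taken from the source: with 4 KiB pages
 * (PAGE_SHIFT == 12), the net_iov at index 3 of a chunk whose
 * base_dma_addr is 0x80000000 resolves to:
 *
 *	0x80000000 + ((dma_addr_t)3 << 12) == 0x80003000
 *
 * i.e. each net_iov addresses one PAGE_SIZE slice of its owning chunk.
 */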

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}
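
/* Illustrative only: the expected pairing of the two helpers above, as
 * the dmabuf memory provider uses them further down in this file (the
 * error handling here is a sketch, not code to copy):
 *
 *	struct net_iov *niov = net_devmem_alloc_dmabuf(binding);
 *
 *	if (!niov)
 *		return -ENOMEM;		// chunk pool exhausted
 *	...
 *	net_devmem_free_dmabuf(niov);	// returns the PAGE_SIZE slice
 */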

void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;
	int err;

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		WARN_ON(rxq->mp_params.mp_priv != binding);

		rxq->mp_params.mp_priv = NULL;
		rxq->mp_params.mp_ops = NULL;

		netdev_lock(binding->dev);
		rxq_idx = get_netdev_rx_queue_index(rxq);

		err = netdev_rx_queue_restart(binding->dev, rxq_idx);
		WARN_ON(err && err != -ENETDOWN);
		netdev_unlock(binding->dev);
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

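/* Binding a dmabuf to an RX queue requires header/data split, so that
 * payload can land in device memory while headers stay in host memory.
 * Illustrative userspace setup only (exact ethtool syntax may vary by
 * version; this is an assumption, not taken from this file):
 *
 *	ethtool -G eth0 tcp-data-split on hds-thresh 0
 */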
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}

	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}

	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue is already bound to a memory provider");
		return -EEXIST;
	}

#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	rxq->mp_params.mp_priv = binding;
	rxq->mp_params.mp_ops = &dmabuf_devmem_ops;

	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err)
		goto err_xa_erase;

	return 0;

err_xa_erase:
	rxq->mp_params.mp_priv = NULL;
	rxq->mp_params.mp_ops = NULL;
	xa_erase(&binding->bound_rxqs, xa_idx);

	return err;
}

struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}
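
	/* Each DMA-contiguous scatterlist entry becomes one genpool chunk
	 * with its own owner; 'virtual' accumulates a flat, zero-based
	 * offset across entries so every net_iov has a stable position
	 * within the dmabuf. Illustrative layout with assumed sizes (not
	 * from the source): two sg entries of 1 MiB each yield chunks
	 * covering virtual [0, 1M) and [1M, 2M), each holding 256
	 * net_iovs with 4 KiB pages.
	 */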
	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}
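
/* Illustrative lifecycle: a sketch of how the binding helpers above fit
 * together, assuming the caller already holds a dmabuf fd (error
 * handling elided; not a literal caller from the tree):
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	...			// RX payloads now land in the dmabuf
 *	net_devmem_unbind_dmabuf(binding);
 */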

/*** "Dmabuf devmem memory provider" ***/

int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf DMA addresses do not need to be, and must not be, passed
	 * to dma_sync_for_cpu/device. Force-disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

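	/* devmem chunks are handed out in PAGE_SIZE units, so higher-order
	 * page_pool allocations cannot be backed by this provider.
	 */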
	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};
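
/* Illustrative only: once net_devmem_bind_dmabuf_to_queue() has set
 * rxq->mp_params.mp_ops to &dmabuf_devmem_ops, a page pool created for
 * that queue picks these ops up and routes allocation through
 * mp_dmabuf_devmem_alloc_netmems() instead of the page allocator. A
 * sketch of the caller-visible effect (assumed context, not a literal
 * driver snippet):
 *
 *	netmem_ref netmem = page_pool_alloc_netmems(pool, GFP_ATOMIC);
 *	// netmem now refers to a net_iov backed by dmabuf device memory
 */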