// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Devmem TCP
 *
 *      Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

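/* gen_pool_for_each_chunk() callback: free the per-chunk owner metadata
 * (the net_iov array and the owner struct itself) when a binding is torn
 * down.
 */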
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

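/* Compute the DMA address of a net_iov from its owning chunk's base DMA
 * address and the niov's page index within the chunk.
 */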
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

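/* percpu_ref release callback: runs once the last reference to the binding
 * is dropped. The actual teardown may sleep, so defer it to a workqueue.
 */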
static void net_devmem_dmabuf_binding_release(struct percpu_ref *ref)
{
	struct net_devmem_dmabuf_binding *binding =
		container_of(ref, struct net_devmem_dmabuf_binding, ref);

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

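/* Workqueue handler that finishes destroying a binding: free all genpool
 * chunk owners, destroy the pool (warning if any chunk is still allocated),
 * unmap and detach the dma-buf, and free the binding itself.
 */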
void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
	struct net_devmem_dmabuf_binding *binding =
		container_of(wq, typeof(*binding), unbind_w);
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  binding->direction);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	percpu_ref_exit(&binding->ref);
	kvfree(binding->tx_vec);
	kfree(binding);
}

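/* Allocate one PAGE_SIZE region from the binding's genpool and return the
 * net_iov that describes it, with its page_pool fields reset.
 */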
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->desc.pp_magic = 0;
	niov->desc.pp = NULL;
	atomic_long_set(&niov->desc.pp_ref_count, 0);

	return niov;
}

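/* Return a net_iov's PAGE_SIZE region to the binding's genpool. */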
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

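/* Tear down a binding: make it unreachable by ID, close every RX queue it
 * is bound to, and drop the initial reference. The final free happens from
 * the percpu_ref release path once all outstanding references are gone.
 */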
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	/* Ensure no tx-side net_devmem_lookup_dmabuf() calls are in flight
	 * after the erase.
	 */
	synchronize_net();

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv	= binding,
			.mp_ops		= &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	percpu_ref_kill(&binding->ref);
}

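/* Install this binding as the page_pool memory provider for an RX queue and
 * record the queue in the binding's bound_rxqs xarray.
 */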
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv	= binding,
		.mp_ops		= &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}

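/* Create a binding for a dma-buf fd: attach and map it for the device, carve
 * the resulting scatterlist into genpool chunks with per-chunk net_iov
 * arrays, assign the binding a global ID, and (for TX) build a flat
 * virtual-offset to net_iov lookup table.
 */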
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	if (!dma_dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't support DMA");
		return ERR_PTR(-EOPNOTSUPP);
	}

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;
	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	err = percpu_ref_init(&binding->ref,
			      net_devmem_dmabuf_binding_release,
			      0, GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	mutex_init(&binding->lock);

	binding->dmabuf = dmabuf;
	binding->direction = direction;

	binding->attachment = dma_buf_attach(binding->dmabuf, dma_dev);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_exit_ref;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       direction);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	if (direction == DMA_TO_DEVICE) {
		binding->tx_vec = kvmalloc_objs(struct net_iov *,
						dmabuf->size / PAGE_SIZE);
		if (!binding->tx_vec) {
			err = -ENOMEM;
			goto err_unmap;
		}
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
					      dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_tx_vec;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_objs(*owner->area.niovs,
						  owner->area.num_niovs);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->type = NET_IOV_DMABUF;
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
			if (direction == DMA_TO_DEVICE)
				binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
		}

		virtual += len;
	}

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_chunks;

	list_add(&binding->list, &priv->bindings);

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_tx_vec:
	kvfree(binding->tx_vec);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  direction);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_exit_ref:
	percpu_ref_exit(&binding->ref);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

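/* Look up a binding by ID under RCU and take a reference; returns NULL if
 * the ID is unknown or the binding is already being torn down.
 */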
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	struct net_devmem_dmabuf_binding *binding;

	rcu_read_lock();
	binding = xa_load(&net_devmem_dmabuf_bindings, id);
	if (binding) {
		if (!net_devmem_dmabuf_binding_get(binding))
			binding = NULL;
	}
	rcu_read_unlock();

	return binding;
}

void net_devmem_get_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
}

void net_devmem_put_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
}

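/* Resolve the binding to use for a TX send: look it up by ID and verify
 * that the socket's route egresses via the device the dma-buf is mapped
 * for, since the DMA addresses are only valid for that device.
 */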
struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
							 unsigned int dmabuf_id)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_device *dst_dev;
	struct dst_entry *dst;
	int err = 0;

	binding = net_devmem_lookup_dmabuf(dmabuf_id);
	if (!binding || !binding->tx_vec) {
		err = -EINVAL;
		goto out_err;
	}

	rcu_read_lock();
	dst = __sk_dst_get(sk);
	/* If dst is NULL (route expired), attempt to rebuild it. */
	if (unlikely(!dst)) {
		if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
			err = -EHOSTUNREACH;
			goto out_unlock;
		}
		dst = __sk_dst_get(sk);
		if (unlikely(!dst)) {
			err = -ENODEV;
			goto out_unlock;
		}
	}

	/* The DMA addresses in this binding are only usable by the
	 * corresponding net_device.
	 */
	dst_dev = dst_dev_rcu(dst);
	if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
		err = -ENODEV;
		goto out_unlock;
	}

	rcu_read_unlock();
	return binding;

out_unlock:
	rcu_read_unlock();
out_err:
	if (binding)
		net_devmem_dmabuf_binding_put(binding);

	return ERR_PTR(err);
}

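/* Map a virtual offset into the dma-buf to the net_iov backing that page,
 * returning the offset within the page and the bytes remaining in it.
 */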
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
		       size_t virt_addr, size_t *off, size_t *size)
{
	if (virt_addr >= binding->dmabuf->size)
		return NULL;

	*off = virt_addr % PAGE_SIZE;
	*size = PAGE_SIZE - *off;

	return binding->tx_vec[virt_addr / PAGE_SIZE];
}

/*** "Dmabuf devmem memory provider" ***/

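/* page_pool memory provider init: take a reference on the binding and
 * disable DMA syncing, which does not apply to dma-buf addresses. Only
 * order-0 pools are supported.
 */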
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf DMA addresses do not need, and must not be used with,
	 * dma_sync_for_cpu/device. Force-disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

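/* Allocate a netmem for the page_pool by pulling a net_iov from the
 * binding's genpool and tagging it with this pool's info.
 */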
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

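/* Release a netmem back to the binding's genpool instead of letting the
 * page_pool treat it as a page.
 */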
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

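/* Called when an RX queue stops using this provider: drop the queue from
 * bound_rxqs and clear binding->dev once no queues remain bound.
 */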
static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			if (xa_empty(&binding->bound_rxqs)) {
				mutex_lock(&binding->lock);
				binding->dev = NULL;
				mutex_unlock(&binding->lock);
			}
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};