xref: /linux/net/core/devmem.c (revision 3186a8e55ae3428ec1e06af09075e20885376e4e)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Devmem TCP
 *
 *      Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/sock.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->type == NET_IOV_DMABUF;
}

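/* gen_pool_for_each_chunk() callback: free the net_iov array and the chunk
 * owner associated with one genpool chunk.
 */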
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

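/* Compute the DMA address backing @niov from its chunk owner's base DMA
 * address and the niov's index within that chunk.
 */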
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

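/* Final teardown of a binding, run from its unbind_w work item: free every
 * chunk owner, destroy the genpool, and unmap, detach and put the dma-buf.
 */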
void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
	struct net_devmem_dmabuf_binding *binding = container_of(wq,
								 typeof(*binding),
								 unbind_w);
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kvfree(binding->tx_vec);
	kfree(binding);
}
EXPORT_SYMBOL(__net_devmem_dmabuf_binding_free);

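/* Allocate one PAGE_SIZE chunk from the binding's genpool and return the
 * net_iov covering it, with its page_pool state cleared.
 */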
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

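/* Return the chunk backing @niov to the binding's genpool. */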
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

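/* Tear a binding away from its device: remove it from the global xarray,
 * close any RX queues still using it as a memory provider, and drop the
 * reference taken at bind time.
 */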
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	/* Ensure no tx net_devmem_lookup_dmabuf() calls are in flight after
	 * the erase.
	 */
	synchronize_net();

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv	= binding,
			.mp_ops		= &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	net_devmem_dmabuf_binding_put(binding);
}

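/* Install @binding as the memory provider of RX queue @rxq_idx on @dev and
 * record the queue in binding->bound_rxqs.
 */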
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv	= binding,
		.mp_ops		= &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}

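/* Create a binding for the dma-buf referred to by @dmabuf_fd: attach and map
 * it for @dev, carve the mapping into PAGE_SIZE net_iovs managed by a
 * genpool, build the TX lookup vector for DMA_TO_DEVICE bindings, and
 * publish the binding in the global xarray.
 */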
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;
	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	mutex_init(&binding->lock);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_binding;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       direction);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	if (direction == DMA_TO_DEVICE) {
		binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
						 sizeof(struct net_iov *),
						 GFP_KERNEL);
		if (!binding->tx_vec) {
			err = -ENOMEM;
			goto err_unmap;
		}
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
					      dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_tx_vec;
	}

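	/* Walk the mapped scatterlist and describe each DMA-contiguous chunk
	 * with a genpool chunk owner and a per-page array of net_iovs.
	 */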
	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->type = NET_IOV_DMABUF;
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
			if (direction == DMA_TO_DEVICE)
				binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
		}

		virtual += len;
	}

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_chunks;

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_tx_vec:
	kvfree(binding->tx_vec);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

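/* Look up a binding by id under RCU and take a reference on it. Returns NULL
 * if the id is unknown or the binding's refcount has already hit zero.
 */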
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	struct net_devmem_dmabuf_binding *binding;

	rcu_read_lock();
	binding = xa_load(&net_devmem_dmabuf_bindings, id);
	if (binding) {
		if (!net_devmem_dmabuf_binding_get(binding))
			binding = NULL;
	}
	rcu_read_unlock();

	return binding;
}

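/* Pin and unpin the binding that backs a given net_iov. */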
void net_devmem_get_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
}

void net_devmem_put_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
}

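/* TX path: resolve @dmabuf_id to a binding and check that the socket's
 * destination device is the device the dma-buf was bound to, since the DMA
 * addresses in the binding are only usable by that device. Returns the
 * binding with a reference held, or an ERR_PTR().
 */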
struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
							 unsigned int dmabuf_id)
{
	struct net_devmem_dmabuf_binding *binding;
	struct dst_entry *dst = __sk_dst_get(sk);
	int err = 0;

	binding = net_devmem_lookup_dmabuf(dmabuf_id);
	if (!binding || !binding->tx_vec) {
		err = -EINVAL;
		goto out_err;
	}

	/* The dma-addrs in this binding are only usable by the corresponding
	 * net_device.
	 */
	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
		err = -ENODEV;
		goto out_err;
	}

	return binding;

out_err:
	if (binding)
		net_devmem_dmabuf_binding_put(binding);

	return ERR_PTR(err);
}

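/* Translate an offset into the bound dma-buf into the net_iov covering it,
 * plus the offset into that net_iov and the bytes remaining within it.
 */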
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
		       size_t virt_addr, size_t *off, size_t *size)
{
	if (virt_addr >= binding->dmabuf->size)
		return NULL;

	*off = virt_addr % PAGE_SIZE;
	*size = PAGE_SIZE - *off;

	return binding->tx_vec[virt_addr / PAGE_SIZE];
}

/*** "Dmabuf devmem memory provider" ***/

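/* Memory provider init: dma-buf addresses must never be passed to the
 * dma_sync_*() helpers and only order-0 pools are supported, so disable
 * syncing, reject higher orders, and pin the binding for the pool's
 * lifetime.
 */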
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf DMA addresses do not need to be synced and must not be
	 * passed to dma_sync_for_cpu/device. Force-disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

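/* Allocate a net_iov from the binding and hand it to the page pool as a
 * netmem_ref, updating the pool's hold count and tracepoint bookkeeping.
 */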
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

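/* Called when the page pool releases a netmem: check that it is an
 * unreferenced net_iov, give it back to the genpool, and return false so
 * the page pool never put_page()s it.
 */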
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

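/* Report the binding id over netlink, as a queue attribute or a page pool
 * attribute depending on whether @rxq is provided.
 */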
static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

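/* Called when the provider is removed from an RX queue: drop the queue from
 * bound_rxqs and, once no queues remain, clear binding->dev under the
 * binding lock.
 */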
static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			if (xa_empty(&binding->bound_rxqs)) {
				mutex_lock(&binding->lock);
				binding->dev = NULL;
				mutex_unlock(&binding->lock);
			}
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};
493