xref: /linux/net/core/devmem.c (revision 0d161eb27d69ceb371b3409184a1bb69d3c83de3)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Devmem TCP
 *
 *	Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/sock.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->type == NET_IOV_DMABUF;
}

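/* gen_pool_for_each_chunk() callback: free the net_iov array and the chunk
 * owner itself when a binding's genpool is torn down.
 */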
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

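/* DMA address of a net_iov: the owning chunk's base DMA address plus the
 * niov's page-sized offset within that chunk.
 */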
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

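/* Work handler for binding->unbind_w: free the chunk owners, destroy the
 * genpool (warning if allocations are still outstanding), unmap and detach
 * the dma-buf, then free the binding itself.
 */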
void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
	struct net_devmem_dmabuf_binding *binding = container_of(wq, typeof(*binding), unbind_w);

	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kvfree(binding->tx_vec);
	kfree(binding);
}
EXPORT_SYMBOL(__net_devmem_dmabuf_binding_free);

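/* Allocate one PAGE_SIZE chunk of device memory from the binding's genpool
 * and return the net_iov tracking it, or NULL if the pool is exhausted.
 */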
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

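/* Return a net_iov's PAGE_SIZE chunk to the binding's genpool. */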
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

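/* Tear down a binding: remove it from the global xarray, close every RX queue
 * it is installed on, and drop the reference held since bind time.
 */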
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	/* Ensure no TX-side net_devmem_lookup_dmabuf() calls are in flight
	 * after the erase.
	 */
	synchronize_net();

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv	= binding,
			.mp_ops		= &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	net_devmem_dmabuf_binding_put(binding);
}

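/* Install the binding as the memory provider for one RX queue and record the
 * queue in binding->bound_rxqs; the queue is restored if bookkeeping fails.
 */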
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv	= binding,
		.mp_ops		= &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}

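/* Create a binding from a dma-buf fd: attach and map the dma-buf for the
 * device, optionally build the TX lookup vector, carve the scatterlist into
 * a genpool of PAGE_SIZE chunks backed by one net_iov per page, and publish
 * the binding in the global xarray under a newly allocated id.
 */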
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;
	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_binding;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       direction);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	if (direction == DMA_TO_DEVICE) {
		binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
						 sizeof(struct net_iov *),
						 GFP_KERNEL);
		if (!binding->tx_vec) {
			err = -ENOMEM;
			goto err_unmap;
		}
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
					      dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_tx_vec;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->type = NET_IOV_DMABUF;
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
			if (direction == DMA_TO_DEVICE)
				binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
		}

		virtual += len;
	}

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_chunks;

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_tx_vec:
	kvfree(binding->tx_vec);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

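/* Look up a binding by id under RCU and take a reference; returns NULL if the
 * id is unknown or the binding is already being destroyed.
 */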
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	struct net_devmem_dmabuf_binding *binding;

	rcu_read_lock();
	binding = xa_load(&net_devmem_dmabuf_bindings, id);
	if (binding) {
		if (!net_devmem_dmabuf_binding_get(binding))
			binding = NULL;
	}
	rcu_read_unlock();

	return binding;
}

void net_devmem_get_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
}

void net_devmem_put_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
}

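/* Resolve a dmabuf id into a referenced binding for TX: the binding must have
 * a tx_vec and must belong to the device the socket's dst points at.
 */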
struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
							 unsigned int dmabuf_id)
{
	struct net_devmem_dmabuf_binding *binding;
	struct dst_entry *dst = __sk_dst_get(sk);
	int err = 0;

	binding = net_devmem_lookup_dmabuf(dmabuf_id);
	if (!binding || !binding->tx_vec) {
		err = -EINVAL;
		goto out_err;
	}

	/* The dma-addrs in this binding are only reachable by the
	 * corresponding net_device.
	 */
	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
		err = -ENODEV;
		goto out_err;
	}

	return binding;

out_err:
	if (binding)
		net_devmem_dmabuf_binding_put(binding);

	return ERR_PTR(err);
}

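/* Translate a linear offset into the dma-buf to the net_iov backing that
 * page, along with the offset into the page and the bytes remaining in it.
 */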
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
		       size_t virt_addr, size_t *off, size_t *size)
{
	if (virt_addr >= binding->dmabuf->size)
		return NULL;

	*off = virt_addr % PAGE_SIZE;
	*size = PAGE_SIZE - *off;

	return binding->tx_vec[virt_addr / PAGE_SIZE];
}

/*** "Dmabuf devmem memory provider" ***/

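/* Page pool init hook: take a reference on the binding and force-disable DMA
 * syncing; only order-0 pools are supported.
 */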
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf DMA addresses do not need, and must not be used with,
	 * dma_sync_for_cpu/device. Force-disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

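/* Allocation hook: hand out one net_iov from the binding as a netmem_ref and
 * account it against the pool's hold count.
 */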
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

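/* Release hook: return the net_iov to the binding. Always returns false so
 * the page pool never attempts to release a net_iov as an ordinary page.
 */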
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

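/* Report the dmabuf binding id for queue or page-pool netlink dumps. */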
static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};