// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Devmem TCP
 *
 *      Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/ethtool_netlink.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

/* Protected by rtnl_lock() */
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

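/* True if the niov's page_pool uses the dmabuf devmem memory provider. */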
bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->pp->mp_ops == &dmabuf_devmem_ops;
}

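/* gen_pool chunk callback: free a chunk's owner and its net_iov array. */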
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

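/* DMA address of a niov: the owning chunk's base DMA address plus the
 * niov's page-sized index within that chunk.
 */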
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

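/* Final teardown, once the last reference on the binding is dropped: free
 * the chunk owners, destroy the genpool (or warn and leak it if chunks are
 * still outstanding), unmap and detach the dma-buf, and free the binding.
 */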
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

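/* Allocate one PAGE_SIZE chunk from the binding's genpool and return the
 * matching net_iov with its page_pool state reset.
 */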
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

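/* Return a niov's PAGE_SIZE chunk to the binding's genpool. */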
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

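/* Tear down a binding: detach the memory provider from every bound rx queue
 * and restart those queues, drop the binding from the global xarray, then
 * release the initial reference.
 */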
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		WARN_ON(rxq->mp_params.mp_priv != binding);

		rxq->mp_params.mp_priv = NULL;
		rxq->mp_params.mp_ops = NULL;

		rxq_idx = get_netdev_rx_queue_index(rxq);

		WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

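/* Attach a binding to one rx queue. The queue index must be valid, the
 * device must have tcp-data-split enabled with a zero hds-thresh, and the
 * queue must not already be claimed by a memory provider or AF_XDP. The
 * queue is restarted so the driver picks up the new provider.
 */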
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}

	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}

	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
		return -EEXIST;
	}

#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	rxq->mp_params.mp_priv = binding;
	rxq->mp_params.mp_ops = &dmabuf_devmem_ops;

	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err)
		goto err_xa_erase;

	return 0;

err_xa_erase:
	rxq->mp_params.mp_priv = NULL;
	rxq->mp_params.mp_ops = NULL;
	xa_erase(&binding->bound_rxqs, xa_idx);

	return err;
}

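/* Create a binding from a dma-buf fd: attach the dma-buf to the device and
 * map it for DMA, carve the resulting scatterlist into a genpool of
 * PAGE_SIZE chunks, and set up a net_iov with a precomputed DMA address for
 * every page of every chunk.
 */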
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

/*** "Dmabuf devmem memory provider" ***/

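/* Page pool init hook: require a binding and order-0 allocations, force off
 * DMA syncing (dma-buf addresses must not be passed to dma_sync helpers),
 * and hold a binding reference for the lifetime of the pool.
 */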
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf dma addresses do not need and should not be used with
	 * dma_sync_for_cpu/device. Force disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

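/* Page pool alloc hook: hand out a net_iov from the binding as a netmem
 * reference and account for it in the pool's hold count.
 */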
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

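/* Page pool release hook: once the last reference is gone, clear the pp
 * state and return the net_iov to the binding.
 */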
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

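/* Report the binding id over netlink, as a queue attribute when a queue is
 * given and as a page pool attribute otherwise.
 */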
static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

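/* Drop an rx queue from the binding's bound_rxqs when the memory provider
 * is uninstalled from that queue.
 */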
static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			break;
		}
	}
}

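/* Memory provider ops that plug dmabuf devmem into the page pool. */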
static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};