xref: /linux/net/core/devmem.c (revision 170aafe35cb98e0f3fbacb446ea86389fbce22ea)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Devmem TCP
 *
 *      Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <trace/events/page_pool.h>

#include "devmem.h"

/* Device memory support */

/* Protected by rtnl_lock() */
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

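/* gen_pool_for_each_chunk() callback: free a chunk's net_iov array and the
 * chunk owner struct itself.
 */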
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->niovs);
	kfree(owner);
}

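/* Release everything a binding holds: free each chunk's owner, destroy the
 * genpool (warning if allocations are still outstanding), unmap and detach
 * the dma-buf, drop its reference, and free the binding itself.
 */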
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

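/* Tear down a binding: unlink it from the device list if it was ever added,
 * detach it from every bound RX queue (restarting each queue), remove it
 * from the global binding xarray, and drop its reference.
 */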
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

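	/* list.next is only set once the binding has been linked onto a
	 * device list; skip list_del() for a binding that never was.
	 */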
	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		WARN_ON(rxq->mp_params.mp_priv != binding);

		rxq->mp_params.mp_priv = NULL;

		rxq_idx = get_netdev_rx_queue_index(rxq);

		WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

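/* Install @binding as the memory provider for RX queue @rxq_idx. The queue
 * must not already have a provider or an AF_XDP pool; it is restarted so
 * the new provider takes effect.
 */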
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_priv) {
		NL_SET_ERR_MSG(extack, "designated queue is already bound to a memory provider");
		return -EEXIST;
	}

#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	rxq->mp_params.mp_priv = binding;

	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err)
		goto err_xa_erase;

	return 0;

err_xa_erase:
	rxq->mp_params.mp_priv = NULL;
	xa_erase(&binding->bound_rxqs, xa_idx);

	return err;
}

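/* Create a binding for the dma-buf referred to by @dmabuf_fd: attach it to
 * the device and DMA-map it, then carve the resulting scatterlist into
 * PAGE_SIZE net_iovs tracked by a genpool keyed on DMA address.
 */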
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

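	/* Walk the DMA-mapped scatterlist. Each entry becomes one genpool
	 * chunk, keyed by its DMA address, whose owner records the chunk's
	 * offset into the buffer, its base DMA address, and its net_iovs.
	 */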
	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->niovs = kvmalloc_array(owner->num_niovs,
					      sizeof(*owner->niovs),
					      GFP_KERNEL);
		if (!owner->niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->num_niovs; i++) {
			niov = &owner->niovs[i];
			niov->owner = owner;
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

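/* For each RX queue on @dev that still has a dmabuf binding installed as its
 * memory provider, drop the binding's back-reference to that queue.
 */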
void dev_dmabuf_uninstall(struct net_device *dev)
{
	struct net_devmem_dmabuf_binding *binding;
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int i;

	for (i = 0; i < dev->real_num_rx_queues; i++) {
		binding = dev->_rx[i].mp_params.mp_priv;
		if (!binding)
			continue;

		xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
			if (rxq == &dev->_rx[i]) {
				xa_erase(&binding->bound_rxqs, xa_idx);
				break;
			}
	}
}