// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Devmem TCP
 *
 *	Authors:	Mina Almasry <almasrymina@google.com>
 *			Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *			Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/ethtool_netlink.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->pp->mp_ops == &dmabuf_devmem_ops;
}

static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}
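
/* Each scatterlist entry of the mapped dma-buf becomes one genpool chunk with
 * its own dmabuf_genpool_chunk_owner, and every PAGE_SIZE slice of a chunk is
 * described by one net_iov. Assuming 4 KiB pages, for example, a 2 MiB sg
 * entry carries 512 net_iovs, and net_iov i of that chunk resolves to
 * owner->base_dma_addr + (i << PAGE_SHIFT), which is what
 * net_devmem_get_dma_addr() computes above.
 */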

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;
	int err;

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		WARN_ON(rxq->mp_params.mp_priv != binding);

		rxq->mp_params.mp_priv = NULL;
		rxq->mp_params.mp_ops = NULL;

		rxq_idx = get_netdev_rx_queue_index(rxq);

		err = netdev_rx_queue_restart(binding->dev, rxq_idx);
		WARN_ON(err && err != -ENETDOWN);
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	if (rxq_idx >= dev->real_num_rx_queues) {
		NL_SET_ERR_MSG(extack, "rx queue index out of range");
		return -ERANGE;
	}

	if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
		NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
		return -EINVAL;
	}

	if (dev->cfg->hds_thresh) {
		NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
		return -EINVAL;
	}

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	if (rxq->mp_params.mp_ops) {
		NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
		return -EEXIST;
	}

#ifdef CONFIG_XDP_SOCKETS
	if (rxq->pool) {
		NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
		return -EBUSY;
	}
#endif

	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		return err;

	rxq->mp_params.mp_priv = binding;
	rxq->mp_params.mp_ops = &dmabuf_devmem_ops;

	err = netdev_rx_queue_restart(dev, rxq_idx);
	if (err)
		goto err_xa_erase;

	return 0;

err_xa_erase:
	rxq->mp_params.mp_priv = NULL;
	rxq->mp_params.mp_ops = NULL;
	xa_erase(&binding->bound_rxqs, xa_idx);

	return err;
}
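
/* Bind a dma-buf fd to a device: take a reference on the dma-buf, attach and
 * DMA-map it for the device, then carve the resulting scatterlist into a
 * genpool of PAGE_SIZE slices, one net_iov per slice. The binding is
 * published in net_devmem_dmabuf_bindings so it can be looked up by id.
 * Individual rx queues are attached separately through
 * net_devmem_bind_dmabuf_to_queue() above, which installs dmabuf_devmem_ops
 * on the queue and restarts it so the provider takes effect.
 */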

struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

/*** "Dmabuf devmem memory provider" ***/
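
/* Page pool memory provider glue: when a page_pool is created for a bound rx
 * queue, .init below takes a reference on the binding carried in
 * pool->mp_priv, forces DMA syncing off (dma-buf addresses must not be passed
 * to dma_sync_for_cpu/device) and rejects pools with a non-zero order.
 * .alloc_netmems hands out PAGE_SIZE net_iovs from the binding's genpool,
 * .release_netmem returns them, and .destroy drops the binding reference when
 * the pool goes away.
 */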

int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf dma addresses do not need and should not be used with
	 * dma_sync_for_cpu/device. Force disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init			= mp_dmabuf_devmem_init,
	.destroy		= mp_dmabuf_devmem_destroy,
	.alloc_netmems		= mp_dmabuf_devmem_alloc_netmems,
	.release_netmem		= mp_dmabuf_devmem_release_page,
	.nl_fill		= mp_dmabuf_devmem_nl_fill,
	.uninstall		= mp_dmabuf_devmem_uninstall,
};
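
/* Illustrative sketch (not part of this file; the frag helper name is an
 * assumption): code walking skb frags can tell device memory apart from host
 * pages with the netmem helpers used above, roughly:
 *
 *	netmem_ref netmem = skb_frag_netmem(frag);
 *
 *	if (netmem_is_net_iov(netmem) &&
 *	    net_is_devmem_iov(netmem_to_net_iov(netmem)))
 *		// frag is backed by dma-buf device memory from this provider
 */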