// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Devmem TCP
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemdebruijn.kernel@gmail.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

static void net_devmem_dmabuf_binding_release(struct percpu_ref *ref)
{
	struct net_devmem_dmabuf_binding *binding =
		container_of(ref, struct net_devmem_dmabuf_binding, ref);

	INIT_WORK(&binding->unbind_w, __net_devmem_dmabuf_binding_free);
	schedule_work(&binding->unbind_w);
}

void __net_devmem_dmabuf_binding_free(struct work_struct *wq)
{
	struct net_devmem_dmabuf_binding *binding = container_of(wq, typeof(*binding),
								 unbind_w);
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  binding->direction);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	percpu_ref_exit(&binding->ref);
	kvfree(binding->tx_vec);
	kfree(binding);
}
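
/*
 * Illustrative sketch (not part of this file) of the deferred-free pattern
 * used by the two functions above: the percpu_ref release callback may run
 * in atomic context, while the actual teardown sleeps in the dma-buf
 * unmap/detach calls, so the release handler only queues work. The "foo"
 * names are hypothetical:
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *f = container_of(ref, struct foo, ref);
 *
 *		INIT_WORK(&f->free_w, foo_free_work);
 *		schedule_work(&f->free_w);	// foo_free_work() may sleep
 *	}
 */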

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->desc.pp_magic = 0;
	niov->desc.pp = NULL;
	atomic_long_set(&niov->desc.pp_ref_count, 0);

	return niov;
}

void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	/* Ensure no TX-side net_devmem_lookup_dmabuf() calls are in flight
	 * after the erase.
	 */
	synchronize_net();

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv = binding,
			.mp_ops = &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	percpu_ref_kill(&binding->ref);
}

int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv = binding,
		.mp_ops = &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}
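
/*
 * Expected caller flow, sketched (the real caller is the netdev netlink
 * code; the locals here are illustrative only):
 *
 *	binding = net_devmem_bind_dmabuf(dev, dma_dev, DMA_FROM_DEVICE,
 *					 dmabuf_fd, priv, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	if (err)
 *		net_devmem_unbind_dmabuf(binding);
 */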

struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	if (!dma_dev) {
		NL_SET_ERR_MSG(extack, "Device doesn't support DMA");
		return ERR_PTR(-EOPNOTSUPP);
	}

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;
	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	err = percpu_ref_init(&binding->ref,
			      net_devmem_dmabuf_binding_release,
			      0, GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	mutex_init(&binding->lock);

	binding->dmabuf = dmabuf;
	binding->direction = direction;

	binding->attachment = dma_buf_attach(binding->dmabuf, dma_dev);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_exit_ref;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       direction);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	if (direction == DMA_TO_DEVICE) {
		binding->tx_vec = kvmalloc_array(dmabuf->size / PAGE_SIZE,
						 sizeof(struct net_iov *),
						 GFP_KERNEL);
		if (!binding->tx_vec) {
			err = -ENOMEM;
			goto err_unmap;
		}
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool = gen_pool_create(PAGE_SHIFT,
					      dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_tx_vec;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->type = NET_IOV_DMABUF;
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
			if (direction == DMA_TO_DEVICE)
				binding->tx_vec[owner->area.base_virtual / PAGE_SIZE + i] = niov;
		}

		virtual += len;
	}

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_chunks;

	list_add(&binding->list, &priv->bindings);

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_tx_vec:
	kvfree(binding->tx_vec);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  direction);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_exit_ref:
	percpu_ref_exit(&binding->ref);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}
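
/*
 * Address-translation sketch for the layout built above (assumes the
 * PAGE_SIZE granularity noted in the chunk_pool comment). Each scatterlist
 * entry becomes one genpool chunk whose owner makes all three lookups O(1):
 *
 *	niov -> dma:	owner->base_dma_addr +
 *			((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT)
 *	dma  -> niov:	&owner->area.niovs[(dma_addr - owner->base_dma_addr) /
 *					   PAGE_SIZE]
 *	virt -> niov:	binding->tx_vec[virt_addr / PAGE_SIZE]	(TX bindings)
 */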

struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	struct net_devmem_dmabuf_binding *binding;

	rcu_read_lock();
	binding = xa_load(&net_devmem_dmabuf_bindings, id);
	if (binding) {
		if (!net_devmem_dmabuf_binding_get(binding))
			binding = NULL;
	}
	rcu_read_unlock();

	return binding;
}

void net_devmem_get_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_get(net_devmem_iov_binding(niov));
}

void net_devmem_put_net_iov(struct net_iov *niov)
{
	net_devmem_dmabuf_binding_put(net_devmem_iov_binding(niov));
}

struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
							 unsigned int dmabuf_id)
{
	struct net_devmem_dmabuf_binding *binding;
	struct net_device *dst_dev;
	struct dst_entry *dst;
	int err = 0;

	binding = net_devmem_lookup_dmabuf(dmabuf_id);
	if (!binding || !binding->tx_vec) {
		err = -EINVAL;
		goto out_err;
	}

	rcu_read_lock();
	dst = __sk_dst_get(sk);
	/* If dst is NULL (route expired), attempt to rebuild it. */
	if (unlikely(!dst)) {
		if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
			err = -EHOSTUNREACH;
			goto out_unlock;
		}
		dst = __sk_dst_get(sk);
		if (unlikely(!dst)) {
			err = -ENODEV;
			goto out_unlock;
		}
	}

	/* The DMA addresses in this binding are only reachable by the
	 * corresponding net_device.
	 */
	dst_dev = dst_dev_rcu(dst);
	if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
		err = -ENODEV;
		goto out_unlock;
	}

	rcu_read_unlock();
	return binding;

out_unlock:
	rcu_read_unlock();
out_err:
	if (binding)
		net_devmem_dmabuf_binding_put(binding);

	return ERR_PTR(err);
}

struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding,
		       size_t virt_addr, size_t *off, size_t *size)
{
	if (virt_addr >= binding->dmabuf->size)
		return NULL;

	*off = virt_addr % PAGE_SIZE;
	*size = PAGE_SIZE - *off;

	return binding->tx_vec[virt_addr / PAGE_SIZE];
}
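
/*
 * Usage sketch for net_devmem_get_niov_at() (hypothetical caller): walk a
 * virtual range of the dma-buf one page-bounded fragment at a time:
 *
 *	while (len) {
 *		niov = net_devmem_get_niov_at(binding, virt, &off, &size);
 *		if (!niov)
 *			return -EFAULT;
 *		size = min(size, len);
 *		... append (niov, off, size) as a zerocopy frag ...
 *		virt += size;
 *		len -= size;
 *	}
 */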

/*** "Dmabuf devmem memory provider" ***/

int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf dma addresses do not need and should not be used with
	 * dma_sync_for_cpu/device. Force disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			if (xa_empty(&binding->bound_rxqs)) {
				mutex_lock(&binding->lock);
				binding->dev = NULL;
				mutex_unlock(&binding->lock);
			}
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init = mp_dmabuf_devmem_init,
	.destroy = mp_dmabuf_devmem_destroy,
	.alloc_netmems = mp_dmabuf_devmem_alloc_netmems,
	.release_netmem = mp_dmabuf_devmem_release_page,
	.nl_fill = mp_dmabuf_devmem_nl_fill,
	.uninstall = mp_dmabuf_devmem_uninstall,
};
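
/*
 * Provider lifecycle sketch (as wired up in net_devmem_bind_dmabuf_to_queue()
 * above, simplified): binding an RX queue installs { .mp_priv = binding,
 * .mp_ops = &dmabuf_devmem_ops } on the queue, and the page pool core then
 * drives the provider roughly as:
 *
 *	.init		pool creation; takes a ref on the binding
 *	.alloc_netmems	refill; hands out net_iovs from the genpool
 *	.release_netmem	recycle; returns the net_iov to the genpool and
 *			returns false so the pool never put_page()s it
 *	.destroy	pool teardown; drops the binding ref
 */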