// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allows a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;
		list_del(&pos->xskb_list_node);
	}

	return 0;
err:
	xsk_buff_free(xdp);
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev) {
		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();

		list_add(&xs->flush_node, flush_list);
	}

	return 0;
}

void __xsk_map_flush(struct list_head *flush_list)
{
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	bool budget_exhausted = false;
	struct xdp_sock *xs;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}

		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		xs->tx_budget_spent++;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_cancel_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

	if (compl->tx_timestamp) {
		/* sw completion timestamp, not a real one */
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta = NULL;
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	bool first_frag = false;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err)) {
				kfree_skb(skb);
				goto free_err;
			}

			first_frag = true;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
		}

		if (first_frag && desc->options & XDP_TX_METADATA) {
			if (unlikely(xs->pool->tx_metadata_len == 0)) {
				err = -EINVAL;
				goto free_err;
			}

			meta = buffer - xs->pool->tx_metadata_len;
			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
				err = -EINVAL;
				goto free_err;
			}

			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
				if (unlikely(meta->request.csum_start +
					     meta->request.csum_offset +
					     sizeof(__sum16) > len)) {
					err = -EINVAL;
					goto free_err;
				}

				skb->csum_start = hr + meta->request.csum_start;
				skb->csum_offset = meta->request.csum_offset;
				skb->ip_summed = CHECKSUM_PARTIAL;

				if (unlikely(xs->pool->tx_sw_csum)) {
					err = skb_checksum_help(skb);
					if (err)
						goto free_err;
				}
			}
		}
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(xs->sk.sk_priority);
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs, 1);
	}

	return ERR_PTR(err);
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;
out:
	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));

		/* Make sure the last field of the struct doesn't have
		 * uninitialized padding. All padding has to be explicit
		 * and has to be set to zero by the userspace to make
		 * struct xdp_umem_reg extensible in the future.
		 */
		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
			     sizeof(struct xdp_umem_reg));

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
				READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
				READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);