// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_lock.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}
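
/* Illustrative note (not part of the original file): xsk_get_pool_from_qid()
 * is the exported lookup that zero-copy drivers use to find the pool bound to
 * a given queue, e.g. when a ring is being (re)configured. A minimal sketch,
 * where the mydrv_* name is hypothetical:
 *
 *	struct xsk_buff_pool *pool;
 *
 *	pool = xsk_get_pool_from_qid(netdev, qid);
 *	if (pool)
 *		mydrv_setup_zc_rxq(rxq, pool);
 *
 * Registration itself (xsk_reg_pool_at_qid()) is driven by the core buffer
 * pool code when a socket is bound to the queue, not by the driver.
 */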

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb, xskb->pool);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;
		list_del(&pos->list_node);
	}

	return 0;
err:
	xsk_buff_free(xdp);
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}
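
/* Illustrative note (not part of the original file): in both the copy path
 * above and the zero-copy path, a multi-buffer packet is posted to the RX ring
 * as a chain of descriptors, all but the last carrying XDP_PKT_CONTD in
 * desc->options. A userspace consumer is expected to keep collecting
 * descriptors until the flag is clear, conceptually:
 *
 *	do {
 *		desc = next_rx_descriptor();	// hypothetical helper
 *		process_frag(desc->addr, desc->len);
 *	} while (desc->options & XDP_PKT_CONTD);
 */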

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static void __xsk_tx_release(struct xdp_sock *xs)
{
	__xskq_cons_release(xs->tx);
	if (xsk_tx_writeable(xs))
		xs->sk.sk_write_space(&xs->sk);
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		spin_lock_bh(&xs->pool->rx_lock);
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
		spin_unlock_bh(&xs->pool->rx_lock);
	}

	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev) {
		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();

		list_add(&xs->flush_node, flush_list);
	}

	return 0;
}

void __xsk_map_flush(struct list_head *flush_list)
{
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
		__xsk_tx_release(xs);
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	bool budget_exhausted = false;
	struct xdp_sock *xs;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}

		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		xs->tx_budget_spent++;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}
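
/* Illustrative note (not part of the original file): a zero-copy driver's Tx
 * path typically pairs xsk_tx_peek_desc()/xsk_tx_release() (or the batched
 * variant below) with xsk_tx_completed() once the hardware has finished with
 * the buffers. A rough sketch, where the mydrv_* names are hypothetical:
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *		mydrv_post_tx(txq, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 *
 *	// later, from the Tx completion/clean-up path:
 *	xsk_tx_completed(pool, nb_cleaned);
 */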

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(pool->cq, addr);
	spin_unlock_irqrestore(&pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_submit_n(pool->cq, n);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_cancel_n(pool->cq, n);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}

static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

	if (compl->tx_timestamp) {
		/* sw completion timestamp, not a real one */
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}
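
/* Illustrative note (not part of the original file): in copy mode the number
 * of Tx descriptors backing an skb is tracked in
 * skb_shinfo(skb)->destructor_arg (see xsk_get_num_desc() and
 * xsk_set_destructor_arg() above). When the skb is finally freed,
 * xsk_destruct_skb() submits exactly that many entries to the completion ring,
 * and the cancel/drop helpers use the same count to unwind completion slots
 * reserved for frames that were never transmitted.
 */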

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta = NULL;
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	bool first_frag = false;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			first_frag = true;

			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err))
				goto free_err;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
		}

		if (first_frag && desc->options & XDP_TX_METADATA) {
			if (unlikely(xs->pool->tx_metadata_len == 0)) {
				err = -EINVAL;
				goto free_err;
			}

			meta = buffer - xs->pool->tx_metadata_len;
			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
				err = -EINVAL;
				goto free_err;
			}

			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
				if (unlikely(meta->request.csum_start +
					     meta->request.csum_offset +
					     sizeof(__sum16) > len)) {
					err = -EINVAL;
					goto free_err;
				}

				skb->csum_start = hr + meta->request.csum_start;
				skb->csum_offset = meta->request.csum_offset;
				skb->ip_summed = CHECKSUM_PARTIAL;

				if (unlikely(xs->pool->tx_sw_csum)) {
					err = skb_checksum_help(skb);
					if (err)
						goto free_err;
				}
			}

			if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
				skb->skb_mstamp_ns = meta->request.launch_time;
		}
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(xs->sk.sk_priority);
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (first_frag && skb)
		kfree_skb(skb);

	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs->pool, 1);
	}

	return ERR_PTR(err);
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
		if (err) {
			err = -EAGAIN;
			goto out;
		}

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		__xsk_tx_release(xs);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}
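
/* Illustrative note (not part of the original file): the generic Tx path above
 * is what a copy-mode socket ends up in when userspace kicks the kernel. With
 * XDP_USE_NEED_WAKEUP, the producer is only expected to issue the syscall when
 * the ring flags ask for it, roughly:
 *
 *	if (ring_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * A failed send with EAGAIN or EBUSY simply means the batch budget or the
 * completion ring ran out and the call should be retried later.
 */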

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       napi_id_valid(READ_ONCE(sk->sk_napi_id));
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;
out:
	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
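
/* Illustrative note (not part of the original file): an XDP socket moves
 * through a small state machine: XSK_READY after creation (umem and rings may
 * still be configured), XSK_BOUND once xsk_bind() succeeds, and XSK_UNBOUND
 * when the device goes away or the socket is released. xsk_unbind_dev() below
 * publishes XSK_UNBOUND and then waits (synchronize_net()) so that no
 * data-path user can still observe the socket as bound.
 */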

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	netdev_lock_ops(dev);

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If the underlying shared umem was created without a
			 * Tx ring, allocate the Tx descs array that the Tx
			 * batching API utilizes.
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

	if (qid < dev->real_num_rx_queues) {
		struct netdev_rx_queue *rxq;

		rxq = __netif_get_rx_queue(dev, qid);
		if (rxq->napi)
			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
	}

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
	netdev_unlock_ops(dev);
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
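
/* Illustrative note (not part of the original file): from userspace, binding
 * is a regular bind(2) with a struct sockaddr_xdp, after the umem and rings
 * have been configured via setsockopt(). A minimal sketch:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,	// optional flags
 *	};
 *
 *	if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
 *		// handle error; EBUSY means the socket is not in XSK_READY
 *
 * For XDP_SHARED_UMEM, sxdp_shared_umem_fd carries the fd of the socket that
 * owns the umem, and no other flags may be set (see the checks above).
 */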

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));

		/* Make sure the last field of the struct doesn't have
		 * uninitialized padding. All padding has to be explicit
		 * and has to be set to zero by the userspace to make
		 * struct xdp_umem_reg extensible in the future.
		 */
		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
			     sizeof(struct xdp_umem_reg));

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
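
/* Illustrative note (not part of the original file): the expected setup order
 * on the userspace side is to register the umem first and then size the
 * rings, all while the socket is still in XSK_READY:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,
 *		.len = umem_size,
 *		.chunk_size = 4096,
 *		.headroom = 0,
 *	};
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 *
 * Ring sizes must be powers of two (enforced by xsk_init_queue()), and all of
 * this has to happen before bind(); afterwards the options return -EBUSY.
 */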

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
				READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
				READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}
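
/* Illustrative note (not part of the original file): userspace maps each ring
 * by passing one of the fixed page offsets (XDP_PGOFF_RX_RING,
 * XDP_PGOFF_TX_RING, XDP_UMEM_PGOFF_FILL_RING, XDP_UMEM_PGOFF_COMPLETION_RING)
 * to mmap() and uses getsockopt(XDP_MMAP_OFFSETS) to locate the
 * producer/consumer indices and the descriptor array inside the mapping, e.g.
 * for the RX ring:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		   fd, XDP_PGOFF_RX_RING);
 *
 * The fill and completion rings are mapped the same way; their descriptors are
 * plain __u64 addresses rather than struct xdp_desc.
 */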

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = xsk_recvmsg,
	.mmap = xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);