// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

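/* Illustrative only (not part of this file): with XDP_USE_NEED_WAKEUP, a
 * userspace Tx loop is expected to consult the ring flag that the functions
 * above toggle and kick the kernel only when asked to, e.g.:
 *
 *	// tx_flags points at the Tx ring flags word, located via the
 *	// XDP_MMAP_OFFSETS getsockopt (hypothetical variable names)
 *	if (*tx_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * This is a sketch of the common usage pattern, not an API defined here.
 */
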
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;
		list_del(&pos->xskb_list_node);
	}

	return 0;
err:
	xsk_buff_free(xdp);
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

#ifdef CONFIG_DEBUG_NET
bool xsk_map_check_flush(void)
{
	if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
		return false;
	__xsk_map_flush();
	return true;
}
#endif

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

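/* Illustrative only (not part of this file): a zero-copy driver's Tx clean-up
 * path typically reports finished descriptors back through the completion
 * ring with xsk_tx_completed() and, when the need_wakeup feature is on,
 * re-arms the wakeup flag, roughly:
 *
 *	completed = clean_tx_descriptors(ring);	// driver-specific, hypothetical
 *	if (completed)
 *		xsk_tx_completed(ring->xsk_pool, completed);
 *	if (xsk_uses_need_wakeup(ring->xsk_pool))
 *		xsk_set_tx_need_wakeup(ring->xsk_pool);
 *
 * clean_tx_descriptors() and the ring layout are placeholders for whatever
 * the driver actually uses.
 */
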
void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	bool budget_exhausted = false;
	struct xdp_sock *xs;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}

		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		xs->tx_budget_spent++;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_cancel_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

	if (compl->tx_timestamp) {
		/* sw completion timestamp, not a real one */
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}

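/* Build an skb around payload that stays in the umem: used when the netdev
 * advertises IFF_TX_SKB_NO_LINEAR, so the descriptor's data is attached as
 * page fragments instead of being copied into a linear buffer. The skb
 * truesize is accounted with the chunk size (or the descriptor length for
 * unaligned mode), matching the refcount added to sk_wmem_alloc below.
 */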
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta = NULL;
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	bool first_frag = false;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err)) {
				kfree_skb(skb);
				goto free_err;
			}

			first_frag = true;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
		}

		if (first_frag && desc->options & XDP_TX_METADATA) {
			if (unlikely(xs->pool->tx_metadata_len == 0)) {
				err = -EINVAL;
				goto free_err;
			}

			meta = buffer - xs->pool->tx_metadata_len;
			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
				err = -EINVAL;
				goto free_err;
			}

			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
				if (unlikely(meta->request.csum_start +
					     meta->request.csum_offset +
					     sizeof(__sum16) > len)) {
					err = -EINVAL;
					goto free_err;
				}

				skb->csum_start = hr + meta->request.csum_start;
				skb->csum_offset = meta->request.csum_offset;
				skb->ip_summed = CHECKSUM_PARTIAL;

				if (unlikely(xs->pool->tx_sw_csum)) {
					err = skb_checksum_help(skb);
					if (err)
						goto free_err;
				}
			}
		}
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(xs->sk.sk_priority);
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs, 1);
	}

	return ERR_PTR(err);
}

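/* Copy-mode (skb path) transmit. Pulls up to TX_BATCH_SIZE descriptors off
 * the Tx ring, reserves a completion-queue slot for each as backpressure,
 * builds an skb per packet (possibly multi-buffer) and hands it directly to
 * the driver with __dev_direct_xmit(). Runs under xs->mutex; the caller has
 * dropped the RCU read lock because this path may sleep.
 */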
static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

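/* When sk_prefer_busy_poll and sk_ll_usec are set and a NAPI id has been
 * recorded for the socket, the busy-poll loop in sendmsg()/recvmsg() already
 * drives the driver, so the explicit wakeup can be skipped.
 */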
static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;
out:
	rcu_read_unlock();
	return mask;
}

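/* Illustrative only (not part of this file): a userspace application that
 * relies on xsk_poll() above typically waits for Rx like this:
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		drain_rx_ring();	// hypothetical helper
 *
 * POLLIN is reported once descriptors sit in the Rx ring and POLLOUT while
 * the Tx ring is less than half full, matching the checks above.
 */
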
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

struct xdp_umem_reg_v2 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
	__u32 flags;
};

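/* Illustrative only (not part of this file): the setsockopt() path below and
 * the bind() path above are driven from userspace roughly as follows (error
 * handling and the mmap() of the rings omitted):
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,	// hypothetical buffer
 *		.len = umem_size,
 *		.chunk_size = 4096,
 *		.headroom = 0,
 *	};
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * ring_sz must be a power of two (enforced by xsk_init_queue() above), and
 * umem_area, umem_size, ifindex and queue_id are placeholders.
 */
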
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(struct xdp_umem_reg_v2))
			mr_size = sizeof(struct xdp_umem_reg_v1);
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v2);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

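/* Illustrative only (not part of this file): userspace maps each ring with
 * the fixed page offsets that xsk_mmap() below dispatches on, e.g. for Rx:
 *
 *	rx_map = mmap(NULL, off.rx.desc + size * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      xsk_fd, XDP_PGOFF_RX_RING);
 *
 * where "off" comes from getsockopt(XDP_MMAP_OFFSETS) above and "size" is the
 * ring size passed to setsockopt(XDP_RX_RING).
 */
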
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);