// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
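/* The need_wakeup helpers above implement the driver side of the
 * XDP_USE_NEED_WAKEUP protocol: the driver sets XDP_RING_NEED_WAKEUP in
 * the fill/Tx ring flags when it needs a kick from user space, and
 * clears it while it is processing the rings on its own. User space
 * checks the flag and only issues a syscall when it is set, e.g. (an
 * illustrative sketch using the libxdp-style ring helper, not part of
 * this file):
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx_ring))
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */
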
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err || likely(!frags))
		goto out;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			return err;
		list_del(&pos->xskb_list_node);
	}

out:
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}
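/* Copy path for Rx: the incoming xdp_buff does not belong to this
 * socket's buffer pool, so payload and metadata are copied into buffers
 * allocated from it. A packet that does not fit in one rx frame is
 * spread over ceil(len / frame_size) descriptors, all but the last
 * flagged with XDP_PKT_CONTD.
 */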
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
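/* Zero-copy drivers pull Tx descriptors through the peek/release
 * interfaces below. A minimal driver-side Tx loop could look like this
 * (illustrative sketch only; hw_slots_free() and queue_hw_descriptor()
 * are hypothetical driver internals and error handling is elided):
 *
 *	struct xdp_desc desc;
 *
 *	while (hw_slots_free(ring) && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		queue_hw_descriptor(ring, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 */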
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_cancel_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}
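/* The number of Tx descriptors backing an skb is stashed in
 * skb_shinfo(skb)->destructor_arg as a pointer-sized integer. On
 * completion, xsk_destruct_skb() submits exactly that many entries to
 * the completion queue; on the error paths the same count is used to
 * back out the reservations.
 */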
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}
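/* Build an skb for one Tx descriptor. Devices that advertise
 * IFF_TX_SKB_NO_LINEAR get umem pages attached directly as frags via
 * xsk_build_skb_zerocopy() above; for all other devices the descriptor
 * payload is copied, into the linear area for the first frame and into
 * freshly allocated pages for later frames of a multi-buffer packet.
 */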
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err)) {
				kfree_skb(skb);
				goto free_err;
			}
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs, 1);
	}

	return ERR_PTR(err);
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}
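/* When busy polling is preferred, the callers below skip the driver
 * wakeup: sk_busy_loop() already drives the NAPI context directly, so
 * an extra kick through ndo_xsk_wakeup() would be redundant.
 */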
static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto skip_tx;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

skip_tx:
	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
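/* Unbinding must make sure no data path still sees the socket. Once the
 * state is flipped to XSK_UNBOUND, synchronize_net() waits out any
 * in-flight RCU readers, e.g. redirects that looked the socket up in an
 * XSKMAP, before the device reference is dropped.
 */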
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}
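/* Resolve the socket behind the fd passed in sxdp_shared_umem_fd and
 * verify that it really is an AF_XDP socket before bind() shares its
 * umem or buffer pool.
 */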
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
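/* Bind the socket to a device/queue pair. A typical user-space call,
 * shown only as an illustrative sketch:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * With XDP_SHARED_UMEM, sxdp_shared_umem_fd instead names an already
 * bound socket whose umem (and, for the same device/queue pair, buffer
 * pool) is reused, and no other flags may be combined with it.
 */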
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(flags & XDP_USE_SG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
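/* Setsockopt stays compatible with binaries built against the original,
 * smaller struct xdp_umem_reg: any optlen of at least the v1 size above
 * is accepted and the fields missing from old layouts remain zeroed.
 */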
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};
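/* Getsockopt applies the same versioning scheme: the length supplied by
 * user space selects between the v1 layouts above and the current
 * structs, so old binaries keep working while newer ones also see the
 * extra statistics and the ring flags offsets.
 */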
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
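/* The rings are mapped into user space with mmap() on the socket fd,
 * the page offset selecting the ring: XDP_PGOFF_RX_RING,
 * XDP_PGOFF_TX_RING, XDP_UMEM_PGOFF_FILL_RING or
 * XDP_UMEM_PGOFF_COMPLETION_RING. An illustrative user-space mapping of
 * the Rx ring (sketch only; nentries is the configured ring size and
 * the offsets come from XDP_MMAP_OFFSETS):
 *
 *	rx_map = mmap(NULL, off.rx.desc + nentries * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      xsk_fd, XDP_PGOFF_RX_RING);
 */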
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);