1 /*- 2 * Copyright (c) 2004-2006 Kip Macy 3 * Copyright (c) 2015 Wei Liu <wei.liu2@citrix.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 34 #include <sys/param.h> 35 #include <sys/sockio.h> 36 #include <sys/limits.h> 37 #include <sys/mbuf.h> 38 #include <sys/malloc.h> 39 #include <sys/module.h> 40 #include <sys/kernel.h> 41 #include <sys/socket.h> 42 #include <sys/sysctl.h> 43 #include <sys/taskqueue.h> 44 45 #include <net/if.h> 46 #include <net/if_var.h> 47 #include <net/if_arp.h> 48 #include <net/ethernet.h> 49 #include <net/if_media.h> 50 #include <net/bpf.h> 51 #include <net/if_types.h> 52 53 #include <netinet/in.h> 54 #include <netinet/ip.h> 55 #include <netinet/if_ether.h> 56 #include <netinet/tcp.h> 57 #include <netinet/tcp_lro.h> 58 59 #include <vm/vm.h> 60 #include <vm/pmap.h> 61 62 #include <sys/bus.h> 63 64 #include <xen/xen-os.h> 65 #include <xen/hypervisor.h> 66 #include <xen/xen_intr.h> 67 #include <xen/gnttab.h> 68 #include <xen/interface/memory.h> 69 #include <xen/interface/io/netif.h> 70 #include <xen/xenbus/xenbusvar.h> 71 72 #include "xenbus_if.h" 73 74 /* Features supported by all backends. TSO and LRO can be negotiated */ 75 #define XN_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 76 77 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) 78 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) 79 80 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) 81 82 /* 83 * Should the driver do LRO on the RX end 84 * this can be toggled on the fly, but the 85 * interface must be reset (down/up) for it 86 * to take effect. 87 */ 88 static int xn_enable_lro = 1; 89 TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro); 90 91 /* 92 * Number of pairs of queues. 93 */ 94 static unsigned long xn_num_queues = 4; 95 TUNABLE_ULONG("hw.xn.num_queues", &xn_num_queues); 96 97 /** 98 * \brief The maximum allowed data fragments in a single transmit 99 * request. 100 * 101 * This limit is imposed by the backend driver. We assume here that 102 * we are dealing with a Linux driver domain and have set our limit 103 * to mirror the Linux MAX_SKB_FRAGS constant. 
104 */ 105 #define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2) 106 107 #define RX_COPY_THRESHOLD 256 108 109 #define net_ratelimit() 0 110 111 struct netfront_rxq; 112 struct netfront_txq; 113 struct netfront_info; 114 struct netfront_rx_info; 115 116 static void xn_txeof(struct netfront_txq *); 117 static void xn_rxeof(struct netfront_rxq *); 118 static void xn_alloc_rx_buffers(struct netfront_rxq *); 119 static void xn_alloc_rx_buffers_callout(void *arg); 120 121 static void xn_release_rx_bufs(struct netfront_rxq *); 122 static void xn_release_tx_bufs(struct netfront_txq *); 123 124 static void xn_rxq_intr(struct netfront_rxq *); 125 static void xn_txq_intr(struct netfront_txq *); 126 static void xn_intr(void *); 127 static inline int xn_count_frags(struct mbuf *m); 128 static int xn_assemble_tx_request(struct netfront_txq *, struct mbuf *); 129 static int xn_ioctl(struct ifnet *, u_long, caddr_t); 130 static void xn_ifinit_locked(struct netfront_info *); 131 static void xn_ifinit(void *); 132 static void xn_stop(struct netfront_info *); 133 static void xn_query_features(struct netfront_info *np); 134 static int xn_configure_features(struct netfront_info *np); 135 static void netif_free(struct netfront_info *info); 136 static int netfront_detach(device_t dev); 137 138 static int xn_txq_mq_start_locked(struct netfront_txq *, struct mbuf *); 139 static int xn_txq_mq_start(struct ifnet *, struct mbuf *); 140 141 static int talk_to_backend(device_t dev, struct netfront_info *info); 142 static int create_netdev(device_t dev); 143 static void netif_disconnect_backend(struct netfront_info *info); 144 static int setup_device(device_t dev, struct netfront_info *info, 145 unsigned long); 146 static int xn_ifmedia_upd(struct ifnet *ifp); 147 static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); 148 149 static int xn_connect(struct netfront_info *); 150 static void xn_kick_rings(struct netfront_info *); 151 152 static int xn_get_responses(struct netfront_rxq *, 153 struct netfront_rx_info *, RING_IDX, RING_IDX *, 154 struct mbuf **); 155 156 #define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT) 157 158 #define INVALID_P2M_ENTRY (~0UL) 159 #define XN_QUEUE_NAME_LEN 8 /* xn{t,r}x_%u, allow for two digits */ 160 struct netfront_rxq { 161 struct netfront_info *info; 162 u_int id; 163 char name[XN_QUEUE_NAME_LEN]; 164 struct mtx lock; 165 166 int ring_ref; 167 netif_rx_front_ring_t ring; 168 xen_intr_handle_t xen_intr_handle; 169 170 grant_ref_t gref_head; 171 grant_ref_t grant_ref[NET_RX_RING_SIZE + 1]; 172 173 struct mbuf *mbufs[NET_RX_RING_SIZE + 1]; 174 175 struct lro_ctrl lro; 176 177 struct callout rx_refill; 178 }; 179 180 struct netfront_txq { 181 struct netfront_info *info; 182 u_int id; 183 char name[XN_QUEUE_NAME_LEN]; 184 struct mtx lock; 185 186 int ring_ref; 187 netif_tx_front_ring_t ring; 188 xen_intr_handle_t xen_intr_handle; 189 190 grant_ref_t gref_head; 191 grant_ref_t grant_ref[NET_TX_RING_SIZE + 1]; 192 193 struct mbuf *mbufs[NET_TX_RING_SIZE + 1]; 194 int mbufs_cnt; 195 struct buf_ring *br; 196 197 struct taskqueue *tq; 198 struct task defrtask; 199 200 bool full; 201 }; 202 203 struct netfront_info { 204 struct ifnet *xn_ifp; 205 206 struct mtx sc_lock; 207 208 u_int num_queues; 209 struct netfront_rxq *rxq; 210 struct netfront_txq *txq; 211 212 u_int carrier; 213 u_int maxfrags; 214 215 device_t xbdev; 216 uint8_t mac[ETHER_ADDR_LEN]; 217 218 int xn_if_flags; 219 220 struct ifmedia sc_media; 221 222 bool xn_reset; 223 }; 224 225 struct netfront_rx_info { 226 struct 
netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define XN_RX_LOCK(_q)		mtx_lock(&(_q)->lock)
#define XN_RX_UNLOCK(_q)	mtx_unlock(&(_q)->lock)

#define XN_TX_LOCK(_q)		mtx_lock(&(_q)->lock)
#define XN_TX_TRYLOCK(_q)	mtx_trylock(&(_q)->lock)
#define XN_TX_UNLOCK(_q)	mtx_unlock(&(_q)->lock)

#define XN_LOCK(_sc)		mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_q)	mtx_assert(&(_q)->lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_q)	mtx_assert(&(_q)->lock, MA_OWNED);

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */

static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{

	KASSERT(id != 0,
	    ("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0] = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
	    ("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}

static inline int
xn_rxidx(RING_IDX idx)
{

	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xn_get_rx_mbuf(struct netfront_rxq *rxq, RING_IDX ri)
{
	int i;
	struct mbuf *m;

	i = xn_rxidx(ri);
	m = rxq->mbufs[i];
	rxq->mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xn_get_rx_ref(struct netfront_rxq *rxq, RING_IDX ri)
{
	int i = xn_rxidx(ri);
	grant_ref_t ref = rxq->grant_ref[i];

	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	rxq->grant_ref[i] = GRANT_REF_INVALID;
	return (ref);
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;
	const char *path;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * enabled.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree.  For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
343 * However, there should always be a mac listed in the 344 * backend tree. Fallback to this version if our query 345 * of the front side XenStore location doesn't find 346 * anything. 347 */ 348 path = xenbus_get_otherend_path(dev); 349 error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr); 350 } 351 if (error != 0) { 352 xenbus_dev_fatal(dev, error, "parsing %s/mac", path); 353 return (error); 354 } 355 356 s = macstr; 357 for (i = 0; i < ETHER_ADDR_LEN; i++) { 358 mac[i] = strtoul(s, &e, 16); 359 if (s == e || (e[0] != ':' && e[0] != 0)) { 360 free(macstr, M_XENBUS); 361 return (ENOENT); 362 } 363 s = &e[1]; 364 } 365 free(macstr, M_XENBUS); 366 return (0); 367 } 368 369 /** 370 * Entry point to this code when a new device is created. Allocate the basic 371 * structures and the ring buffers for communication with the backend, and 372 * inform the backend of the appropriate details for those. Switch to 373 * Connected state. 374 */ 375 static int 376 netfront_probe(device_t dev) 377 { 378 379 if (xen_hvm_domain() && xen_disable_pv_nics != 0) 380 return (ENXIO); 381 382 if (!strcmp(xenbus_get_type(dev), "vif")) { 383 device_set_desc(dev, "Virtual Network Interface"); 384 return (0); 385 } 386 387 return (ENXIO); 388 } 389 390 static int 391 netfront_attach(device_t dev) 392 { 393 int err; 394 395 err = create_netdev(dev); 396 if (err != 0) { 397 xenbus_dev_fatal(dev, err, "creating netdev"); 398 return (err); 399 } 400 401 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 402 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 403 OID_AUTO, "enable_lro", CTLFLAG_RW, 404 &xn_enable_lro, 0, "Large Receive Offload"); 405 406 SYSCTL_ADD_ULONG(device_get_sysctl_ctx(dev), 407 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 408 OID_AUTO, "num_queues", CTLFLAG_RD, 409 &xn_num_queues, "Number of pairs of queues"); 410 411 return (0); 412 } 413 414 static int 415 netfront_suspend(device_t dev) 416 { 417 struct netfront_info *np = device_get_softc(dev); 418 u_int i; 419 420 for (i = 0; i < np->num_queues; i++) { 421 XN_RX_LOCK(&np->rxq[i]); 422 XN_TX_LOCK(&np->txq[i]); 423 } 424 netfront_carrier_off(np); 425 for (i = 0; i < np->num_queues; i++) { 426 XN_RX_UNLOCK(&np->rxq[i]); 427 XN_TX_UNLOCK(&np->txq[i]); 428 } 429 return (0); 430 } 431 432 /** 433 * We are reconnecting to the backend, due to a suspend/resume, or a backend 434 * driver restart. We tear down our netif structure and recreate it, but 435 * leave the device-layer structures intact so that this is transparent to the 436 * rest of the kernel. 437 */ 438 static int 439 netfront_resume(device_t dev) 440 { 441 struct netfront_info *info = device_get_softc(dev); 442 u_int i; 443 444 if (xen_suspend_cancelled) { 445 for (i = 0; i < info->num_queues; i++) { 446 XN_RX_LOCK(&info->rxq[i]); 447 XN_TX_LOCK(&info->txq[i]); 448 } 449 netfront_carrier_on(info); 450 for (i = 0; i < info->num_queues; i++) { 451 XN_RX_UNLOCK(&info->rxq[i]); 452 XN_TX_UNLOCK(&info->txq[i]); 453 } 454 return (0); 455 } 456 457 netif_disconnect_backend(info); 458 return (0); 459 } 460 461 static int 462 write_queue_xenstore_keys(device_t dev, 463 struct netfront_rxq *rxq, 464 struct netfront_txq *txq, 465 struct xs_transaction *xst, bool hierarchy) 466 { 467 int err; 468 const char *message; 469 const char *node = xenbus_get_node(dev); 470 char *path; 471 size_t path_size; 472 473 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids")); 474 /* Split event channel support is not yet there. 
*/ 475 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle, 476 ("Split event channels are not supported")); 477 478 if (hierarchy) { 479 path_size = strlen(node) + 10; 480 path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO); 481 snprintf(path, path_size, "%s/queue-%u", node, rxq->id); 482 } else { 483 path_size = strlen(node) + 1; 484 path = malloc(path_size, M_DEVBUF, M_WAITOK|M_ZERO); 485 snprintf(path, path_size, "%s", node); 486 } 487 488 err = xs_printf(*xst, path, "tx-ring-ref","%u", txq->ring_ref); 489 if (err != 0) { 490 message = "writing tx ring-ref"; 491 goto error; 492 } 493 err = xs_printf(*xst, path, "rx-ring-ref","%u", rxq->ring_ref); 494 if (err != 0) { 495 message = "writing rx ring-ref"; 496 goto error; 497 } 498 err = xs_printf(*xst, path, "event-channel", "%u", 499 xen_intr_port(rxq->xen_intr_handle)); 500 if (err != 0) { 501 message = "writing event-channel"; 502 goto error; 503 } 504 505 free(path, M_DEVBUF); 506 507 return (0); 508 509 error: 510 free(path, M_DEVBUF); 511 xenbus_dev_fatal(dev, err, "%s", message); 512 513 return (err); 514 } 515 516 /* Common code used when first setting up, and when resuming. */ 517 static int 518 talk_to_backend(device_t dev, struct netfront_info *info) 519 { 520 const char *message; 521 struct xs_transaction xst; 522 const char *node = xenbus_get_node(dev); 523 int err; 524 unsigned long num_queues, max_queues = 0; 525 unsigned int i; 526 527 err = xen_net_read_mac(dev, info->mac); 528 if (err != 0) { 529 xenbus_dev_fatal(dev, err, "parsing %s/mac", node); 530 goto out; 531 } 532 533 err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev), 534 "multi-queue-max-queues", NULL, "%lu", &max_queues); 535 if (err != 0) 536 max_queues = 1; 537 num_queues = xn_num_queues; 538 if (num_queues > max_queues) 539 num_queues = max_queues; 540 541 err = setup_device(dev, info, num_queues); 542 if (err != 0) 543 goto out; 544 545 again: 546 err = xs_transaction_start(&xst); 547 if (err != 0) { 548 xenbus_dev_fatal(dev, err, "starting transaction"); 549 goto free; 550 } 551 552 if (info->num_queues == 1) { 553 err = write_queue_xenstore_keys(dev, &info->rxq[0], 554 &info->txq[0], &xst, false); 555 if (err != 0) 556 goto abort_transaction_no_def_error; 557 } else { 558 err = xs_printf(xst, node, "multi-queue-num-queues", 559 "%u", info->num_queues); 560 if (err != 0) { 561 message = "writing multi-queue-num-queues"; 562 goto abort_transaction; 563 } 564 565 for (i = 0; i < info->num_queues; i++) { 566 err = write_queue_xenstore_keys(dev, &info->rxq[i], 567 &info->txq[i], &xst, true); 568 if (err != 0) 569 goto abort_transaction_no_def_error; 570 } 571 } 572 573 err = xs_printf(xst, node, "request-rx-copy", "%u", 1); 574 if (err != 0) { 575 message = "writing request-rx-copy"; 576 goto abort_transaction; 577 } 578 err = xs_printf(xst, node, "feature-rx-notify", "%d", 1); 579 if (err != 0) { 580 message = "writing feature-rx-notify"; 581 goto abort_transaction; 582 } 583 err = xs_printf(xst, node, "feature-sg", "%d", 1); 584 if (err != 0) { 585 message = "writing feature-sg"; 586 goto abort_transaction; 587 } 588 if ((info->xn_ifp->if_capenable & IFCAP_LRO) != 0) { 589 err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1); 590 if (err != 0) { 591 message = "writing feature-gso-tcpv4"; 592 goto abort_transaction; 593 } 594 } 595 if ((info->xn_ifp->if_capenable & IFCAP_RXCSUM) == 0) { 596 err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1); 597 if (err != 0) { 598 message = "writing feature-no-csum-offload"; 599 goto 
abort_transaction; 600 } 601 } 602 603 err = xs_transaction_end(xst, 0); 604 if (err != 0) { 605 if (err == EAGAIN) 606 goto again; 607 xenbus_dev_fatal(dev, err, "completing transaction"); 608 goto free; 609 } 610 611 return 0; 612 613 abort_transaction: 614 xenbus_dev_fatal(dev, err, "%s", message); 615 abort_transaction_no_def_error: 616 xs_transaction_end(xst, 1); 617 free: 618 netif_free(info); 619 out: 620 return (err); 621 } 622 623 static void 624 xn_rxq_intr(struct netfront_rxq *rxq) 625 { 626 627 XN_RX_LOCK(rxq); 628 xn_rxeof(rxq); 629 XN_RX_UNLOCK(rxq); 630 } 631 632 static void 633 xn_txq_start(struct netfront_txq *txq) 634 { 635 struct netfront_info *np = txq->info; 636 struct ifnet *ifp = np->xn_ifp; 637 638 XN_TX_LOCK_ASSERT(txq); 639 if (!drbr_empty(ifp, txq->br)) 640 xn_txq_mq_start_locked(txq, NULL); 641 } 642 643 static void 644 xn_txq_intr(struct netfront_txq *txq) 645 { 646 647 XN_TX_LOCK(txq); 648 if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring)) 649 xn_txeof(txq); 650 xn_txq_start(txq); 651 XN_TX_UNLOCK(txq); 652 } 653 654 static void 655 xn_txq_tq_deferred(void *xtxq, int pending) 656 { 657 struct netfront_txq *txq = xtxq; 658 659 XN_TX_LOCK(txq); 660 xn_txq_start(txq); 661 XN_TX_UNLOCK(txq); 662 } 663 664 static void 665 disconnect_rxq(struct netfront_rxq *rxq) 666 { 667 668 xn_release_rx_bufs(rxq); 669 gnttab_free_grant_references(rxq->gref_head); 670 gnttab_end_foreign_access(rxq->ring_ref, NULL); 671 /* 672 * No split event channel support at the moment, handle will 673 * be unbound in tx. So no need to call xen_intr_unbind here, 674 * but we do want to reset the handler to 0. 675 */ 676 rxq->xen_intr_handle = 0; 677 } 678 679 static void 680 destroy_rxq(struct netfront_rxq *rxq) 681 { 682 683 callout_drain(&rxq->rx_refill); 684 free(rxq->ring.sring, M_DEVBUF); 685 } 686 687 static void 688 destroy_rxqs(struct netfront_info *np) 689 { 690 int i; 691 692 for (i = 0; i < np->num_queues; i++) 693 destroy_rxq(&np->rxq[i]); 694 695 free(np->rxq, M_DEVBUF); 696 np->rxq = NULL; 697 } 698 699 static int 700 setup_rxqs(device_t dev, struct netfront_info *info, 701 unsigned long num_queues) 702 { 703 int q, i; 704 int error; 705 netif_rx_sring_t *rxs; 706 struct netfront_rxq *rxq; 707 708 info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues, 709 M_DEVBUF, M_WAITOK|M_ZERO); 710 711 for (q = 0; q < num_queues; q++) { 712 rxq = &info->rxq[q]; 713 714 rxq->id = q; 715 rxq->info = info; 716 rxq->ring_ref = GRANT_REF_INVALID; 717 rxq->ring.sring = NULL; 718 snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q); 719 mtx_init(&rxq->lock, rxq->name, "netfront receive lock", 720 MTX_DEF); 721 722 for (i = 0; i <= NET_RX_RING_SIZE; i++) { 723 rxq->mbufs[i] = NULL; 724 rxq->grant_ref[i] = GRANT_REF_INVALID; 725 } 726 727 /* Start resources allocation */ 728 729 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, 730 &rxq->gref_head) != 0) { 731 device_printf(dev, "allocating rx gref"); 732 error = ENOMEM; 733 goto fail; 734 } 735 736 rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, 737 M_WAITOK|M_ZERO); 738 SHARED_RING_INIT(rxs); 739 FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE); 740 741 error = xenbus_grant_ring(dev, virt_to_mfn(rxs), 742 &rxq->ring_ref); 743 if (error != 0) { 744 device_printf(dev, "granting rx ring page"); 745 goto fail_grant_ring; 746 } 747 748 callout_init(&rxq->rx_refill, 1); 749 } 750 751 return (0); 752 753 fail_grant_ring: 754 gnttab_free_grant_references(rxq->gref_head); 755 free(rxq->ring.sring, M_DEVBUF); 756 fail: 757 for (; q >= 0; q--) { 758 
disconnect_rxq(&info->rxq[q]); 759 destroy_rxq(&info->rxq[q]); 760 } 761 762 free(info->rxq, M_DEVBUF); 763 return (error); 764 } 765 766 static void 767 disconnect_txq(struct netfront_txq *txq) 768 { 769 770 xn_release_tx_bufs(txq); 771 gnttab_free_grant_references(txq->gref_head); 772 gnttab_end_foreign_access(txq->ring_ref, NULL); 773 xen_intr_unbind(&txq->xen_intr_handle); 774 } 775 776 static void 777 destroy_txq(struct netfront_txq *txq) 778 { 779 780 free(txq->ring.sring, M_DEVBUF); 781 buf_ring_free(txq->br, M_DEVBUF); 782 taskqueue_drain_all(txq->tq); 783 taskqueue_free(txq->tq); 784 } 785 786 static void 787 destroy_txqs(struct netfront_info *np) 788 { 789 int i; 790 791 for (i = 0; i < np->num_queues; i++) 792 destroy_txq(&np->txq[i]); 793 794 free(np->txq, M_DEVBUF); 795 np->txq = NULL; 796 } 797 798 static int 799 setup_txqs(device_t dev, struct netfront_info *info, 800 unsigned long num_queues) 801 { 802 int q, i; 803 int error; 804 netif_tx_sring_t *txs; 805 struct netfront_txq *txq; 806 807 info->txq = malloc(sizeof(struct netfront_txq) * num_queues, 808 M_DEVBUF, M_WAITOK|M_ZERO); 809 810 for (q = 0; q < num_queues; q++) { 811 txq = &info->txq[q]; 812 813 txq->id = q; 814 txq->info = info; 815 816 txq->ring_ref = GRANT_REF_INVALID; 817 txq->ring.sring = NULL; 818 819 snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q); 820 821 mtx_init(&txq->lock, txq->name, "netfront transmit lock", 822 MTX_DEF); 823 824 for (i = 0; i <= NET_TX_RING_SIZE; i++) { 825 txq->mbufs[i] = (void *) ((u_long) i+1); 826 txq->grant_ref[i] = GRANT_REF_INVALID; 827 } 828 txq->mbufs[NET_TX_RING_SIZE] = (void *)0; 829 830 /* Start resources allocation. */ 831 832 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 833 &txq->gref_head) != 0) { 834 device_printf(dev, "failed to allocate tx grant refs\n"); 835 error = ENOMEM; 836 goto fail; 837 } 838 839 txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, 840 M_WAITOK|M_ZERO); 841 SHARED_RING_INIT(txs); 842 FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE); 843 844 error = xenbus_grant_ring(dev, virt_to_mfn(txs), 845 &txq->ring_ref); 846 if (error != 0) { 847 device_printf(dev, "failed to grant tx ring\n"); 848 goto fail_grant_ring; 849 } 850 851 txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF, 852 M_WAITOK, &txq->lock); 853 TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq); 854 855 txq->tq = taskqueue_create(txq->name, M_WAITOK, 856 taskqueue_thread_enqueue, &txq->tq); 857 858 error = taskqueue_start_threads(&txq->tq, 1, PI_NET, 859 "%s txq %d", device_get_nameunit(dev), txq->id); 860 if (error != 0) { 861 device_printf(dev, "failed to start tx taskq %d\n", 862 txq->id); 863 goto fail_start_thread; 864 } 865 866 error = xen_intr_alloc_and_bind_local_port(dev, 867 xenbus_get_otherend_id(dev), /* filter */ NULL, xn_intr, 868 &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, 869 &txq->xen_intr_handle); 870 871 if (error != 0) { 872 device_printf(dev, "xen_intr_alloc_and_bind_local_port failed\n"); 873 goto fail_bind_port; 874 } 875 } 876 877 return (0); 878 879 fail_bind_port: 880 taskqueue_drain_all(txq->tq); 881 fail_start_thread: 882 buf_ring_free(txq->br, M_DEVBUF); 883 taskqueue_free(txq->tq); 884 gnttab_end_foreign_access(txq->ring_ref, NULL); 885 fail_grant_ring: 886 gnttab_free_grant_references(txq->gref_head); 887 free(txq->ring.sring, M_DEVBUF); 888 fail: 889 for (; q >= 0; q--) { 890 disconnect_txq(&info->txq[q]); 891 destroy_txq(&info->txq[q]); 892 } 893 894 free(info->txq, M_DEVBUF); 895 return (error); 896 } 897 898 static int 899 
setup_device(device_t dev, struct netfront_info *info,
    unsigned long num_queues)
{
	int error;
	int q;

	if (info->txq)
		destroy_txqs(info);

	if (info->rxq)
		destroy_rxqs(info);

	info->num_queues = 0;

	error = setup_rxqs(dev, info, num_queues);
	if (error != 0)
		goto out;
	error = setup_txqs(dev, info, num_queues);
	if (error != 0)
		goto out;

	info->num_queues = num_queues;

	/* No split event channel at the moment. */
	for (q = 0; q < num_queues; q++)
		info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;

	return (0);

out:
	KASSERT(error != 0, ("Error path taken without providing an error code"));
	return (error);
}

#ifdef INET
/**
 * If this interface has an ipv4 address, send an arp for it. This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (xn_connect(sc) != 0)
			break;
		/* Switch to connected state before kicking the rings. */
		xenbus_set_state(sc->xbdev, XenbusStateConnected);
		xn_kick_rings(sc);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	case XenbusStateClosed:
		if (sc->xn_reset) {
			netif_disconnect_backend(sc);
			xenbus_set_state(dev, XenbusStateInitialising);
			sc->xn_reset = false;
		}
		break;
	case XenbusStateConnected:
#ifdef INET
		netfront_send_fake_arp(dev, sc);
#endif
		break;
	}
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_txq *txq)
{

	return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
}

static void
xn_release_tx_bufs(struct netfront_txq *txq)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = txq->mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE.  Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_end_foreign_access_ref(txq->grant_ref[i]);
		gnttab_release_grant_reference(&txq->gref_head,
		    txq->grant_ref[i]);
		txq->grant_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(txq->mbufs, i);
		txq->mbufs_cnt--;
		if (txq->mbufs_cnt < 0) {
			panic("%s: tx_chain_cnt must be >= 0", __func__);
		}
		m_free(m);
	}
}

static struct mbuf *
xn_alloc_one_rx_buffer(struct netfront_rxq *rxq)
{
	struct mbuf *m;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (m == NULL)
		return NULL;
	m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;

	return (m);
}

static void
xn_alloc_rx_buffers(struct netfront_rxq *rxq)
{
	RING_IDX req_prod;
	int notify;

	XN_RX_LOCK_ASSERT(rxq);

	if (__predict_false(rxq->info->carrier == 0))
		return;

	for (req_prod = rxq->ring.req_prod_pvt;
	     req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct mbuf *m;
		unsigned short id;
		grant_ref_t ref;
		struct netif_rx_request *req;
		unsigned long pfn;

		m = xn_alloc_one_rx_buffer(rxq);
		if (m == NULL)
			break;

		id = xn_rxidx(req_prod);

		KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
		rxq->mbufs[id] = m;

		ref = gnttab_claim_grant_reference(&rxq->gref_head);
		KASSERT(ref != GNTTAB_LIST_END,
		    ("reserved grant references exhausted"));
		rxq->grant_ref[id] = ref;

		pfn = atop(vtophys(mtod(m, vm_offset_t)));
		req = RING_GET_REQUEST(&rxq->ring, req_prod);

		gnttab_grant_foreign_access_ref(ref,
		    xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
		req->id = id;
		req->gref = ref;
	}

	rxq->ring.req_prod_pvt = req_prod;

	/* Not enough requests? Try again later. */
	if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
		callout_reset_curcpu(&rxq->rx_refill, hz/10,
		    xn_alloc_rx_buffers_callout, rxq);
		return;
	}

	wmb();		/* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
	if (notify)
		xen_intr_signal(rxq->xen_intr_handle);
}

static void
xn_alloc_rx_buffers_callout(void *arg)
{
	struct netfront_rxq *rxq;

	rxq = (struct netfront_rxq *)arg;
	XN_RX_LOCK(rxq);
	xn_alloc_rx_buffers(rxq);
	XN_RX_UNLOCK(rxq);
}

static void
xn_release_rx_bufs(struct netfront_rxq *rxq)
{
	int i, ref;
	struct mbuf *m;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		m = rxq->mbufs[i];

		if (m == NULL)
			continue;

		ref = rxq->grant_ref[i];
		if (ref == GRANT_REF_INVALID)
			continue;

		gnttab_end_foreign_access_ref(ref);
		gnttab_release_grant_reference(&rxq->gref_head, ref);
		rxq->mbufs[i] = NULL;
		rxq->grant_ref[i] = GRANT_REF_INVALID;
		m_freem(m);
	}
}

static void
xn_rxeof(struct netfront_rxq *rxq)
{
	struct ifnet *ifp;
	struct netfront_info *np = rxq->info;
#if (defined(INET) || defined(INET6))
	struct lro_ctrl *lro = &rxq->lro;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	struct mbuf *m;
	struct mbufq mbufq_rxq, mbufq_errq;
	int err, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(rxq);
		if (!netfront_carrier_ok(np))
			return;

		/* XXX: there should be some sane limit. */
		mbufq_init(&mbufq_errq, INT_MAX);
		mbufq_init(&mbufq_rxq, INT_MAX);

		ifp = np->xn_ifp;

		rp = rxq->ring.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = rxq->ring.rsp_cons;
		while ((i != rp)) {
			memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xn_get_responses(rxq, &rinfo, rp, &i, &m);

			if (__predict_false(err)) {
				if (m)
					(void)mbufq_enqueue(&mbufq_errq, m);
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/* Tell the stack the checksums are okay */
				/*
				 * XXX this isn't necessarily the case - need to add
				 * check
				 */

				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID
				    | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}
			if ((rx->flags & NETRXF_extra_info) != 0 &&
			    (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
			    XEN_NETIF_EXTRA_TYPE_GSO)) {
				m->m_pkthdr.tso_segsz =
				    extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
				m->m_pkthdr.csum_flags |= CSUM_TSO;
			}

			(void)mbufq_enqueue(&mbufq_rxq, m);
			rxq->ring.rsp_cons = i;
		}

		mbufq_drain(&mbufq_errq);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&mbufq_rxq)) != NULL) {
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			/* XXX: Do we really need to drop the rx lock? */
			XN_RX_UNLOCK(rxq);
#if (defined(INET) || defined(INET6))
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif

			XN_RX_LOCK(rxq);
		}

		rxq->ring.rsp_cons = i;

#if (defined(INET) || defined(INET6))
		/*
		 * Flush any outstanding LRO work
		 */
		tcp_lro_flush_all(lro);
#endif

		xn_alloc_rx_buffers(rxq);

		RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_txq *txq)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;
	struct netfront_info *np = txq->info;

	XN_TX_LOCK_ASSERT(txq);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = txq->ring.sring->rsp_prod;
		rmb();	/* Ensure we see responses up to 'prod'. */

		for (i = txq->ring.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&txq->ring, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				    __func__, txr->status);
			}
			id = txr->id;
			m = txq->mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
			    ("mbuf already on the free list, but we're "
			    "trying to free it again!"));
			M_ASSERTVALID(m);

			if (__predict_false(gnttab_query_foreign_access(
			    txq->grant_ref[id]) != 0)) {
				panic("%s: grant id %u still in use by the "
				    "backend", __func__, id);
			}
			gnttab_end_foreign_access_ref(txq->grant_ref[id]);
			gnttab_release_grant_reference(
			    &txq->gref_head, txq->grant_ref[id]);
			txq->grant_ref[id] = GRANT_REF_INVALID;

			txq->mbufs[id] = NULL;
			add_id_to_freelist(txq->mbufs, id);
			txq->mbufs_cnt--;
			m_free(m);
			/* Only mark the txq active if we've freed up at least one slot to try */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		txq->ring.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		txq->ring.sring->rsp_event =
		    prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != txq->ring.sring->rsp_prod);

	if (txq->full &&
	    ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		txq->full = false;
		xn_txq_start(txq);
	}
}

static void
xn_intr(void *xsc)
{
	struct netfront_txq *txq = xsc;
	struct netfront_info *np = txq->info;
	struct netfront_rxq *rxq = &np->rxq[txq->id];

	/* kick both tx and rx */
	xn_rxq_intr(rxq);
	xn_txq_intr(txq);
}

static void
xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xn_rxidx(rxq->ring.req_prod_pvt);

	KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
	rxq->mbufs[new] = m;
	rxq->grant_ref[new] = ref;
	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
	rxq->ring.req_prod_pvt++;
}

static int
xn_get_extras(struct netfront_rxq *rxq,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (__predict_false(*cons + 1 == rp)) {
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&rxq->ring, ++(*cons));

		if (__predict_false(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xn_get_rx_mbuf(rxq, *cons);
		ref = xn_get_rx_ref(rxq, *cons);
		xn_move_rx_slot(rxq, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return err;
}

static int
xn_get_responses(struct netfront_rxq *rxq,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list)
{
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xn_get_rx_ref(rxq, *cons);
	RING_IDX ref_cons = *cons;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xn_get_extras(rxq, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
#if 0
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
			rx->status, rx->offset, frags);
#endif
		if (__predict_false(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {

			xn_move_rx_slot(rxq, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {
			printf("%s: Bad rx response id %d.\n", __func__, rx->id);
			err = EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref);
		KASSERT(ret, ("Unable to end access to grant references"));

		gnttab_release_grant_reference(&rxq->gref_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			    __func__, *cons, frags, rp);
			break;
		}
		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
		m = xn_get_rx_mbuf(rxq, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;
		ref = xn_get_rx_ref(rxq, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}
	*list = m0;
	*cons += frags;

	return (err);
}

/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
{
	struct mbuf *m;
	struct netfront_info *np = txq->info;
	struct ifnet *ifp = np->xn_ifp;
	u_int nfrags;
	int otherend_id;

	/**
	 * Defragment the mbuf if necessary.
	 */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether this request is longer than netback
	 * can handle, and try to defrag it.
	 */
	/**
	 * It is a bit lame, but the netback driver in Linux can't
	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
	 * the Linux network stack.
	 */
	if (nfrags > np->maxfrags) {
		m = m_defrag(m_head, M_NOWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether the defragmented packet has too many
	 * segments for the Linux netback driver.
	 */
	/**
	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
	 * of mbufs longer than Linux can handle.  Make sure we don't
	 * pass a too-long chain over to the other side by dropping the
	 * packet.  It doesn't look like there is currently a way to
	 * tell the TCP stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		    "won't be able to handle it, dropping\n",
		    __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant.  We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size.  Keep
	 * it in here as an assert for now just to make certain that
	 * chain_cnt is accurate.
	 */
	KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
	    ("%s: chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
	    "(%d)!", __func__, (int) txq->mbufs_cnt,
	    (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	otherend_id = xenbus_get_otherend_id(np->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
		id = get_id_from_freelist(txq->mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		txq->mbufs_cnt++;
		if (txq->mbufs_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
		txq->mbufs[id] = m;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&txq->gref_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = txq->grant_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size.  The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.
			 */
			/**
			 * CSUM_TSO requires checksum offloading.
			 * Some versions of FreeBSD fail to
			 * set CSUM_TCP in the CSUM_TSO case,
			 * so we have to test for CSUM_TSO
			 * explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
					(struct netif_extra_info *)
					RING_GET_REQUEST(&txq->ring,
					    ++txq->ring.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
					XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		txq->ring.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
	if (m_head->m_flags & M_MCAST)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);

	xn_txeof(txq);

	return (0);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *np)
{
	struct ifnet *ifp;
	int i;
	struct netfront_rxq *rxq;

	XN_LOCK_ASSERT(np);

	ifp = np->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING || !netfront_carrier_ok(np))
		return;

	xn_stop(np);

	for (i = 0; i < np->num_queues; i++) {
		rxq = &np->rxq[i];
		XN_RX_LOCK(rxq);
		xn_alloc_rx_buffers(rxq);
		rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
			xn_rxeof(rxq);
		XN_RX_UNLOCK(rxq);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	device_t dev;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int mask, error = 0, reinit;

	dev = sc->xbdev;

	switch(cmd) {
	case SIOCSIFADDR:
#ifdef INET
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xn_ifinit(sc);
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
1788 */ 1789 xn_ifinit_locked(sc); 1790 } else { 1791 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1792 xn_stop(sc); 1793 } 1794 } 1795 sc->xn_if_flags = ifp->if_flags; 1796 XN_UNLOCK(sc); 1797 break; 1798 case SIOCSIFCAP: 1799 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1800 reinit = 0; 1801 1802 if (mask & IFCAP_TXCSUM) { 1803 ifp->if_capenable ^= IFCAP_TXCSUM; 1804 ifp->if_hwassist ^= XN_CSUM_FEATURES; 1805 } 1806 if (mask & IFCAP_TSO4) { 1807 ifp->if_capenable ^= IFCAP_TSO4; 1808 ifp->if_hwassist ^= CSUM_TSO; 1809 } 1810 1811 if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) { 1812 /* These Rx features require us to renegotiate. */ 1813 reinit = 1; 1814 1815 if (mask & IFCAP_RXCSUM) 1816 ifp->if_capenable ^= IFCAP_RXCSUM; 1817 if (mask & IFCAP_LRO) 1818 ifp->if_capenable ^= IFCAP_LRO; 1819 } 1820 1821 if (reinit == 0) 1822 break; 1823 1824 /* 1825 * We must reset the interface so the backend picks up the 1826 * new features. 1827 */ 1828 device_printf(sc->xbdev, 1829 "performing interface reset due to feature change\n"); 1830 XN_LOCK(sc); 1831 netfront_carrier_off(sc); 1832 sc->xn_reset = true; 1833 /* 1834 * NB: the pending packet queue is not flushed, since 1835 * the interface should still support the old options. 1836 */ 1837 XN_UNLOCK(sc); 1838 /* 1839 * Delete the xenstore nodes that export features. 1840 * 1841 * NB: There's a xenbus state called 1842 * "XenbusStateReconfiguring", which is what we should set 1843 * here. Sadly none of the backends know how to handle it, 1844 * and simply disconnect from the frontend, so we will just 1845 * switch back to XenbusStateInitialising in order to force 1846 * a reconnection. 1847 */ 1848 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4"); 1849 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload"); 1850 xenbus_set_state(dev, XenbusStateClosing); 1851 1852 /* 1853 * Wait for the frontend to reconnect before returning 1854 * from the ioctl. 30s should be more than enough for any 1855 * sane backend to reconnect. 
1856 */ 1857 error = tsleep(sc, 0, "xn_rst", 30*hz); 1858 break; 1859 case SIOCADDMULTI: 1860 case SIOCDELMULTI: 1861 break; 1862 case SIOCSIFMEDIA: 1863 case SIOCGIFMEDIA: 1864 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 1865 break; 1866 default: 1867 error = ether_ioctl(ifp, cmd, data); 1868 } 1869 1870 return (error); 1871 } 1872 1873 static void 1874 xn_stop(struct netfront_info *sc) 1875 { 1876 struct ifnet *ifp; 1877 1878 XN_LOCK_ASSERT(sc); 1879 1880 ifp = sc->xn_ifp; 1881 1882 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1883 if_link_state_change(ifp, LINK_STATE_DOWN); 1884 } 1885 1886 static void 1887 xn_rebuild_rx_bufs(struct netfront_rxq *rxq) 1888 { 1889 int requeue_idx, i; 1890 grant_ref_t ref; 1891 netif_rx_request_t *req; 1892 1893 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 1894 struct mbuf *m; 1895 u_long pfn; 1896 1897 if (rxq->mbufs[i] == NULL) 1898 continue; 1899 1900 m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i); 1901 ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i); 1902 1903 req = RING_GET_REQUEST(&rxq->ring, requeue_idx); 1904 pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; 1905 1906 gnttab_grant_foreign_access_ref(ref, 1907 xenbus_get_otherend_id(rxq->info->xbdev), 1908 pfn, 0); 1909 1910 req->gref = ref; 1911 req->id = requeue_idx; 1912 1913 requeue_idx++; 1914 } 1915 1916 rxq->ring.req_prod_pvt = requeue_idx; 1917 } 1918 1919 /* START of Xenolinux helper functions adapted to FreeBSD */ 1920 static int 1921 xn_connect(struct netfront_info *np) 1922 { 1923 int i, error; 1924 u_int feature_rx_copy; 1925 struct netfront_rxq *rxq; 1926 struct netfront_txq *txq; 1927 1928 error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 1929 "feature-rx-copy", NULL, "%u", &feature_rx_copy); 1930 if (error != 0) 1931 feature_rx_copy = 0; 1932 1933 /* We only support rx copy. */ 1934 if (!feature_rx_copy) 1935 return (EPROTONOSUPPORT); 1936 1937 /* Recovery procedure: */ 1938 error = talk_to_backend(np->xbdev, np); 1939 if (error != 0) 1940 return (error); 1941 1942 /* Step 1: Reinitialise variables. */ 1943 xn_query_features(np); 1944 xn_configure_features(np); 1945 1946 /* Step 2: Release TX buffer */ 1947 for (i = 0; i < np->num_queues; i++) { 1948 txq = &np->txq[i]; 1949 xn_release_tx_bufs(txq); 1950 } 1951 1952 /* Step 3: Rebuild the RX buffer freelist and the RX ring itself. */ 1953 for (i = 0; i < np->num_queues; i++) { 1954 rxq = &np->rxq[i]; 1955 xn_rebuild_rx_bufs(rxq); 1956 } 1957 1958 /* Step 4: All public and private state should now be sane. Get 1959 * ready to start sending and receiving packets and give the driver 1960 * domain a kick because we've probably just requeued some 1961 * packets. 
1962 */ 1963 netfront_carrier_on(np); 1964 wakeup(np); 1965 1966 return (0); 1967 } 1968 1969 static void 1970 xn_kick_rings(struct netfront_info *np) 1971 { 1972 struct netfront_rxq *rxq; 1973 struct netfront_txq *txq; 1974 int i; 1975 1976 for (i = 0; i < np->num_queues; i++) { 1977 txq = &np->txq[i]; 1978 rxq = &np->rxq[i]; 1979 xen_intr_signal(txq->xen_intr_handle); 1980 XN_TX_LOCK(txq); 1981 xn_txeof(txq); 1982 XN_TX_UNLOCK(txq); 1983 XN_RX_LOCK(rxq); 1984 xn_alloc_rx_buffers(rxq); 1985 XN_RX_UNLOCK(rxq); 1986 } 1987 } 1988 1989 static void 1990 xn_query_features(struct netfront_info *np) 1991 { 1992 int val; 1993 1994 device_printf(np->xbdev, "backend features:"); 1995 1996 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 1997 "feature-sg", NULL, "%d", &val) != 0) 1998 val = 0; 1999 2000 np->maxfrags = 1; 2001 if (val) { 2002 np->maxfrags = MAX_TX_REQ_FRAGS; 2003 printf(" feature-sg"); 2004 } 2005 2006 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 2007 "feature-gso-tcpv4", NULL, "%d", &val) != 0) 2008 val = 0; 2009 2010 np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO); 2011 if (val) { 2012 np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO; 2013 printf(" feature-gso-tcp4"); 2014 } 2015 2016 /* 2017 * HW CSUM offload is assumed to be available unless 2018 * feature-no-csum-offload is set in xenstore. 2019 */ 2020 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev), 2021 "feature-no-csum-offload", NULL, "%d", &val) != 0) 2022 val = 0; 2023 2024 np->xn_ifp->if_capabilities |= IFCAP_HWCSUM; 2025 if (val) { 2026 np->xn_ifp->if_capabilities &= ~(IFCAP_HWCSUM); 2027 printf(" feature-no-csum-offload"); 2028 } 2029 2030 printf("\n"); 2031 } 2032 2033 static int 2034 xn_configure_features(struct netfront_info *np) 2035 { 2036 int err, cap_enabled; 2037 #if (defined(INET) || defined(INET6)) 2038 int i; 2039 #endif 2040 struct ifnet *ifp; 2041 2042 ifp = np->xn_ifp; 2043 err = 0; 2044 2045 if ((ifp->if_capenable & ifp->if_capabilities) == ifp->if_capenable) { 2046 /* Current options are available, no need to do anything. */ 2047 return (0); 2048 } 2049 2050 /* Try to preserve as many options as possible. 
*/ 2051 cap_enabled = ifp->if_capenable; 2052 ifp->if_capenable = ifp->if_hwassist = 0; 2053 2054 #if (defined(INET) || defined(INET6)) 2055 if ((cap_enabled & IFCAP_LRO) != 0) 2056 for (i = 0; i < np->num_queues; i++) 2057 tcp_lro_free(&np->rxq[i].lro); 2058 if (xn_enable_lro && 2059 (ifp->if_capabilities & cap_enabled & IFCAP_LRO) != 0) { 2060 ifp->if_capenable |= IFCAP_LRO; 2061 for (i = 0; i < np->num_queues; i++) { 2062 err = tcp_lro_init(&np->rxq[i].lro); 2063 if (err != 0) { 2064 device_printf(np->xbdev, 2065 "LRO initialization failed\n"); 2066 ifp->if_capenable &= ~IFCAP_LRO; 2067 break; 2068 } 2069 np->rxq[i].lro.ifp = ifp; 2070 } 2071 } 2072 if ((ifp->if_capabilities & cap_enabled & IFCAP_TSO4) != 0) { 2073 ifp->if_capenable |= IFCAP_TSO4; 2074 ifp->if_hwassist |= CSUM_TSO; 2075 } 2076 #endif 2077 if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) { 2078 ifp->if_capenable |= IFCAP_TXCSUM; 2079 ifp->if_hwassist |= XN_CSUM_FEATURES; 2080 } 2081 if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0) 2082 ifp->if_capenable |= IFCAP_RXCSUM; 2083 2084 return (err); 2085 } 2086 2087 static int 2088 xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m) 2089 { 2090 struct netfront_info *np; 2091 struct ifnet *ifp; 2092 struct buf_ring *br; 2093 int error, notify; 2094 2095 np = txq->info; 2096 br = txq->br; 2097 ifp = np->xn_ifp; 2098 error = 0; 2099 2100 XN_TX_LOCK_ASSERT(txq); 2101 2102 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2103 !netfront_carrier_ok(np)) { 2104 if (m != NULL) 2105 error = drbr_enqueue(ifp, br, m); 2106 return (error); 2107 } 2108 2109 if (m != NULL) { 2110 error = drbr_enqueue(ifp, br, m); 2111 if (error != 0) 2112 return (error); 2113 } 2114 2115 while ((m = drbr_peek(ifp, br)) != NULL) { 2116 if (!xn_tx_slot_available(txq)) { 2117 drbr_putback(ifp, br, m); 2118 break; 2119 } 2120 2121 error = xn_assemble_tx_request(txq, m); 2122 /* xn_assemble_tx_request always consumes the mbuf*/ 2123 if (error != 0) { 2124 drbr_advance(ifp, br); 2125 break; 2126 } 2127 2128 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify); 2129 if (notify) 2130 xen_intr_signal(txq->xen_intr_handle); 2131 2132 drbr_advance(ifp, br); 2133 } 2134 2135 if (RING_FULL(&txq->ring)) 2136 txq->full = true; 2137 2138 return (0); 2139 } 2140 2141 static int 2142 xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m) 2143 { 2144 struct netfront_info *np; 2145 struct netfront_txq *txq; 2146 int i, npairs, error; 2147 2148 np = ifp->if_softc; 2149 npairs = np->num_queues; 2150 2151 if (!netfront_carrier_ok(np)) 2152 return (ENOBUFS); 2153 2154 KASSERT(npairs != 0, ("called with 0 available queues")); 2155 2156 /* check if flowid is set */ 2157 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 2158 i = m->m_pkthdr.flowid % npairs; 2159 else 2160 i = curcpu % npairs; 2161 2162 txq = &np->txq[i]; 2163 2164 if (XN_TX_TRYLOCK(txq) != 0) { 2165 error = xn_txq_mq_start_locked(txq, m); 2166 XN_TX_UNLOCK(txq); 2167 } else { 2168 error = drbr_enqueue(ifp, txq->br, m); 2169 taskqueue_enqueue(txq->tq, &txq->defrtask); 2170 } 2171 2172 return (error); 2173 } 2174 2175 static void 2176 xn_qflush(struct ifnet *ifp) 2177 { 2178 struct netfront_info *np; 2179 struct netfront_txq *txq; 2180 struct mbuf *m; 2181 int i; 2182 2183 np = ifp->if_softc; 2184 2185 for (i = 0; i < np->num_queues; i++) { 2186 txq = &np->txq[i]; 2187 2188 XN_TX_LOCK(txq); 2189 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 2190 m_freem(m); 2191 XN_TX_UNLOCK(txq); 2192 } 2193 2194 if_qflush(ifp); 2195 } 2196 2197 /** 
2198 * Create a network device. 2199 * @param dev Newbus device representing this virtual NIC. 2200 */ 2201 int 2202 create_netdev(device_t dev) 2203 { 2204 struct netfront_info *np; 2205 int err; 2206 struct ifnet *ifp; 2207 2208 np = device_get_softc(dev); 2209 2210 np->xbdev = dev; 2211 2212 mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF); 2213 2214 ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts); 2215 ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 2216 ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL); 2217 2218 err = xen_net_read_mac(dev, np->mac); 2219 if (err != 0) 2220 goto error; 2221 2222 /* Set up ifnet structure */ 2223 ifp = np->xn_ifp = if_alloc(IFT_ETHER); 2224 ifp->if_softc = np; 2225 if_initname(ifp, "xn", device_get_unit(dev)); 2226 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2227 ifp->if_ioctl = xn_ioctl; 2228 2229 ifp->if_transmit = xn_txq_mq_start; 2230 ifp->if_qflush = xn_qflush; 2231 2232 ifp->if_init = xn_ifinit; 2233 2234 ifp->if_hwassist = XN_CSUM_FEATURES; 2235 /* Enable all supported features at device creation. */ 2236 ifp->if_capenable = ifp->if_capabilities = 2237 IFCAP_HWCSUM|IFCAP_TSO4|IFCAP_LRO; 2238 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2239 ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS; 2240 ifp->if_hw_tsomaxsegsize = PAGE_SIZE; 2241 2242 ether_ifattach(ifp, np->mac); 2243 netfront_carrier_off(np); 2244 2245 return (0); 2246 2247 error: 2248 KASSERT(err != 0, ("Error path with no error code specified")); 2249 return (err); 2250 } 2251 2252 static int 2253 netfront_detach(device_t dev) 2254 { 2255 struct netfront_info *info = device_get_softc(dev); 2256 2257 DPRINTK("%s\n", xenbus_get_node(dev)); 2258 2259 netif_free(info); 2260 2261 return 0; 2262 } 2263 2264 static void 2265 netif_free(struct netfront_info *np) 2266 { 2267 2268 XN_LOCK(np); 2269 xn_stop(np); 2270 XN_UNLOCK(np); 2271 netif_disconnect_backend(np); 2272 ether_ifdetach(np->xn_ifp); 2273 free(np->rxq, M_DEVBUF); 2274 free(np->txq, M_DEVBUF); 2275 if_free(np->xn_ifp); 2276 np->xn_ifp = NULL; 2277 ifmedia_removeall(&np->sc_media); 2278 } 2279 2280 static void 2281 netif_disconnect_backend(struct netfront_info *np) 2282 { 2283 u_int i; 2284 2285 for (i = 0; i < np->num_queues; i++) { 2286 XN_RX_LOCK(&np->rxq[i]); 2287 XN_TX_LOCK(&np->txq[i]); 2288 } 2289 netfront_carrier_off(np); 2290 for (i = 0; i < np->num_queues; i++) { 2291 XN_RX_UNLOCK(&np->rxq[i]); 2292 XN_TX_UNLOCK(&np->txq[i]); 2293 } 2294 2295 for (i = 0; i < np->num_queues; i++) { 2296 disconnect_rxq(&np->rxq[i]); 2297 disconnect_txq(&np->txq[i]); 2298 } 2299 } 2300 2301 static int 2302 xn_ifmedia_upd(struct ifnet *ifp) 2303 { 2304 2305 return (0); 2306 } 2307 2308 static void 2309 xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2310 { 2311 2312 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; 2313 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; 2314 } 2315 2316 /* ** Driver registration ** */ 2317 static device_method_t netfront_methods[] = { 2318 /* Device interface */ 2319 DEVMETHOD(device_probe, netfront_probe), 2320 DEVMETHOD(device_attach, netfront_attach), 2321 DEVMETHOD(device_detach, netfront_detach), 2322 DEVMETHOD(device_shutdown, bus_generic_shutdown), 2323 DEVMETHOD(device_suspend, netfront_suspend), 2324 DEVMETHOD(device_resume, netfront_resume), 2325 2326 /* Xenbus interface */ 2327 DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed), 2328 2329 DEVMETHOD_END 2330 }; 2331 2332 static driver_t netfront_driver = { 2333 "xn", 2334 
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);