/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <sys/bus.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include "xenbus_if.h"

/* Features supported by all backends.  TSO and LRO can be negotiated. */
#define XN_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for it to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);

/**
 * \brief The maximum allowed data fragments in a single transmit
 *        request.
 *
 * This limit is imposed by the backend driver.  We assume here that
 * we are dealing with a Linux driver domain and have set our limit
 * to mirror the Linux MAX_SKB_FRAGS constant.
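 * (With 4 KB pages this works out to 65536 / 4096 + 2 = 18 fragments per
 * transmit request.)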
 */
#define MAX_TX_REQ_FRAGS (65536 / PAGE_SIZE + 2)

#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static inline int xn_count_frags(struct mbuf *m);
static int xn_assemble_tx_request(struct netfront_info *sc,
    struct mbuf *m_head);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
static void xn_query_features(struct netfront_info *np);
static int xn_configure_features(struct netfront_info *np);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void free_ring(int *ref, void *ring_ptr_ref);

static int xn_ifmedia_upd(struct ifnet *ifp);
static void xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list);

#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
	struct mbuf	*xn_tx_chain[NET_TX_RING_SIZE+1];
	int		xn_tx_chain_cnt;
	struct mbuf	*xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct netfront_stats
{
	u_long	rx_packets;	/* total packets received	*/
	u_long	tx_packets;	/* total packets transmitted	*/
	u_long	rx_bytes;	/* total bytes received		*/
	u_long	tx_bytes;	/* total bytes transmitted	*/
	u_long	rx_errors;	/* bad packets received		*/
	u_long	tx_errors;	/* packet transmit problems	*/
};

struct netfront_info {
	struct ifnet *xn_ifp;
	struct lro_ctrl xn_lro;

	struct netfront_stats stats;
	u_int tx_full;

	netif_tx_front_ring_t tx;
	netif_rx_front_ring_t rx;

	struct mtx tx_lock;
	struct mtx rx_lock;
	struct mtx sc_lock;

	xen_intr_handle_t xen_intr_handle;
	u_int carrier;
	u_int maxfrags;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
	int rx_min_target;
	int rx_max_target;
	int rx_target;

	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

	device_t		xbdev;
	int			tx_ring_ref;
	int			rx_ring_ref;
	uint8_t			mac[ETHER_ADDR_LEN];
	struct xn_chain_data	xn_cdata;	/* mbufs */
	struct mbufq		xn_rx_batch;	/* batch queue */

	int			xn_if_flags;
	struct callout		xn_stat_ch;

	xen_pfn_t		rx_pfn_array[NET_RX_RING_SIZE];
	struct ifmedia		sc_media;

	bool			xn_resume;
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)	mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)	mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)		mtx_lock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_lock, MA_OWNED);
#define XN_RX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->tx_lock, MA_OWNED);

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)	((netif)->carrier = 1)
#define netfront_carrier_off(netif)	((netif)->carrier = 0)
#define netfront_carrier_ok(netif)	((netif)->carrier)

/* Access macros for acquiring/freeing slots in xn_free_{tx,rx}_idxs[]. */

static inline void
add_id_to_freelist(struct mbuf **list, uintptr_t id)
{
	KASSERT(id != 0,
	    ("%s: the head item (0) must always be free.", __func__));
	list[id] = list[0];
	list[0]  = (struct mbuf *)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
	uintptr_t id;

	id = (uintptr_t)list[0];
	KASSERT(id != 0,
	    ("%s: the head item (0) must always remain free.", __func__));
	list[0] = list[id];
	return (id);
}

static inline int
xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct mbuf *m;

	m = np->rx_mbufs[i];
	np->rx_mbufs[i] = NULL;
	return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];

	KASSERT(ref != GRANT_REF_INVALID, ("Invalid grant reference!\n"));
	np->grant_rx_ref[i] = GRANT_REF_INVALID;
	return ref;
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#ifdef INVARIANTS
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#else
#define WPRINTK(fmt, args...)
#endif
#ifdef DEBUG
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse that
 * as colon-separated octets, placing the result in the given mac array.  mac
 * must be a preallocated array of length ETHER_ADDR_LEN.
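 * The XenStore value is expected to look like "xx:xx:xx:xx:xx:xx", e.g.
 * "00:16:3e:00:00:01" (00:16:3e is the Xen OUI; the exact address shown here
 * is only an illustration).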
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
	int error, i;
	char *s, *e, *macstr;
	const char *path;

	path = xenbus_get_node(dev);
	error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	if (error == ENOENT) {
		/*
		 * Deal with missing mac XenStore nodes on devices with
		 * HVM emulation (the 'ioemu' configuration attribute)
		 * enabled.
		 *
		 * The HVM emulator may execute in a stub device model
		 * domain which lacks the permission, only given to Dom0,
		 * to update the guest's XenStore tree.  For this reason,
		 * the HVM emulator doesn't even attempt to write the
		 * front-side mac node, even when operating in Dom0.
		 * However, there should always be a mac listed in the
		 * backend tree.  Fall back to that version if our query
		 * of the front side XenStore location doesn't find
		 * anything.
		 */
		path = xenbus_get_otherend_path(dev);
		error = xs_read(XST_NIL, path, "mac", NULL, (void **) &macstr);
	}
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "parsing %s/mac", path);
		return (error);
	}

	s = macstr;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mac[i] = strtoul(s, &e, 16);
		if (s == e || (e[0] != ':' && e[0] != 0)) {
			free(macstr, M_XENBUS);
			return (ENOENT);
		}
		s = &e[1];
	}
	free(macstr, M_XENBUS);
	return (0);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

	if (xen_hvm_domain() && xen_disable_pv_nics != 0)
		return (ENXIO);

	if (!strcmp(xenbus_get_type(dev), "vif")) {
		device_set_desc(dev, "Virtual Network Interface");
		return (0);
	}

	return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
	int err;

	err = create_netdev(dev);
	if (err) {
		xenbus_dev_fatal(dev, err, "creating netdev");
		return (err);
	}

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_lro", CTLFLAG_RW,
	    &xn_enable_lro, 0, "Large Receive Offload");

	return (0);
}

static int
netfront_suspend(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);
	return (0);
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	info->xn_resume = true;
	netif_disconnect_backend(info);
	return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
	const char *message;
	struct xs_transaction xst;
	const char *node = xenbus_get_node(dev);
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

 again:
	err = xs_transaction_start(&xst);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	err = xs_printf(xst, node, "tx-ring-ref", "%u",
	    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "rx-ring-ref", "%u",
	    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xs_printf(xst, node,
	    "event-channel", "%u",
	    xen_intr_port(info->xen_intr_handle));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}
	err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xs_transaction_end(xst, 0);
	if (err) {
		if (err == EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xs_transaction_end(xst, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}

static int
setup_device(device_t dev, struct netfront_info *info)
{
	netif_tx_sring_t *txs;
	netif_rx_sring_t *rxs;
	int error;

	info->tx_ring_ref = GRANT_REF_INVALID;
	info->rx_ring_ref = GRANT_REF_INVALID;
	info->rx.sring = NULL;
	info->tx.sring = NULL;

	txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!txs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
	error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref);
	if (error)
		goto fail;

	rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!rxs) {
		error = ENOMEM;
		xenbus_dev_fatal(dev, error, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref);
	if (error)
		goto fail;

	error = xen_intr_alloc_and_bind_local_port(dev,
	    xenbus_get_otherend_id(dev), /*filter*/NULL, xn_intr, info,
	    INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, &info->xen_intr_handle);

	if (error) {
		xenbus_dev_fatal(dev, error,
		    "xen_intr_alloc_and_bind_local_port failed");
		goto fail;
	}

	return (0);

 fail:
	netif_free(info);
	return (error);
}

#ifdef INET
/**
 * If this interface has an ipv4 address, send an arp for it.  This
 * helps to get the network going again after migrating hosts.
 */
static void
netfront_send_fake_arp(device_t dev, struct netfront_info *info)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	ifp = info->xn_ifp;
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family == AF_INET) {
			arp_ifinit(ifp, ifa);
		}
	}
}
#endif

/**
 * Callback received when the backend's state changes.
 */
static void
netfront_backend_changed(device_t dev, XenbusState newstate)
{
	struct netfront_info *sc = device_get_softc(dev);

	DPRINTK("newstate=%d\n", newstate);

	switch (newstate) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
	case XenbusStateReconfigured:
	case XenbusStateReconfiguring:
		break;
	case XenbusStateInitWait:
		if (xenbus_get_state(dev) != XenbusStateInitialising)
			break;
		if (network_connect(sc) != 0)
			break;
		xenbus_set_state(dev, XenbusStateConnected);
		break;
	case XenbusStateClosing:
		xenbus_set_state(dev, XenbusStateClosed);
		break;
	case XenbusStateConnected:
#ifdef INET
		netfront_send_fake_arp(dev, sc);
#endif
		break;
	}
}

static void
xn_free_rx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		if (sc->xn_cdata.rx_mbufs[i] != NULL) {
			m_freem(sc->rx_mbufs[i]);
			sc->rx_mbufs[i] = NULL;
		}
	}

	sc->rx.rsp_cons = 0;
	sc->xn_rx_if->req_prod = 0;
	sc->xn_rx_if->event = sc->rx.rsp_cons;
#endif
}

static void
xn_free_tx_ring(struct netfront_info *sc)
{
#if 0
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		if (sc->tx_mbufs[i] != NULL) {
			m_freem(sc->tx_mbufs[i]);
			sc->xn_cdata.xn_tx_chain[i] = NULL;
		}
	}

	return;
#endif
}

/**
 * \brief Verify that there is sufficient space in the Tx ring
 *        buffer for a maximally sized request to be enqueued.
 *
 * A transmit request requires a transmit descriptor for each packet
 * fragment, plus up to 2 entries for "options" (e.g. TSO).
 */
static inline int
xn_tx_slot_available(struct netfront_info *np)
{
	return (RING_FREE_REQUESTS(&np->tx) > (MAX_TX_REQ_FRAGS + 2));
}

static void
netif_release_tx_bufs(struct netfront_info *np)
{
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		struct mbuf *m;

		m = np->tx_mbufs[i];

		/*
		 * We assume that no kernel addresses are
		 * less than NET_TX_RING_SIZE.  Any entry
		 * in the table that is below this number
		 * must be an index from free-list tracking.
		 */
		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
			continue;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
		gnttab_release_grant_reference(&np->gref_tx_head,
		    np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(np->tx_mbufs, i);
		np->xn_cdata.xn_tx_chain_cnt--;
		if (np->xn_cdata.xn_tx_chain_cnt < 0) {
			panic("%s: tx_chain_cnt must be >= 0", __func__);
		}
		m_free(m);
	}
}

static void
network_alloc_rx_buffers(struct netfront_info *sc)
{
	int otherend_id = xenbus_get_otherend_id(sc->xbdev);
	unsigned short id;
	struct mbuf *m_new;
	int i, batch_target, notify;
	RING_IDX req_prod;
	grant_ref_t ref;
	netif_rx_request_t *req;
	vm_offset_t vaddr;
	u_long pfn;

	req_prod = sc->rx.req_prod_pvt;

	if (__predict_false(sc->carrier == 0))
		return;

	/*
	 * Allocate mbufs greedily, even though we batch updates to the
	 * receive ring.  This creates a less bursty demand on the memory
	 * allocator, and so should reduce the chance of failed allocation
	 * requests both for ourself and for other kernel subsystems.
	 *
	 * Here we attempt to maintain rx_target buffers in flight, counting
	 * buffers that we have yet to process in the receive ring.
	 */
	batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons);
	for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) {
		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m_new == NULL) {
			if (i != 0)
				goto refill;
			/*
			 * XXX set timer
			 */
			break;
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE;

		/* queue the mbufs allocated */
		(void)mbufq_enqueue(&sc->xn_rx_batch, m_new);
	}

	/*
	 * If we've allocated at least half of our target number of entries,
	 * submit them to the backend - we have enough to make the overhead
	 * of submission worthwhile.  Otherwise wait for more mbufs and
	 * request entries to become available.
	 */
	if (i < (sc->rx_target/2)) {
		if (req_prod > sc->rx.sring->req_prod)
			goto push;
		return;
	}

	/*
	 * Double the floating fill target if we risked having the backend
	 * run out of empty buffers for receive traffic.  We define "running
	 * low" as having less than a fourth of our target buffers free
	 * at the time we refilled the queue.
	 */
	if ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) {
		sc->rx_target *= 2;
		if (sc->rx_target > sc->rx_max_target)
			sc->rx_target = sc->rx_max_target;
	}

refill:
	for (i = 0; ; i++) {
		if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL)
			break;

		m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)(
		    vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT);

		id = xennet_rxidx(req_prod + i);

		KASSERT(sc->rx_mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
		sc->rx_mbufs[id] = m_new;

		ref = gnttab_claim_grant_reference(&sc->gref_rx_head);
		KASSERT(ref != GNTTAB_LIST_END,
		    ("reserved grant references exhausted"));
		sc->grant_rx_ref[id] = ref;

		vaddr = mtod(m_new, vm_offset_t);
		pfn = vtophys(vaddr) >> PAGE_SHIFT;
		req = RING_GET_REQUEST(&sc->rx, req_prod + i);

		gnttab_grant_foreign_access_ref(ref, otherend_id, pfn, 0);
		req->id = id;
		req->gref = ref;

		sc->rx_pfn_array[i] =
		    vtophys(mtod(m_new, vm_offset_t)) >> PAGE_SHIFT;
	}

	KASSERT(i, ("no mbufs processed"));	/* should have returned earlier */
	KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed"));
	/*
	 * We may have allocated buffers which have entries outstanding
	 * in the page update queue -- make sure we flush those first!
	 */
	wmb();

	/* Above is a suitable barrier to ensure backend will see requests. */
	sc->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);
}

static void
xn_rxeof(struct netfront_info *np)
{
	struct ifnet *ifp;
#if (defined(INET) || defined(INET6))
	struct lro_ctrl *lro = &np->xn_lro;
	struct lro_entry *queued;
#endif
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	struct mbuf *m;
	struct mbufq rxq, errq;
	int err, work_to_do;

	do {
		XN_RX_LOCK_ASSERT(np);
		if (!netfront_carrier_ok(np))
			return;

		/* XXX: there should be some sane limit. */
		mbufq_init(&errq, INT_MAX);
		mbufq_init(&rxq, INT_MAX);

		ifp = np->xn_ifp;

		rp = np->rx.sring->rsp_prod;
		rmb();	/* Ensure we see queued responses up to 'rp'. */

		i = np->rx.rsp_cons;
		while (i != rp) {
			memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
			memset(extras, 0, sizeof(rinfo.extras));

			m = NULL;
			err = xennet_get_responses(np, &rinfo, rp, &i, &m);

			if (__predict_false(err)) {
				if (m)
					(void)mbufq_enqueue(&errq, m);
				np->stats.rx_errors++;
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			if (rx->flags & NETRXF_data_validated) {
				/*
				 * Tell the stack the checksums are okay.
				 *
				 * XXX this isn't necessarily the case - need
				 * to add check.
				 */
				m->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID |
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
				m->m_pkthdr.csum_data = 0xffff;
			}

			np->stats.rx_packets++;
			np->stats.rx_bytes += m->m_pkthdr.len;

			(void)mbufq_enqueue(&rxq, m);
			np->rx.rsp_cons = i;
		}

		mbufq_drain(&errq);

		/*
		 * Process all the mbufs after the remapping is complete.
		 * Break the mbuf chain first though.
		 */
		while ((m = mbufq_dequeue(&rxq)) != NULL) {
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			/*
			 * Do we really need to drop the rx lock?
			 */
			XN_RX_UNLOCK(np);
#if (defined(INET) || defined(INET6))
			/* Use LRO if possible */
			if ((ifp->if_capenable & IFCAP_LRO) == 0 ||
			    lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
				/*
				 * If LRO fails, pass up to the stack
				 * directly.
				 */
				(*ifp->if_input)(ifp, m);
			}
#else
			(*ifp->if_input)(ifp, m);
#endif
			XN_RX_LOCK(np);
		}

		np->rx.rsp_cons = i;

#if (defined(INET) || defined(INET6))
		/*
		 * Flush any outstanding LRO work
		 */
		while (!SLIST_EMPTY(&lro->lro_active)) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif

#if 0
		/* If we get a callback with very few responses, reduce fill target. */
		/* NB. Note exponential increase, linear decrease. */
		if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
		    ((3*np->rx_target) / 4)) &&
		    (--np->rx_target < np->rx_min_target))
			np->rx_target = np->rx_min_target;
#endif

		network_alloc_rx_buffers(np);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do);
	} while (work_to_do);
}

static void
xn_txeof(struct netfront_info *np)
{
	RING_IDX i, prod;
	unsigned short id;
	struct ifnet *ifp;
	netif_tx_response_t *txr;
	struct mbuf *m;

	XN_TX_LOCK_ASSERT(np);

	if (!netfront_carrier_ok(np))
		return;

	ifp = np->xn_ifp;

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (i = np->tx.rsp_cons; i != prod; i++) {
			txr = RING_GET_RESPONSE(&np->tx, i);
			if (txr->status == NETIF_RSP_NULL)
				continue;

			if (txr->status != NETIF_RSP_OKAY) {
				printf("%s: WARNING: response is %d!\n",
				    __func__, txr->status);
			}
			id = txr->id;
			m = np->tx_mbufs[id];
			KASSERT(m != NULL, ("mbuf not found in xn_tx_chain"));
			KASSERT((uintptr_t)m > NET_TX_RING_SIZE,
			    ("mbuf already on the free list, but we're "
			    "trying to free it again!"));
			M_ASSERTVALID(m);

			/*
			 * Increment packet count if this is the last
			 * mbuf of the chain.
			 */
			if (!m->m_next)
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			if (__predict_false(gnttab_query_foreign_access(
			    np->grant_tx_ref[id]) != 0)) {
				panic("%s: grant id %u still in use by the "
				    "backend", __func__, id);
			}
			gnttab_end_foreign_access_ref(
			    np->grant_tx_ref[id]);
			gnttab_release_grant_reference(
			    &np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_REF_INVALID;

			np->tx_mbufs[id] = NULL;
			add_id_to_freelist(np->tx_mbufs, id);
			np->xn_cdata.xn_tx_chain_cnt--;
			m_free(m);
			/*
			 * Only mark the queue active if we've freed up at
			 * least one slot to try.
			 */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of
		 * tx_cons.  Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending.  Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
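		 *
		 * (Setting rsp_event to the midpoint between the current
		 * response and request producers asks the backend to notify
		 * us once roughly half of the outstanding requests have
		 * completed.)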
		 */
		np->tx.sring->rsp_event =
		    prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;

		mb();
	} while (prod != np->tx.sring->rsp_prod);

	if (np->tx_full &&
	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
		np->tx_full = 0;
#if 0
		if (np->user_state == UST_OPEN)
			netif_wake_queue(dev);
#endif
	}
}

static void
xn_intr(void *xsc)
{
	struct netfront_info *np = xsc;
	struct ifnet *ifp = np->xn_ifp;

#if 0
	if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod &&
	    likely(netfront_carrier_ok(np)) &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;
#endif
	if (RING_HAS_UNCONSUMED_RESPONSES(&np->tx)) {
		XN_TX_LOCK(np);
		xn_txeof(np);
		XN_TX_UNLOCK(np);
	}

	XN_RX_LOCK(np);
	xn_rxeof(np);
	XN_RX_UNLOCK(np);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		xn_start(ifp);
}

static void
xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m,
    grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL"));
	np->rx_mbufs[new] = m;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int
xennet_get_extras(struct netfront_info *np,
    struct netif_extra_info *extras, RING_IDX rp, RING_IDX *cons)
{
	struct netif_extra_info *extra;

	int err = 0;

	do {
		struct mbuf *m;
		grant_ref_t ref;

		if (__predict_false(*cons + 1 == rp)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
#endif
			err = EINVAL;
			break;
		}

		extra = (struct netif_extra_info *)
		    RING_GET_RESPONSE(&np->rx, ++(*cons));

		if (__predict_false(!extra->type ||
		    extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
#if 0
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
				    extra->type);
#endif
			err = EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
		}

		m = xennet_get_rx_mbuf(np, *cons);
		ref = xennet_get_rx_ref(np, *cons);
		xennet_move_rx_slot(np, m, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return err;
}

static int
xennet_get_responses(struct netfront_info *np,
    struct netfront_rx_info *rinfo, RING_IDX rp, RING_IDX *cons,
    struct mbuf **list)
{
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	struct mbuf *m, *m0, *m_prev;
	grant_ref_t ref = xennet_get_rx_ref(np, *cons);
	RING_IDX ref_cons = *cons;
	int frags = 1;
	int err = 0;
	u_long ret;

	m0 = m = m_prev = xennet_get_rx_mbuf(np, *cons);

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp, cons);
	}

	if (m0 != NULL) {
		m0->m_pkthdr.len = 0;
		m0->m_next = NULL;
	}

	for (;;) {
#if 0
		DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
		    rx->status, rx->offset, frags);
#endif
		if (__predict_false(rx->status < 0 ||
		    rx->offset + rx->status > PAGE_SIZE)) {

#if 0
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
				    rx->offset, rx->status);
#endif

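			/*
			 * Recycle the bad slot: re-post the mbuf and its
			 * grant reference on the receive ring.
			 */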
			xennet_move_rx_slot(np, m, ref);
			if (m0 == m)
				m0 = NULL;
			m = NULL;
			err = EINVAL;
			goto next_skip_queue;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver.  In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_REF_INVALID) {

#if 0
			if (net_ratelimit())
				WPRINTK("Bad rx response id %d.\n", rx->id);
#endif
			printf("%s: Bad rx response id %d.\n", __func__,
			    rx->id);
			err = EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref);
		KASSERT(ret, ("Unable to end access to grant references"));

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

next:
		if (m == NULL)
			break;

		m->m_len = rx->status;
		m->m_data += rx->offset;
		m0->m_pkthdr.len += rx->status;

next_skip_queue:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (*cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = ENOENT;
			printf("%s: cons %u frags %u rp %u, not enough frags\n",
			    __func__, *cons, frags, rp);
			break;
		}
		/*
		 * Note that m can be NULL, if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		m_prev = m;

		rx = RING_GET_RESPONSE(&np->rx, *cons + frags);
		m = xennet_get_rx_mbuf(np, *cons + frags);

		/*
		 * m_prev == NULL can happen if rx->status < 0 or if
		 * rx->offset + rx->status > PAGE_SIZE above.
		 */
		if (m_prev != NULL)
			m_prev->m_next = m;

		/*
		 * m0 can be NULL if rx->status < 0 or if rx->offset +
		 * rx->status > PAGE_SIZE above.
		 */
		if (m0 == NULL)
			m0 = m;
		m->m_next = NULL;
		ref = xennet_get_rx_ref(np, *cons + frags);
		ref_cons = *cons + frags;
		frags++;
	}
	*list = m0;
	*cons += frags;

	return (err);
}

static void
xn_tick_locked(struct netfront_info *sc)
{
	XN_RX_LOCK_ASSERT(sc);
	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);

	/* XXX placeholder for printing debug information */
}

static void
xn_tick(void *xsc)
{
	struct netfront_info *sc;

	sc = xsc;
	XN_RX_LOCK(sc);
	xn_tick_locked(sc);
	XN_RX_UNLOCK(sc);
}

/**
 * \brief Count the number of fragments in an mbuf chain.
 *
 * Surprisingly, there isn't an M* macro for this.
 */
static inline int
xn_count_frags(struct mbuf *m)
{
	int nfrags;

	for (nfrags = 0; m != NULL; m = m->m_next)
		nfrags++;

	return (nfrags);
}

/**
 * Given an mbuf chain, make sure we have enough room and then push
 * it onto the transmit ring.
 */
static int
xn_assemble_tx_request(struct netfront_info *sc, struct mbuf *m_head)
{
	struct ifnet *ifp;
	struct mbuf *m;
	u_int nfrags;
	int otherend_id;

	ifp = sc->xn_ifp;

	/*
	 * Defragment the mbuf if necessary.
	 */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether this request is longer than netback
	 * can handle, and try to defrag it.
	 *
	 * It is a bit lame, but the netback driver in Linux can't
	 * deal with nfrags > MAX_TX_REQ_FRAGS, which is a quirk of
	 * the Linux network stack.
	 */
	if (nfrags > sc->maxfrags) {
		m = m_defrag(m_head, M_NOWAIT);
		if (!m) {
			/*
			 * Defrag failed, so free the mbuf and
			 * therefore drop the packet.
			 */
			m_freem(m_head);
			return (EMSGSIZE);
		}
		m_head = m;
	}

	/* Determine how many fragments now exist */
	nfrags = xn_count_frags(m_head);

	/*
	 * Check to see whether the defragmented packet has too many
	 * segments for the Linux netback driver.
	 *
	 * The FreeBSD TCP stack, with TSO enabled, can produce a chain
	 * of mbufs longer than Linux can handle.  Make sure we don't
	 * pass a too-long chain over to the other side by dropping the
	 * packet.  It doesn't look like there is currently a way to
	 * tell the TCP stack to generate a shorter chain of packets.
	 */
	if (nfrags > MAX_TX_REQ_FRAGS) {
#ifdef DEBUG
		printf("%s: nfrags %d > MAX_TX_REQ_FRAGS %d, netback "
		    "won't be able to handle it, dropping\n",
		    __func__, nfrags, MAX_TX_REQ_FRAGS);
#endif
		m_freem(m_head);
		return (EMSGSIZE);
	}

	/*
	 * This check should be redundant.  We've already verified that we
	 * have enough slots in the ring to handle a packet of maximum
	 * size, and that our packet is less than the maximum size.  Keep
	 * it in here as an assert for now just to make certain that
	 * xn_tx_chain_cnt is accurate.
	 */
	KASSERT((sc->xn_cdata.xn_tx_chain_cnt + nfrags) <= NET_TX_RING_SIZE,
	    ("%s: xn_tx_chain_cnt (%d) + nfrags (%d) > NET_TX_RING_SIZE "
	    "(%d)!", __func__, (int) sc->xn_cdata.xn_tx_chain_cnt,
	    (int) nfrags, (int) NET_TX_RING_SIZE));

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	otherend_id = xenbus_get_otherend_id(sc->xbdev);
	for (m = m_head; m; m = m->m_next) {
		netif_tx_request_t *tx;
		uintptr_t id;
		grant_ref_t ref;
		u_long mfn; /* XXX Wrong type? */

		tx = RING_GET_REQUEST(&sc->tx, sc->tx.req_prod_pvt);
		id = get_id_from_freelist(sc->tx_mbufs);
		if (id == 0)
			panic("%s: was allocated the freelist head!\n",
			    __func__);
		sc->xn_cdata.xn_tx_chain_cnt++;
		if (sc->xn_cdata.xn_tx_chain_cnt > NET_TX_RING_SIZE)
			panic("%s: tx_chain_cnt must be <= NET_TX_RING_SIZE\n",
			    __func__);
		sc->tx_mbufs[id] = m;
		tx->id = id;
		ref = gnttab_claim_grant_reference(&sc->gref_tx_head);
		KASSERT((short)ref >= 0, ("Negative ref"));
		mfn = virt_to_mfn(mtod(m, vm_offset_t));
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    mfn, GNTMAP_readonly);
		tx->gref = sc->grant_tx_ref[id] = ref;
		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
		tx->flags = 0;
		if (m == m_head) {
			/*
			 * The first fragment has the entire packet
			 * size, subsequent fragments have just the
			 * fragment size.  The backend works out the
			 * true size of the first fragment by
			 * subtracting the sizes of the other
			 * fragments.
			 */
			tx->size = m->m_pkthdr.len;

			/*
			 * The first fragment contains the checksum flags
			 * and is optionally followed by extra data for
			 * TSO etc.
			 *
			 * CSUM_TSO requires checksum offloading.
			 * Some versions of FreeBSD fail to
			 * set CSUM_TCP in the CSUM_TSO case,
			 * so we have to test for CSUM_TSO
			 * explicitly.
			 */
			if (m->m_pkthdr.csum_flags
			    & (CSUM_DELAY_DATA | CSUM_TSO)) {
				tx->flags |= (NETTXF_csum_blank
				    | NETTXF_data_validated);
			}
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				struct netif_extra_info *gso =
				    (struct netif_extra_info *)
				    RING_GET_REQUEST(&sc->tx,
				    ++sc->tx.req_prod_pvt);

				tx->flags |= NETTXF_extra_info;

				gso->u.gso.size = m->m_pkthdr.tso_segsz;
				gso->u.gso.type =
				    XEN_NETIF_GSO_TYPE_TCPV4;
				gso->u.gso.pad = 0;
				gso->u.gso.features = 0;

				gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
				gso->flags = 0;
			}
		} else {
			tx->size = m->m_len;
		}
		if (m->m_next)
			tx->flags |= NETTXF_more_data;

		sc->tx.req_prod_pvt++;
	}
	BPF_MTAP(ifp, m_head);

	sc->stats.tx_bytes += m_head->m_pkthdr.len;
	sc->stats.tx_packets++;

	return (0);
}

static void
xn_start_locked(struct ifnet *ifp)
{
	struct netfront_info *sc;
	struct mbuf *m_head;
	int notify;

	sc = ifp->if_softc;

	if (!netfront_carrier_ok(sc))
		return;

	/*
	 * While we have enough transmit slots available for at least one
	 * maximum-sized packet, pull mbufs off the queue and put them on
	 * the transmit ring.
	 */
	while (xn_tx_slot_available(sc)) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (xn_assemble_tx_request(sc, m_head) != 0)
			break;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify);
	if (notify)
		xen_intr_signal(sc->xen_intr_handle);

	if (RING_FULL(&sc->tx)) {
		sc->tx_full = 1;
#if 0
		netif_stop_queue(dev);
#endif
	}
}

static void
xn_start(struct ifnet *ifp)
{
	struct netfront_info *sc;

	sc = ifp->if_softc;
	XN_TX_LOCK(sc);
	xn_start_locked(ifp);
	XN_TX_UNLOCK(sc);
}

/* equivalent of network_open() in Linux */
static void
xn_ifinit_locked(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xn_stop(sc);

	network_alloc_rx_buffers(sc);
	sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);

	callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc);
}

static void
xn_ifinit(void *xsc)
{
	struct netfront_info *sc = xsc;

	XN_LOCK(sc);
	xn_ifinit_locked(sc);
	XN_UNLOCK(sc);
}

static int
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct netfront_info *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int mask, error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		XN_LOCK(sc);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				xn_ifinit_locked(sc);
			arp_ifinit(ifp, ifa);
			XN_UNLOCK(sc);
		} else {
			XN_UNLOCK(sc);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFMTU:
		/* XXX can we alter the MTU on a VN? */
#ifdef notyet
		if (ifr->ifr_mtu > XN_JUMBO_MTU)
			error = EINVAL;
		else
#endif
		{
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			xn_ifinit(sc);
		}
		break;
	case SIOCSIFFLAGS:
		XN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* No promiscuous mode with Xen */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xn_if_flags & IFF_PROMISC)) {
				XN_SETBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xn_if_flags & IFF_PROMISC) {
				XN_CLRBIT(sc, XN_RX_MODE,
				    XN_RXMODE_RX_PROMISC);
			} else
#endif
				xn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xn_stop(sc);
			}
		}
		sc->xn_if_flags = ifp->if_flags;
		XN_UNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
				    | CSUM_IP | CSUM_TSO);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
				    | CSUM_IP);
			}
		}
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
		if (mask & IFCAP_TSO4) {
			if (IFCAP_TSO4 & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			} else if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			} else {
				IPRINTK("Xen requires tx checksum offload"
				    " be enabled to use TSO\n");
				error = EINVAL;
			}
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			XN_LOCK(sc);
			xn_setmulti(sc);
			XN_UNLOCK(sc);
			error = 0;
		}
#endif
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
xn_stop(struct netfront_info *sc)
{
	struct ifnet *ifp;

	XN_LOCK_ASSERT(sc);

	ifp = sc->xn_ifp;

	callout_stop(&sc->xn_stat_ch);

	xn_free_rx_ring(sc);
	xn_free_tx_ring(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

/* START of Xenolinux helper functions adapted to FreeBSD */
int
network_connect(struct netfront_info *np)
{
	int i, requeue_idx, error;
	grant_ref_t ref;
	netif_rx_request_t *req;
	u_int feature_rx_copy;

	error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-rx-copy", NULL, "%u", &feature_rx_copy);
	if (error)
		feature_rx_copy = 0;

	/* We only support rx copy. */
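	/* (The legacy page-flipping receive mode is not implemented.) */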
	if (!feature_rx_copy)
		return (EPROTONOSUPPORT);

	/* Recovery procedure: */
	error = talk_to_backend(np->xbdev, np);
	if (error)
		return (error);

	/* Step 1: Reinitialise variables. */
	xn_query_features(np);
	xn_configure_features(np);
	netif_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		struct mbuf *m;
		u_long pfn;

		if (np->rx_mbufs[i] == NULL)
			continue;

		m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);

		req = RING_GET_REQUEST(&np->rx, requeue_idx);
		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

		gnttab_grant_foreign_access_ref(ref,
		    xenbus_get_otherend_id(np->xbdev),
		    pfn, 0);

		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netfront_carrier_on(np);
	xen_intr_signal(np->xen_intr_handle);
	XN_TX_LOCK(np);
	xn_txeof(np);
	XN_TX_UNLOCK(np);
	network_alloc_rx_buffers(np);

	return (0);
}

static void
xn_query_features(struct netfront_info *np)
{
	int val;

	device_printf(np->xbdev, "backend features:");

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-sg", NULL, "%d", &val) < 0)
		val = 0;

	np->maxfrags = 1;
	if (val) {
		np->maxfrags = MAX_TX_REQ_FRAGS;
		printf(" feature-sg");
	}

	if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
	    "feature-gso-tcpv4", NULL, "%d", &val) < 0)
		val = 0;

	np->xn_ifp->if_capabilities &= ~(IFCAP_TSO4|IFCAP_LRO);
	if (val) {
		np->xn_ifp->if_capabilities |= IFCAP_TSO4|IFCAP_LRO;
		printf(" feature-gso-tcp4");
	}

	printf("\n");
}

static int
xn_configure_features(struct netfront_info *np)
{
	int err, cap_enabled;

	err = 0;

	if (np->xn_resume &&
	    ((np->xn_ifp->if_capenable & np->xn_ifp->if_capabilities)
	    == np->xn_ifp->if_capenable)) {
		/* Current options are available, no need to do anything. */
		return (0);
	}

	/* Try to preserve as many options as possible. */
	if (np->xn_resume)
		cap_enabled = np->xn_ifp->if_capenable;
	else
		cap_enabled = UINT_MAX;

#if (defined(INET) || defined(INET6))
	if ((np->xn_ifp->if_capenable & IFCAP_LRO) == (cap_enabled & IFCAP_LRO))
		tcp_lro_free(&np->xn_lro);
#endif
	np->xn_ifp->if_capenable =
	    np->xn_ifp->if_capabilities & ~(IFCAP_LRO|IFCAP_TSO4) & cap_enabled;
	np->xn_ifp->if_hwassist &= ~CSUM_TSO;
#if (defined(INET) || defined(INET6))
	if (xn_enable_lro && (np->xn_ifp->if_capabilities & IFCAP_LRO) ==
	    (cap_enabled & IFCAP_LRO)) {
		err = tcp_lro_init(&np->xn_lro);
		if (err) {
			device_printf(np->xbdev, "LRO initialization failed\n");
		} else {
			np->xn_lro.ifp = np->xn_ifp;
			np->xn_ifp->if_capenable |= IFCAP_LRO;
		}
	}
	if ((np->xn_ifp->if_capabilities & IFCAP_TSO4) ==
	    (cap_enabled & IFCAP_TSO4)) {
		np->xn_ifp->if_capenable |= IFCAP_TSO4;
		np->xn_ifp->if_hwassist |= CSUM_TSO;
	}
#endif
	return (err);
}

/**
 * Create a network device.
 * @param dev  Newbus device representing this virtual NIC.
 */
int
create_netdev(device_t dev)
{
	int i;
	struct netfront_info *np;
	int err;
	struct ifnet *ifp;

	np = device_get_softc(dev);

	np->xbdev = dev;

	mtx_init(&np->tx_lock, "xntx", "netfront transmit lock", MTX_DEF);
	mtx_init(&np->rx_lock, "xnrx", "netfront receive lock", MTX_DEF);
	mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);

	ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
	ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);

	np->rx_target     = RX_MIN_TARGET;
	np->rx_min_target = RX_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/* Initialise {tx,rx}_mbufs to be a free chain containing every entry. */
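	/*
	 * Free tx slots are chained through tx_mbufs[] by storing the next
	 * free index cast to an mbuf pointer; genuine mbuf pointers are
	 * always greater than NET_TX_RING_SIZE, which is how the two are
	 * told apart.
	 */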
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_mbufs[i] = (void *) ((u_long) i+1);
		np->grant_tx_ref[i] = GRANT_REF_INVALID;
	}
	np->tx_mbufs[NET_TX_RING_SIZE] = (void *)0;

	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
		np->rx_mbufs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_REF_INVALID;
	}

	mbufq_init(&np->xn_rx_batch, INT_MAX);

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
	    &np->gref_tx_head) != 0) {
		IPRINTK("#### netfront can't alloc tx grant refs\n");
		err = ENOMEM;
		goto error;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
	    &np->gref_rx_head) != 0) {
		WPRINTK("#### netfront can't alloc rx grant refs\n");
		gnttab_free_grant_references(np->gref_tx_head);
		err = ENOMEM;
		goto error;
	}

	err = xen_net_read_mac(dev, np->mac);
	if (err) {
		gnttab_free_grant_references(np->gref_rx_head);
		gnttab_free_grant_references(np->gref_tx_head);
		goto error;
	}

	/* Set up ifnet structure */
	ifp = np->xn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = np;
	if_initname(ifp, "xn", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xn_ioctl;
	ifp->if_start = xn_start;
#ifdef notyet
	ifp->if_watchdog = xn_watchdog;
#endif
	ifp->if_init = xn_ifinit;
	ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1;

	ifp->if_hwassist = XN_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = MAX_TX_REQ_FRAGS;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;

	ether_ifattach(ifp, np->mac);
	callout_init(&np->xn_stat_ch, 1);
	netfront_carrier_off(np);

	return (0);

error:
	KASSERT(err != 0, ("Error path with no error code specified"));
	return (err);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
#if 0
static void
netfront_closing(device_t dev)
{
#if 0
	struct netfront_info *info = dev->dev_driver_data;

	DPRINTK("netfront_closing: %s removed\n", dev->nodename);

	close_netdev(info);
#endif
	xenbus_switch_state(dev, XenbusStateClosed);
}
#endif

static int
netfront_detach(device_t dev)
{
	struct netfront_info *info = device_get_softc(dev);

	DPRINTK("%s\n", xenbus_get_node(dev));

	netif_free(info);

	return 0;
}

static void
netif_free(struct netfront_info *info)
{
	XN_LOCK(info);
	xn_stop(info);
	XN_UNLOCK(info);
	callout_drain(&info->xn_stat_ch);
	netif_disconnect_backend(info);
	if (info->xn_ifp != NULL) {
		ether_ifdetach(info->xn_ifp);
		if_free(info->xn_ifp);
		info->xn_ifp = NULL;
	}
	ifmedia_removeall(&info->sc_media);
}

static void
netif_disconnect_backend(struct netfront_info *info)
{
	XN_RX_LOCK(info);
	XN_TX_LOCK(info);
	netfront_carrier_off(info);
	XN_TX_UNLOCK(info);
	XN_RX_UNLOCK(info);

	free_ring(&info->tx_ring_ref, &info->tx.sring);
	free_ring(&info->rx_ring_ref, &info->rx.sring);

	xen_intr_unbind(&info->xen_intr_handle);
}

static void
free_ring(int *ref, void *ring_ptr_ref)
{
	void **ring_ptr_ptr = ring_ptr_ref;

	if (*ref != GRANT_REF_INVALID) {
		/* This API frees the associated storage. */
		gnttab_end_foreign_access(*ref, *ring_ptr_ptr);
		*ref = GRANT_REF_INVALID;
	}
	*ring_ptr_ptr = NULL;
}

static int
xn_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}

static void
xn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/* ** Driver registration ** */
static device_method_t netfront_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netfront_probe),
	DEVMETHOD(device_attach,	netfront_attach),
	DEVMETHOD(device_detach,	netfront_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	netfront_suspend),
	DEVMETHOD(device_resume,	netfront_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, netfront_backend_changed),

	DEVMETHOD_END
};

static driver_t netfront_driver = {
	"xn",
	netfront_methods,
	sizeof(struct netfront_info),
};
devclass_t netfront_devclass;

DRIVER_MODULE(xe, xenbusb_front, netfront_driver, netfront_devclass, NULL,
    NULL);