/*
 *
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>      /* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenfunc.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <xen/interface/memory.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

#include <dev/xen/netfront/mbufq.h>

#include "xenbus_if.h"

#define XN_CSUM_FEATURES        (CSUM_TCP | CSUM_UDP | CSUM_TSO)

#define GRANT_INVALID_REF       0

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

#if __FreeBSD_version >= 700000
/*
 * Should the driver do LRO on the RX end?  This can be toggled on the fly,
 * but the interface must be reset (down/up) for the change to take effect.
 */
static int xn_enable_lro = 1;
TUNABLE_INT("hw.xn.enable_lro", &xn_enable_lro);
#else

#define IFCAP_TSO4 0
#define CSUM_TSO 0

#endif
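
/*
 * Usage note: the loader tunable above can be set from /boot/loader.conf,
 * e.g. hw.xn.enable_lro="0" to disable LRO at boot; netfront_attach() also
 * exposes the same variable per device as the dev.xn.<unit>.enable_lro
 * sysctl.
 */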

#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

#define MAX_SKB_FRAGS   (65536/PAGE_SIZE + 2)
#define RX_COPY_THRESHOLD 256

#define net_ratelimit() 0

struct netfront_info;
struct netfront_rx_info;

static void xn_txeof(struct netfront_info *);
static void xn_rxeof(struct netfront_info *);
static void network_alloc_rx_buffers(struct netfront_info *);

static void xn_tick_locked(struct netfront_info *);
static void xn_tick(void *);

static void xn_intr(void *);
static void xn_start_locked(struct ifnet *);
static void xn_start(struct ifnet *);
static int  xn_ioctl(struct ifnet *, u_long, caddr_t);
static void xn_ifinit_locked(struct netfront_info *);
static void xn_ifinit(void *);
static void xn_stop(struct netfront_info *);
#ifdef notyet
static void xn_watchdog(struct ifnet *);
#endif

static void show_device(struct netfront_info *sc);
#ifdef notyet
static void netfront_closing(device_t dev);
#endif
static void netif_free(struct netfront_info *info);
static int netfront_detach(device_t dev);

static int talk_to_backend(device_t dev, struct netfront_info *info);
static int create_netdev(device_t dev);
static void netif_disconnect_backend(struct netfront_info *info);
static int setup_device(device_t dev, struct netfront_info *info);
static void end_access(int ref, void *page);

/* Xenolinux helper functions */
int network_connect(struct netfront_info *);

static void xn_free_rx_ring(struct netfront_info *);

static void xn_free_tx_ring(struct netfront_info *);

static int xennet_get_responses(struct netfront_info *np,
        struct netfront_rx_info *rinfo, RING_IDX rp, struct mbuf **list,
        int *pages_flipped_p);

#define virt_to_mfn(x) (vtomach(x) >> PAGE_SHIFT)

#define INVALID_P2M_ENTRY (~0UL)

/*
 * Mbuf pointers.  We need these to keep track of the virtual addresses of
 * our mbuf chains since we can only convert from virtual to physical, not
 * the other way around.  The size must track the free index arrays.
 */
struct xn_chain_data {
        struct mbuf *xn_tx_chain[NET_TX_RING_SIZE+1];
        int xn_tx_chain_cnt;
        struct mbuf *xn_rx_chain[NET_RX_RING_SIZE+1];
};

struct net_device_stats
{
        u_long rx_packets;              /* total packets received       */
        u_long tx_packets;              /* total packets transmitted    */
        u_long rx_bytes;                /* total bytes received         */
        u_long tx_bytes;                /* total bytes transmitted      */
        u_long rx_errors;               /* bad packets received         */
        u_long tx_errors;               /* packet transmit problems     */
        u_long rx_dropped;              /* no space in linux buffers    */
        u_long tx_dropped;              /* no space available in linux  */
        u_long multicast;               /* multicast packets received   */
        u_long collisions;

        /* detailed rx_errors: */
        u_long rx_length_errors;
        u_long rx_over_errors;          /* receiver ring buff overflow  */
        u_long rx_crc_errors;           /* recved pkt with crc error    */
        u_long rx_frame_errors;         /* recv'd frame alignment error */
        u_long rx_fifo_errors;          /* recv'r fifo overrun          */
        u_long rx_missed_errors;        /* receiver missed packet       */

        /* detailed tx_errors */
        u_long tx_aborted_errors;
        u_long tx_carrier_errors;
        u_long tx_fifo_errors;
        u_long tx_heartbeat_errors;
        u_long tx_window_errors;

        /* for cslip etc */
        u_long rx_compressed;
        u_long tx_compressed;
};
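
/*
 * Note: the stats block above mirrors Linux's struct net_device_stats for
 * the benefit of the Xenolinux-derived code below; this driver only updates
 * the packet, byte and rx_errors counters, and the counters that the
 * FreeBSD stack actually reports are the usual ifnet ones
 * (if_ipackets/if_opackets).
 */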

struct netfront_info {
        struct ifnet *xn_ifp;
#if __FreeBSD_version >= 700000
        struct lro_ctrl xn_lro;
#endif

        struct net_device_stats stats;
        u_int tx_full;

        netif_tx_front_ring_t tx;
        netif_rx_front_ring_t rx;

        struct mtx tx_lock;
        struct mtx rx_lock;
        struct sx  sc_lock;

        u_int handle;
        u_int irq;
        u_int copying_receiver;
        u_int carrier;

        /* Receive-ring batched refills. */
#define RX_MIN_TARGET 32
#define RX_MAX_TARGET NET_RX_RING_SIZE
        int rx_min_target, rx_max_target, rx_target;

        /*
         * The {tx,rx} mbuf arrays (xn_cdata.xn_{tx,rx}_chain) store
         * outstanding mbufs.  The first entry in each array is an index
         * into a chain of free entries.
         */
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE + 1];

#define TX_MAX_TARGET min(NET_RX_RING_SIZE, 256)
        device_t xbdev;
        int tx_ring_ref;
        int rx_ring_ref;
        uint8_t mac[ETHER_ADDR_LEN];
        struct xn_chain_data xn_cdata;          /* mbufs */
        struct mbuf_head xn_rx_batch;           /* head of the batch queue */

        int xn_if_flags;
        struct callout xn_stat_ch;

        u_long rx_pfn_array[NET_RX_RING_SIZE];
        multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
        mmu_update_t rx_mmu[NET_RX_RING_SIZE];
};

#define rx_mbufs xn_cdata.xn_rx_chain
#define tx_mbufs xn_cdata.xn_tx_chain

#define XN_LOCK_INIT(_sc, _name) \
        mtx_init(&(_sc)->tx_lock, #_name"_tx", "network transmit lock", MTX_DEF); \
        mtx_init(&(_sc)->rx_lock, #_name"_rx", "network receive lock", MTX_DEF);  \
        sx_init(&(_sc)->sc_lock, #_name"_rx")

#define XN_RX_LOCK(_sc)         mtx_lock(&(_sc)->rx_lock)
#define XN_RX_UNLOCK(_sc)       mtx_unlock(&(_sc)->rx_lock)

#define XN_TX_LOCK(_sc)         mtx_lock(&(_sc)->tx_lock)
#define XN_TX_UNLOCK(_sc)       mtx_unlock(&(_sc)->tx_lock)

#define XN_LOCK(_sc)            sx_xlock(&(_sc)->sc_lock);
#define XN_UNLOCK(_sc)          sx_xunlock(&(_sc)->sc_lock);

#define XN_LOCK_ASSERT(_sc)     sx_assert(&(_sc)->sc_lock, SX_LOCKED);
#define XN_RX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->rx_lock, MA_OWNED);
#define XN_TX_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->tx_lock, MA_OWNED);
#define XN_LOCK_DESTROY(_sc)    mtx_destroy(&(_sc)->rx_lock); \
                                mtx_destroy(&(_sc)->tx_lock); \
                                sx_destroy(&(_sc)->sc_lock);

struct netfront_rx_info {
        struct netif_rx_response rx;
        struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

#define netfront_carrier_on(netif)      ((netif)->carrier = 1)
#define netfront_carrier_off(netif)     ((netif)->carrier = 0)
#define netfront_carrier_ok(netif)      ((netif)->carrier)

/* Functions for acquiring/freeing slots in tx_mbufs[] (the TX mbuf chain). */

static inline void
add_id_to_freelist(struct mbuf **list, unsigned short id)
{
        KASSERT(id != 0,
            ("add_id_to_freelist: the head item (0) must always be free."));
        list[id] = list[0];
        list[0] = (void *)(u_long)id;
}

static inline unsigned short
get_id_from_freelist(struct mbuf **list)
{
        u_int id = (u_int)(u_long)list[0];

        KASSERT(id != 0,
            ("get_id_from_freelist: the head item (0) must always remain free."));
        list[0] = list[id];
        return (id);
}
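
/*
 * The free list is threaded through the mbuf pointer array itself: list[0]
 * holds the index of the first free slot and each free slot holds the index
 * of the next one.  For example, right after initialisation in
 * create_netdev(), a ring of size four looks like:
 *
 *      list[0] = 1, list[1] = 2, list[2] = 3, list[3] = 4, list[4] = 5
 *
 * get_id_from_freelist() then pops slot 1 (list[0] becomes 2), and
 * add_id_to_freelist(list, 1) pushes it back (list[0] becomes 1 again).
 * Slot 0 is never handed out, which is why both functions assert id != 0.
 */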

static inline int
xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct mbuf *
xennet_get_rx_mbuf(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct mbuf *m;

        m = np->rx_mbufs[i];
        np->rx_mbufs[i] = NULL;
        return (m);
}

static inline grant_ref_t
xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = np->grant_rx_ref[i];

        np->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
}

#define IPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#define WPRINTK(fmt, args...) \
    printf("[XEN] " fmt, ##args)
#if 0
#define DPRINTK(fmt, args...) \
    printf("[XEN] %s: " fmt, __func__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/**
 * Read the 'mac' node at the given device's node in the store, and parse
 * that as colon-separated octets, placing the result in the given mac
 * array.  mac must be a preallocated array of length ETHER_ADDR_LEN.
 * Return 0 on success, or errno on error.
 */
static int
xen_net_read_mac(device_t dev, uint8_t mac[])
{
        int error, i;
        char *s, *e, *macstr;

        error = xenbus_read(XBT_NIL, xenbus_get_node(dev), "mac", NULL,
            (void **) &macstr);
        if (error)
                return (error);

        s = macstr;
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                mac[i] = strtoul(s, &e, 16);
                if (s == e || (e[0] != ':' && e[0] != 0)) {
                        free(macstr, M_DEVBUF);
                        return (ENOENT);
                }
                s = &e[1];
        }
        free(macstr, M_DEVBUF);
        return (0);
}

/**
 * Entry point to this code when a new device is created.  Allocate the
 * basic structures and the ring buffers for communication with the backend,
 * and inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int
netfront_probe(device_t dev)
{

        if (!strcmp(xenbus_get_type(dev), "vif")) {
                device_set_desc(dev, "Virtual Network Interface");
                return (0);
        }

        return (ENXIO);
}

static int
netfront_attach(device_t dev)
{
        int err;

        err = create_netdev(dev);
        if (err) {
                xenbus_dev_fatal(dev, err, "creating netdev");
                return err;
        }

#if __FreeBSD_version >= 700000
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW,
            &xn_enable_lro, 0, "Large Receive Offload");
#endif

        return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int
netfront_resume(device_t dev)
{
        struct netfront_info *info = device_get_softc(dev);

        netif_disconnect_backend(info);
        return (0);
}

/* Common code used when first setting up, and when resuming. */
static int
talk_to_backend(device_t dev, struct netfront_info *info)
{
        const char *message;
        struct xenbus_transaction xbt;
        const char *node = xenbus_get_node(dev);
        int err;

        err = xen_net_read_mac(dev, info->mac);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", node);
                goto out;
        }

        /* Create shared ring, alloc event channel.
*/ 469 err = setup_device(dev, info); 470 if (err) 471 goto out; 472 473 again: 474 err = xenbus_transaction_start(&xbt); 475 if (err) { 476 xenbus_dev_fatal(dev, err, "starting transaction"); 477 goto destroy_ring; 478 } 479 err = xenbus_printf(xbt, node, "tx-ring-ref","%u", 480 info->tx_ring_ref); 481 if (err) { 482 message = "writing tx ring-ref"; 483 goto abort_transaction; 484 } 485 err = xenbus_printf(xbt, node, "rx-ring-ref","%u", 486 info->rx_ring_ref); 487 if (err) { 488 message = "writing rx ring-ref"; 489 goto abort_transaction; 490 } 491 err = xenbus_printf(xbt, node, 492 "event-channel", "%u", irq_to_evtchn_port(info->irq)); 493 if (err) { 494 message = "writing event-channel"; 495 goto abort_transaction; 496 } 497 err = xenbus_printf(xbt, node, "request-rx-copy", "%u", 498 info->copying_receiver); 499 if (err) { 500 message = "writing request-rx-copy"; 501 goto abort_transaction; 502 } 503 err = xenbus_printf(xbt, node, "feature-rx-notify", "%d", 1); 504 if (err) { 505 message = "writing feature-rx-notify"; 506 goto abort_transaction; 507 } 508 err = xenbus_printf(xbt, node, "feature-sg", "%d", 1); 509 if (err) { 510 message = "writing feature-sg"; 511 goto abort_transaction; 512 } 513 #if __FreeBSD_version >= 700000 514 err = xenbus_printf(xbt, node, "feature-gso-tcpv4", "%d", 1); 515 if (err) { 516 message = "writing feature-gso-tcpv4"; 517 goto abort_transaction; 518 } 519 #endif 520 521 err = xenbus_transaction_end(xbt, 0); 522 if (err) { 523 if (err == EAGAIN) 524 goto again; 525 xenbus_dev_fatal(dev, err, "completing transaction"); 526 goto destroy_ring; 527 } 528 529 return 0; 530 531 abort_transaction: 532 xenbus_transaction_end(xbt, 1); 533 xenbus_dev_fatal(dev, err, "%s", message); 534 destroy_ring: 535 netif_free(info); 536 out: 537 return err; 538 } 539 540 541 static int 542 setup_device(device_t dev, struct netfront_info *info) 543 { 544 netif_tx_sring_t *txs; 545 netif_rx_sring_t *rxs; 546 int error; 547 struct ifnet *ifp; 548 549 ifp = info->xn_ifp; 550 551 info->tx_ring_ref = GRANT_INVALID_REF; 552 info->rx_ring_ref = GRANT_INVALID_REF; 553 info->rx.sring = NULL; 554 info->tx.sring = NULL; 555 info->irq = 0; 556 557 txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); 558 if (!txs) { 559 error = ENOMEM; 560 xenbus_dev_fatal(dev, error, "allocating tx ring page"); 561 goto fail; 562 } 563 SHARED_RING_INIT(txs); 564 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); 565 error = xenbus_grant_ring(dev, virt_to_mfn(txs), &info->tx_ring_ref); 566 if (error) 567 goto fail; 568 569 rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT|M_ZERO); 570 if (!rxs) { 571 error = ENOMEM; 572 xenbus_dev_fatal(dev, error, "allocating rx ring page"); 573 goto fail; 574 } 575 SHARED_RING_INIT(rxs); 576 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); 577 578 error = xenbus_grant_ring(dev, virt_to_mfn(rxs), &info->rx_ring_ref); 579 if (error) 580 goto fail; 581 582 error = bind_listening_port_to_irqhandler(xenbus_get_otherend_id(dev), 583 "xn", xn_intr, info, INTR_TYPE_NET | INTR_MPSAFE, &info->irq); 584 585 if (error) { 586 xenbus_dev_fatal(dev, error, 587 "bind_evtchn_to_irqhandler failed"); 588 goto fail; 589 } 590 591 show_device(info); 592 593 return (0); 594 595 fail: 596 netif_free(info); 597 return (error); 598 } 599 600 /** 601 * If this interface has an ipv4 address, send an arp for it. This 602 * helps to get the network going again after migrating hosts. 
603 */ 604 static void 605 netfront_send_fake_arp(device_t dev, struct netfront_info *info) 606 { 607 struct ifnet *ifp; 608 struct ifaddr *ifa; 609 610 ifp = info->xn_ifp; 611 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 612 if (ifa->ifa_addr->sa_family == AF_INET) { 613 arp_ifinit(ifp, ifa); 614 } 615 } 616 } 617 618 /** 619 * Callback received when the backend's state changes. 620 */ 621 static void 622 netfront_backend_changed(device_t dev, XenbusState newstate) 623 { 624 struct netfront_info *sc = device_get_softc(dev); 625 626 DPRINTK("newstate=%d\n", newstate); 627 628 switch (newstate) { 629 case XenbusStateInitialising: 630 case XenbusStateInitialised: 631 case XenbusStateConnected: 632 case XenbusStateUnknown: 633 case XenbusStateClosed: 634 case XenbusStateReconfigured: 635 case XenbusStateReconfiguring: 636 break; 637 case XenbusStateInitWait: 638 if (xenbus_get_state(dev) != XenbusStateInitialising) 639 break; 640 if (network_connect(sc) != 0) 641 break; 642 xenbus_set_state(dev, XenbusStateConnected); 643 netfront_send_fake_arp(dev, sc); 644 break; 645 case XenbusStateClosing: 646 xenbus_set_state(dev, XenbusStateClosed); 647 break; 648 } 649 } 650 651 static void 652 xn_free_rx_ring(struct netfront_info *sc) 653 { 654 #if 0 655 int i; 656 657 for (i = 0; i < NET_RX_RING_SIZE; i++) { 658 if (sc->xn_cdata.xn_rx_chain[i] != NULL) { 659 m_freem(sc->xn_cdata.xn_rx_chain[i]); 660 sc->xn_cdata.xn_rx_chain[i] = NULL; 661 } 662 } 663 664 sc->rx.rsp_cons = 0; 665 sc->xn_rx_if->req_prod = 0; 666 sc->xn_rx_if->event = sc->rx.rsp_cons ; 667 #endif 668 } 669 670 static void 671 xn_free_tx_ring(struct netfront_info *sc) 672 { 673 #if 0 674 int i; 675 676 for (i = 0; i < NET_TX_RING_SIZE; i++) { 677 if (sc->xn_cdata.xn_tx_chain[i] != NULL) { 678 m_freem(sc->xn_cdata.xn_tx_chain[i]); 679 sc->xn_cdata.xn_tx_chain[i] = NULL; 680 } 681 } 682 683 return; 684 #endif 685 } 686 687 /* 688 * Do some brief math on the number of descriptors available to 689 * determine how many slots are available. 690 * 691 * Firstly - wouldn't something with RING_FREE_REQUESTS() be more applicable? 692 * Secondly - MAX_SKB_FRAGS is a Linux construct which may not apply here. 693 * Thirdly - it isn't used here anyway; the magic constant '24' is possibly 694 * wrong? 695 * The "2" is presumably to ensure there are also enough slots available for 696 * the ring entries used for "options" (eg, the TSO entry before a packet 697 * is queued); I'm not sure why its 2 and not 1. Perhaps to make sure there's 698 * a "free" node in the tx mbuf list (node 0) to represent the freelist? 699 * 700 * This only figures out whether any xenbus ring descriptors are available; 701 * it doesn't at all reflect how many tx mbuf ring descriptors are also 702 * available. 
703 */ 704 static inline int 705 netfront_tx_slot_available(struct netfront_info *np) 706 { 707 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < 708 (TX_MAX_TARGET - /* MAX_SKB_FRAGS */ 24 - 2)); 709 } 710 static void 711 netif_release_tx_bufs(struct netfront_info *np) 712 { 713 struct mbuf *m; 714 int i; 715 716 for (i = 1; i <= NET_TX_RING_SIZE; i++) { 717 m = np->xn_cdata.xn_tx_chain[i]; 718 719 if (((u_long)m) < KERNBASE) 720 continue; 721 gnttab_grant_foreign_access_ref(np->grant_tx_ref[i], 722 xenbus_get_otherend_id(np->xbdev), 723 virt_to_mfn(mtod(m, vm_offset_t)), 724 GNTMAP_readonly); 725 gnttab_release_grant_reference(&np->gref_tx_head, 726 np->grant_tx_ref[i]); 727 np->grant_tx_ref[i] = GRANT_INVALID_REF; 728 add_id_to_freelist(np->tx_mbufs, i); 729 np->xn_cdata.xn_tx_chain_cnt--; 730 if (np->xn_cdata.xn_tx_chain_cnt < 0) { 731 panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0"); 732 } 733 m_freem(m); 734 } 735 } 736 737 static void 738 network_alloc_rx_buffers(struct netfront_info *sc) 739 { 740 int otherend_id = xenbus_get_otherend_id(sc->xbdev); 741 unsigned short id; 742 struct mbuf *m_new; 743 int i, batch_target, notify; 744 RING_IDX req_prod; 745 struct xen_memory_reservation reservation; 746 grant_ref_t ref; 747 int nr_flips; 748 netif_rx_request_t *req; 749 vm_offset_t vaddr; 750 u_long pfn; 751 752 req_prod = sc->rx.req_prod_pvt; 753 754 if (unlikely(sc->carrier == 0)) 755 return; 756 757 /* 758 * Allocate skbuffs greedily, even though we batch updates to the 759 * receive ring. This creates a less bursty demand on the memory 760 * allocator, so should reduce the chance of failed allocation 761 * requests both for ourself and for other kernel subsystems. 762 */ 763 batch_target = sc->rx_target - (req_prod - sc->rx.rsp_cons); 764 for (i = mbufq_len(&sc->xn_rx_batch); i < batch_target; i++) { 765 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 766 if (m_new == NULL) 767 goto no_mbuf; 768 769 m_cljget(m_new, M_DONTWAIT, MJUMPAGESIZE); 770 if ((m_new->m_flags & M_EXT) == 0) { 771 m_freem(m_new); 772 773 no_mbuf: 774 if (i != 0) 775 goto refill; 776 /* 777 * XXX set timer 778 */ 779 break; 780 } 781 m_new->m_len = m_new->m_pkthdr.len = MJUMPAGESIZE; 782 783 /* queue the mbufs allocated */ 784 mbufq_tail(&sc->xn_rx_batch, m_new); 785 } 786 787 /* Is the batch large enough to be worthwhile? */ 788 if (i < (sc->rx_target/2)) { 789 if (req_prod >sc->rx.sring->req_prod) 790 goto push; 791 return; 792 } 793 /* Adjust floating fill target if we risked running out of buffers. 
*/ 794 if ( ((req_prod - sc->rx.sring->rsp_prod) < (sc->rx_target / 4)) && 795 ((sc->rx_target *= 2) > sc->rx_max_target) ) 796 sc->rx_target = sc->rx_max_target; 797 798 refill: 799 for (nr_flips = i = 0; ; i++) { 800 if ((m_new = mbufq_dequeue(&sc->xn_rx_batch)) == NULL) 801 break; 802 803 m_new->m_ext.ext_arg1 = (vm_paddr_t *)(uintptr_t)( 804 vtophys(m_new->m_ext.ext_buf) >> PAGE_SHIFT); 805 806 id = xennet_rxidx(req_prod + i); 807 808 KASSERT(sc->xn_cdata.xn_rx_chain[id] == NULL, 809 ("non-NULL xm_rx_chain")); 810 sc->xn_cdata.xn_rx_chain[id] = m_new; 811 812 ref = gnttab_claim_grant_reference(&sc->gref_rx_head); 813 KASSERT((short)ref >= 0, ("negative ref")); 814 sc->grant_rx_ref[id] = ref; 815 816 vaddr = mtod(m_new, vm_offset_t); 817 pfn = vtophys(vaddr) >> PAGE_SHIFT; 818 req = RING_GET_REQUEST(&sc->rx, req_prod + i); 819 820 if (sc->copying_receiver == 0) { 821 gnttab_grant_foreign_transfer_ref(ref, 822 otherend_id, pfn); 823 sc->rx_pfn_array[nr_flips] = PFNTOMFN(pfn); 824 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 825 /* Remove this page before passing 826 * back to Xen. 827 */ 828 set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 829 MULTI_update_va_mapping(&sc->rx_mcl[i], 830 vaddr, 0, 0); 831 } 832 nr_flips++; 833 } else { 834 gnttab_grant_foreign_access_ref(ref, 835 otherend_id, 836 PFNTOMFN(pfn), 0); 837 } 838 req->id = id; 839 req->gref = ref; 840 841 sc->rx_pfn_array[i] = 842 vtomach(mtod(m_new,vm_offset_t)) >> PAGE_SHIFT; 843 } 844 845 KASSERT(i, ("no mbufs processed")); /* should have returned earlier */ 846 KASSERT(mbufq_len(&sc->xn_rx_batch) == 0, ("not all mbufs processed")); 847 /* 848 * We may have allocated buffers which have entries outstanding 849 * in the page * update queue -- make sure we flush those first! 850 */ 851 PT_UPDATES_FLUSH(); 852 if (nr_flips != 0) { 853 #ifdef notyet 854 /* Tell the ballon driver what is going on. */ 855 balloon_update_driver_allowance(i); 856 #endif 857 set_xen_guest_handle(reservation.extent_start, sc->rx_pfn_array); 858 reservation.nr_extents = i; 859 reservation.extent_order = 0; 860 reservation.address_bits = 0; 861 reservation.domid = DOMID_SELF; 862 863 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 864 865 /* After all PTEs have been zapped, flush the TLB. */ 866 sc->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = 867 UVMF_TLB_FLUSH|UVMF_ALL; 868 869 /* Give away a batch of pages. */ 870 sc->rx_mcl[i].op = __HYPERVISOR_memory_op; 871 sc->rx_mcl[i].args[0] = XENMEM_decrease_reservation; 872 sc->rx_mcl[i].args[1] = (u_long)&reservation; 873 /* Zap PTEs and give away pages in one big multicall. */ 874 (void)HYPERVISOR_multicall(sc->rx_mcl, i+1); 875 876 /* Check return status of HYPERVISOR_dom_mem_op(). */ 877 if (unlikely(sc->rx_mcl[i].result != i)) 878 panic("Unable to reduce memory reservation\n"); 879 } else { 880 if (HYPERVISOR_memory_op( 881 XENMEM_decrease_reservation, &reservation) 882 != i) 883 panic("Unable to reduce memory " 884 "reservation\n"); 885 } 886 } else { 887 wmb(); 888 } 889 890 /* Above is a suitable barrier to ensure backend will see requests. 
*/ 891 sc->rx.req_prod_pvt = req_prod + i; 892 push: 893 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->rx, notify); 894 if (notify) 895 notify_remote_via_irq(sc->irq); 896 } 897 898 static void 899 xn_rxeof(struct netfront_info *np) 900 { 901 struct ifnet *ifp; 902 #if __FreeBSD_version >= 700000 903 struct lro_ctrl *lro = &np->xn_lro; 904 struct lro_entry *queued; 905 #endif 906 struct netfront_rx_info rinfo; 907 struct netif_rx_response *rx = &rinfo.rx; 908 struct netif_extra_info *extras = rinfo.extras; 909 RING_IDX i, rp; 910 multicall_entry_t *mcl; 911 struct mbuf *m; 912 struct mbuf_head rxq, errq; 913 int err, pages_flipped = 0, work_to_do; 914 915 do { 916 XN_RX_LOCK_ASSERT(np); 917 if (!netfront_carrier_ok(np)) 918 return; 919 920 mbufq_init(&errq); 921 mbufq_init(&rxq); 922 923 ifp = np->xn_ifp; 924 925 rp = np->rx.sring->rsp_prod; 926 rmb(); /* Ensure we see queued responses up to 'rp'. */ 927 928 i = np->rx.rsp_cons; 929 while ((i != rp)) { 930 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx)); 931 memset(extras, 0, sizeof(rinfo.extras)); 932 933 m = NULL; 934 err = xennet_get_responses(np, &rinfo, rp, &m, 935 &pages_flipped); 936 937 if (unlikely(err)) { 938 if (m) 939 mbufq_tail(&errq, m); 940 np->stats.rx_errors++; 941 i = np->rx.rsp_cons; 942 continue; 943 } 944 945 m->m_pkthdr.rcvif = ifp; 946 if ( rx->flags & NETRXF_data_validated ) { 947 /* Tell the stack the checksums are okay */ 948 /* 949 * XXX this isn't necessarily the case - need to add 950 * check 951 */ 952 953 m->m_pkthdr.csum_flags |= 954 (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID 955 | CSUM_PSEUDO_HDR); 956 m->m_pkthdr.csum_data = 0xffff; 957 } 958 959 np->stats.rx_packets++; 960 np->stats.rx_bytes += m->m_pkthdr.len; 961 962 mbufq_tail(&rxq, m); 963 np->rx.rsp_cons = ++i; 964 } 965 966 if (pages_flipped) { 967 /* Some pages are no longer absent... */ 968 #ifdef notyet 969 balloon_update_driver_allowance(-pages_flipped); 970 #endif 971 /* Do all the remapping work, and M->P updates, in one big 972 * hypercall. 973 */ 974 if (!!xen_feature(XENFEAT_auto_translated_physmap)) { 975 mcl = np->rx_mcl + pages_flipped; 976 mcl->op = __HYPERVISOR_mmu_update; 977 mcl->args[0] = (u_long)np->rx_mmu; 978 mcl->args[1] = pages_flipped; 979 mcl->args[2] = 0; 980 mcl->args[3] = DOMID_SELF; 981 (void)HYPERVISOR_multicall(np->rx_mcl, 982 pages_flipped + 1); 983 } 984 } 985 986 while ((m = mbufq_dequeue(&errq))) 987 m_freem(m); 988 989 /* 990 * Process all the mbufs after the remapping is complete. 991 * Break the mbuf chain first though. 992 */ 993 while ((m = mbufq_dequeue(&rxq)) != NULL) { 994 ifp->if_ipackets++; 995 996 /* 997 * Do we really need to drop the rx lock? 998 */ 999 XN_RX_UNLOCK(np); 1000 #if __FreeBSD_version >= 700000 1001 /* Use LRO if possible */ 1002 if ((ifp->if_capenable & IFCAP_LRO) == 0 || 1003 lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) { 1004 /* 1005 * If LRO fails, pass up to the stack 1006 * directly. 1007 */ 1008 (*ifp->if_input)(ifp, m); 1009 } 1010 #else 1011 (*ifp->if_input)(ifp, m); 1012 #endif 1013 XN_RX_LOCK(np); 1014 } 1015 1016 np->rx.rsp_cons = i; 1017 1018 #if __FreeBSD_version >= 700000 1019 /* 1020 * Flush any outstanding LRO work 1021 */ 1022 while (!SLIST_EMPTY(&lro->lro_active)) { 1023 queued = SLIST_FIRST(&lro->lro_active); 1024 SLIST_REMOVE_HEAD(&lro->lro_active, next); 1025 tcp_lro_flush(lro, queued); 1026 } 1027 #endif 1028 1029 #if 0 1030 /* If we get a callback with very few responses, reduce fill target. */ 1031 /* NB. Note exponential increase, linear decrease. 
*/ 1032 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) > 1033 ((3*np->rx_target) / 4)) && (--np->rx_target < np->rx_min_target)) 1034 np->rx_target = np->rx_min_target; 1035 #endif 1036 1037 network_alloc_rx_buffers(np); 1038 1039 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, work_to_do); 1040 } while (work_to_do); 1041 } 1042 1043 static void 1044 xn_txeof(struct netfront_info *np) 1045 { 1046 RING_IDX i, prod; 1047 unsigned short id; 1048 struct ifnet *ifp; 1049 netif_tx_response_t *txr; 1050 struct mbuf *m; 1051 1052 XN_TX_LOCK_ASSERT(np); 1053 1054 if (!netfront_carrier_ok(np)) 1055 return; 1056 1057 ifp = np->xn_ifp; 1058 ifp->if_timer = 0; 1059 1060 do { 1061 prod = np->tx.sring->rsp_prod; 1062 rmb(); /* Ensure we see responses up to 'rp'. */ 1063 1064 for (i = np->tx.rsp_cons; i != prod; i++) { 1065 txr = RING_GET_RESPONSE(&np->tx, i); 1066 if (txr->status == NETIF_RSP_NULL) 1067 continue; 1068 1069 id = txr->id; 1070 m = np->xn_cdata.xn_tx_chain[id]; 1071 KASSERT(m != NULL, ("mbuf not found in xn_tx_chain")); 1072 M_ASSERTVALID(m); 1073 1074 /* 1075 * Increment packet count if this is the last 1076 * mbuf of the chain. 1077 */ 1078 if (!m->m_next) 1079 ifp->if_opackets++; 1080 if (unlikely(gnttab_query_foreign_access( 1081 np->grant_tx_ref[id]) != 0)) { 1082 printf("network_tx_buf_gc: warning " 1083 "-- grant still in use by backend " 1084 "domain.\n"); 1085 goto out; 1086 } 1087 gnttab_end_foreign_access_ref( 1088 np->grant_tx_ref[id]); 1089 gnttab_release_grant_reference( 1090 &np->gref_tx_head, np->grant_tx_ref[id]); 1091 np->grant_tx_ref[id] = GRANT_INVALID_REF; 1092 1093 np->xn_cdata.xn_tx_chain[id] = NULL; 1094 add_id_to_freelist(np->xn_cdata.xn_tx_chain, id); 1095 np->xn_cdata.xn_tx_chain_cnt--; 1096 if (np->xn_cdata.xn_tx_chain_cnt < 0) { 1097 panic("netif_release_tx_bufs: tx_chain_cnt must be >= 0"); 1098 } 1099 m_free(m); 1100 /* Only mark the queue active if we've freed up at least one slot to try */ 1101 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1102 } 1103 np->tx.rsp_cons = prod; 1104 1105 /* 1106 * Set a new event, then check for race with update of 1107 * tx_cons. Note that it is essential to schedule a 1108 * callback, no matter how few buffers are pending. Even if 1109 * there is space in the transmit ring, higher layers may 1110 * be blocked because too much data is outstanding: in such 1111 * cases notification from Xen is likely to be the only kick 1112 * that we'll get. 
1113 */ 1114 np->tx.sring->rsp_event = 1115 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1; 1116 1117 mb(); 1118 } while (prod != np->tx.sring->rsp_prod); 1119 1120 out: 1121 if (np->tx_full && 1122 ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) { 1123 np->tx_full = 0; 1124 #if 0 1125 if (np->user_state == UST_OPEN) 1126 netif_wake_queue(dev); 1127 #endif 1128 } 1129 1130 } 1131 1132 static void 1133 xn_intr(void *xsc) 1134 { 1135 struct netfront_info *np = xsc; 1136 struct ifnet *ifp = np->xn_ifp; 1137 1138 #if 0 1139 if (!(np->rx.rsp_cons != np->rx.sring->rsp_prod && 1140 likely(netfront_carrier_ok(np)) && 1141 ifp->if_drv_flags & IFF_DRV_RUNNING)) 1142 return; 1143 #endif 1144 if (np->tx.rsp_cons != np->tx.sring->rsp_prod) { 1145 XN_TX_LOCK(np); 1146 xn_txeof(np); 1147 XN_TX_UNLOCK(np); 1148 } 1149 1150 XN_RX_LOCK(np); 1151 xn_rxeof(np); 1152 XN_RX_UNLOCK(np); 1153 1154 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1155 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1156 xn_start(ifp); 1157 } 1158 1159 1160 static void 1161 xennet_move_rx_slot(struct netfront_info *np, struct mbuf *m, 1162 grant_ref_t ref) 1163 { 1164 int new = xennet_rxidx(np->rx.req_prod_pvt); 1165 1166 KASSERT(np->rx_mbufs[new] == NULL, ("rx_mbufs != NULL")); 1167 np->rx_mbufs[new] = m; 1168 np->grant_rx_ref[new] = ref; 1169 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; 1170 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; 1171 np->rx.req_prod_pvt++; 1172 } 1173 1174 static int 1175 xennet_get_extras(struct netfront_info *np, 1176 struct netif_extra_info *extras, RING_IDX rp) 1177 { 1178 struct netif_extra_info *extra; 1179 RING_IDX cons = np->rx.rsp_cons; 1180 1181 int err = 0; 1182 1183 do { 1184 struct mbuf *m; 1185 grant_ref_t ref; 1186 1187 if (unlikely(cons + 1 == rp)) { 1188 #if 0 1189 if (net_ratelimit()) 1190 WPRINTK("Missing extra info\n"); 1191 #endif 1192 err = -EINVAL; 1193 break; 1194 } 1195 1196 extra = (struct netif_extra_info *) 1197 RING_GET_RESPONSE(&np->rx, ++cons); 1198 1199 if (unlikely(!extra->type || 1200 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1201 #if 0 1202 if (net_ratelimit()) 1203 WPRINTK("Invalid extra type: %d\n", 1204 extra->type); 1205 #endif 1206 err = -EINVAL; 1207 } else { 1208 memcpy(&extras[extra->type - 1], extra, sizeof(*extra)); 1209 } 1210 1211 m = xennet_get_rx_mbuf(np, cons); 1212 ref = xennet_get_rx_ref(np, cons); 1213 xennet_move_rx_slot(np, m, ref); 1214 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 1215 1216 np->rx.rsp_cons = cons; 1217 return err; 1218 } 1219 1220 static int 1221 xennet_get_responses(struct netfront_info *np, 1222 struct netfront_rx_info *rinfo, RING_IDX rp, 1223 struct mbuf **list, 1224 int *pages_flipped_p) 1225 { 1226 int pages_flipped = *pages_flipped_p; 1227 struct mmu_update *mmu; 1228 struct multicall_entry *mcl; 1229 struct netif_rx_response *rx = &rinfo->rx; 1230 struct netif_extra_info *extras = rinfo->extras; 1231 RING_IDX cons = np->rx.rsp_cons; 1232 struct mbuf *m, *m0, *m_prev; 1233 grant_ref_t ref = xennet_get_rx_ref(np, cons); 1234 int max = 5 /* MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD) */; 1235 int frags = 1; 1236 int err = 0; 1237 u_long ret; 1238 1239 m0 = m = m_prev = xennet_get_rx_mbuf(np, cons); 1240 1241 1242 if (rx->flags & NETRXF_extra_info) { 1243 err = xennet_get_extras(np, extras, rp); 1244 cons = np->rx.rsp_cons; 1245 } 1246 1247 1248 if (m0 != NULL) { 1249 m0->m_pkthdr.len = 0; 1250 m0->m_next = NULL; 1251 } 1252 1253 for (;;) { 1254 u_long mfn; 1255 1256 #if 0 1257 printf("rx->status=%hd 
rx->offset=%hu frags=%u\n", 1258 rx->status, rx->offset, frags); 1259 #endif 1260 if (unlikely(rx->status < 0 || 1261 rx->offset + rx->status > PAGE_SIZE)) { 1262 #if 0 1263 if (net_ratelimit()) 1264 WPRINTK("rx->offset: %x, size: %u\n", 1265 rx->offset, rx->status); 1266 #endif 1267 xennet_move_rx_slot(np, m, ref); 1268 err = -EINVAL; 1269 goto next; 1270 } 1271 1272 /* 1273 * This definitely indicates a bug, either in this driver or in 1274 * the backend driver. In future this should flag the bad 1275 * situation to the system controller to reboot the backed. 1276 */ 1277 if (ref == GRANT_INVALID_REF) { 1278 #if 0 1279 if (net_ratelimit()) 1280 WPRINTK("Bad rx response id %d.\n", rx->id); 1281 #endif 1282 err = -EINVAL; 1283 goto next; 1284 } 1285 1286 if (!np->copying_receiver) { 1287 /* Memory pressure, insufficient buffer 1288 * headroom, ... 1289 */ 1290 if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) { 1291 if (net_ratelimit()) 1292 WPRINTK("Unfulfilled rx req " 1293 "(id=%d, st=%d).\n", 1294 rx->id, rx->status); 1295 xennet_move_rx_slot(np, m, ref); 1296 err = -ENOMEM; 1297 goto next; 1298 } 1299 1300 if (!xen_feature( XENFEAT_auto_translated_physmap)) { 1301 /* Remap the page. */ 1302 void *vaddr = mtod(m, void *); 1303 uint32_t pfn; 1304 1305 mcl = np->rx_mcl + pages_flipped; 1306 mmu = np->rx_mmu + pages_flipped; 1307 1308 MULTI_update_va_mapping(mcl, (u_long)vaddr, 1309 (((vm_paddr_t)mfn) << PAGE_SHIFT) | PG_RW | 1310 PG_V | PG_M | PG_A, 0); 1311 pfn = (uintptr_t)m->m_ext.ext_arg1; 1312 mmu->ptr = ((vm_paddr_t)mfn << PAGE_SHIFT) | 1313 MMU_MACHPHYS_UPDATE; 1314 mmu->val = pfn; 1315 1316 set_phys_to_machine(pfn, mfn); 1317 } 1318 pages_flipped++; 1319 } else { 1320 ret = gnttab_end_foreign_access_ref(ref); 1321 KASSERT(ret, ("ret != 0")); 1322 } 1323 1324 gnttab_release_grant_reference(&np->gref_rx_head, ref); 1325 1326 next: 1327 if (m == NULL) 1328 break; 1329 1330 m->m_len = rx->status; 1331 m->m_data += rx->offset; 1332 m0->m_pkthdr.len += rx->status; 1333 1334 if (!(rx->flags & NETRXF_more_data)) 1335 break; 1336 1337 if (cons + frags == rp) { 1338 if (net_ratelimit()) 1339 WPRINTK("Need more frags\n"); 1340 err = -ENOENT; 1341 break; 1342 } 1343 m_prev = m; 1344 1345 rx = RING_GET_RESPONSE(&np->rx, cons + frags); 1346 m = xennet_get_rx_mbuf(np, cons + frags); 1347 1348 m_prev->m_next = m; 1349 m->m_next = NULL; 1350 ref = xennet_get_rx_ref(np, cons + frags); 1351 frags++; 1352 } 1353 *list = m0; 1354 1355 if (unlikely(frags > max)) { 1356 if (net_ratelimit()) 1357 WPRINTK("Too many frags\n"); 1358 err = -E2BIG; 1359 } 1360 1361 if (unlikely(err)) 1362 np->rx.rsp_cons = cons + frags; 1363 1364 *pages_flipped_p = pages_flipped; 1365 1366 return err; 1367 } 1368 1369 static void 1370 xn_tick_locked(struct netfront_info *sc) 1371 { 1372 XN_RX_LOCK_ASSERT(sc); 1373 callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 1374 1375 /* XXX placeholder for printing debug information */ 1376 1377 } 1378 1379 1380 static void 1381 xn_tick(void *xsc) 1382 { 1383 struct netfront_info *sc; 1384 1385 sc = xsc; 1386 XN_RX_LOCK(sc); 1387 xn_tick_locked(sc); 1388 XN_RX_UNLOCK(sc); 1389 1390 } 1391 static void 1392 xn_start_locked(struct ifnet *ifp) 1393 { 1394 int otherend_id; 1395 unsigned short id; 1396 struct mbuf *m_head, *m; 1397 struct netfront_info *sc; 1398 netif_tx_request_t *tx; 1399 netif_extra_info_t *extra; 1400 RING_IDX i; 1401 grant_ref_t ref; 1402 u_long mfn, tx_bytes; 1403 int notify, nfrags; 1404 1405 sc = ifp->if_softc; 1406 otherend_id = xenbus_get_otherend_id(sc->xbdev); 
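        /*
         * The loop below drains if_snd: each dequeued mbuf chain is checked
         * against the available ring and freelist slots, defragmented if it
         * has more than MAX_SKB_FRAGS fragments, and then mapped fragment by
         * fragment into netif_tx_request_t slots, one grant reference per
         * fragment, before the ring is pushed and the backend notified.
         */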
1407 tx_bytes = 0; 1408 1409 if (!netfront_carrier_ok(sc)) 1410 return; 1411 1412 for (i = sc->tx.req_prod_pvt; TRUE; i++) { 1413 IF_DEQUEUE(&ifp->if_snd, m_head); 1414 if (m_head == NULL) 1415 break; 1416 1417 /* 1418 * netfront_tx_slot_available() tries to do some math to 1419 * ensure that there'll be enough xenbus ring slots available 1420 * for the maximum number of packet fragments (and a couple more 1421 * for what I guess are TSO and other ring entry items.) 1422 */ 1423 if (!netfront_tx_slot_available(sc)) { 1424 IF_PREPEND(&ifp->if_snd, m_head); 1425 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1426 break; 1427 } 1428 1429 /* 1430 * Defragment the mbuf if necessary. 1431 */ 1432 for (m = m_head, nfrags = 0; m; m = m->m_next) 1433 nfrags++; 1434 if (nfrags > MAX_SKB_FRAGS) { 1435 m = m_defrag(m_head, M_DONTWAIT); 1436 if (!m) { 1437 m_freem(m_head); 1438 break; 1439 } 1440 m_head = m; 1441 } 1442 1443 /* Determine how many fragments now exist */ 1444 for (m = m_head, nfrags = 0; m; m = m->m_next) 1445 nfrags++; 1446 1447 /* 1448 * Don't attempt to queue this packet if there aren't 1449 * enough free entries in the chain. 1450 * 1451 * There isn't a 1:1 correspondance between the mbuf TX ring 1452 * and the xenbus TX ring. 1453 * xn_txeof() may need to be called to free up some slots. 1454 * 1455 * It is quite possible that this can be later eliminated if 1456 * it turns out that partial * packets can be pushed into 1457 * the ringbuffer, with fragments pushed in when further slots 1458 * free up. 1459 * 1460 * It is also quite possible that the driver will lock up 1461 * if the TX queue fills up with no RX traffic, and 1462 * the mbuf ring is exhausted. The queue may need 1463 * a swift kick to continue. 1464 */ 1465 1466 /* 1467 * It is not +1 like the allocation because we need to keep 1468 * slot [0] free for the freelist head 1469 */ 1470 if (sc->xn_cdata.xn_tx_chain_cnt + nfrags >= NET_TX_RING_SIZE) { 1471 printf("xn_start_locked: xn_tx_chain_cnt (%d) + nfrags %d >= NET_TX_RING_SIZE (%d); must be full!\n", 1472 (int) sc->xn_cdata.xn_tx_chain_cnt, 1473 (int) nfrags, (int) NET_TX_RING_SIZE); 1474 IF_PREPEND(&ifp->if_snd, m_head); 1475 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1476 break; 1477 } 1478 1479 /* 1480 * Make sure there's actually space available in the 1481 * Xen TX ring for this. Overcompensate for the possibility 1482 * of having a TCP offload fragment just in case for now 1483 * (the +1) rather than adding logic to accurately calculate 1484 * the required size. 1485 */ 1486 if (RING_FREE_REQUESTS(&sc->tx) < (nfrags + 1)) { 1487 printf("xn_start_locked: free ring slots (%d) < (nfrags + 1) (%d); must be full!\n", 1488 (int) RING_FREE_REQUESTS(&sc->tx), 1489 (int) (nfrags + 1)); 1490 IF_PREPEND(&ifp->if_snd, m_head); 1491 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1492 break; 1493 } 1494 1495 /* 1496 * Start packing the mbufs in this chain into 1497 * the fragment pointers. Stop when we run out 1498 * of fragments or hit the end of the mbuf chain. 
1499 */ 1500 m = m_head; 1501 extra = NULL; 1502 for (m = m_head; m; m = m->m_next) { 1503 tx = RING_GET_REQUEST(&sc->tx, i); 1504 id = get_id_from_freelist(sc->xn_cdata.xn_tx_chain); 1505 if (id == 0) 1506 panic("xn_start_locked: was allocated the freelist head!\n"); 1507 sc->xn_cdata.xn_tx_chain_cnt++; 1508 if (sc->xn_cdata.xn_tx_chain_cnt >= NET_TX_RING_SIZE+1) 1509 panic("xn_start_locked: tx_chain_cnt must be < NET_TX_RING_SIZE+1\n"); 1510 sc->xn_cdata.xn_tx_chain[id] = m; 1511 tx->id = id; 1512 ref = gnttab_claim_grant_reference(&sc->gref_tx_head); 1513 KASSERT((short)ref >= 0, ("Negative ref")); 1514 mfn = virt_to_mfn(mtod(m, vm_offset_t)); 1515 gnttab_grant_foreign_access_ref(ref, otherend_id, 1516 mfn, GNTMAP_readonly); 1517 tx->gref = sc->grant_tx_ref[id] = ref; 1518 tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1); 1519 tx->flags = 0; 1520 if (m == m_head) { 1521 /* 1522 * The first fragment has the entire packet 1523 * size, subsequent fragments have just the 1524 * fragment size. The backend works out the 1525 * true size of the first fragment by 1526 * subtracting the sizes of the other 1527 * fragments. 1528 */ 1529 tx->size = m->m_pkthdr.len; 1530 1531 /* 1532 * The first fragment contains the 1533 * checksum flags and is optionally 1534 * followed by extra data for TSO etc. 1535 */ 1536 if (m->m_pkthdr.csum_flags 1537 & CSUM_DELAY_DATA) { 1538 tx->flags |= (NETTXF_csum_blank 1539 | NETTXF_data_validated); 1540 } 1541 #if __FreeBSD_version >= 700000 1542 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1543 struct netif_extra_info *gso = 1544 (struct netif_extra_info *) 1545 RING_GET_REQUEST(&sc->tx, ++i); 1546 1547 if (extra) 1548 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; 1549 else 1550 tx->flags |= NETTXF_extra_info; 1551 1552 gso->u.gso.size = m->m_pkthdr.tso_segsz; 1553 gso->u.gso.type = 1554 XEN_NETIF_GSO_TYPE_TCPV4; 1555 gso->u.gso.pad = 0; 1556 gso->u.gso.features = 0; 1557 1558 gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 1559 gso->flags = 0; 1560 extra = gso; 1561 } 1562 #endif 1563 } else { 1564 tx->size = m->m_len; 1565 } 1566 if (m->m_next) { 1567 tx->flags |= NETTXF_more_data; 1568 i++; 1569 } 1570 } 1571 1572 BPF_MTAP(ifp, m_head); 1573 1574 sc->stats.tx_bytes += m_head->m_pkthdr.len; 1575 sc->stats.tx_packets++; 1576 } 1577 1578 sc->tx.req_prod_pvt = i; 1579 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->tx, notify); 1580 if (notify) 1581 notify_remote_via_irq(sc->irq); 1582 1583 xn_txeof(sc); 1584 1585 if (RING_FULL(&sc->tx)) { 1586 sc->tx_full = 1; 1587 #if 0 1588 netif_stop_queue(dev); 1589 #endif 1590 } 1591 1592 return; 1593 } 1594 1595 static void 1596 xn_start(struct ifnet *ifp) 1597 { 1598 struct netfront_info *sc; 1599 sc = ifp->if_softc; 1600 XN_TX_LOCK(sc); 1601 xn_start_locked(ifp); 1602 XN_TX_UNLOCK(sc); 1603 } 1604 1605 /* equivalent of network_open() in Linux */ 1606 static void 1607 xn_ifinit_locked(struct netfront_info *sc) 1608 { 1609 struct ifnet *ifp; 1610 1611 XN_LOCK_ASSERT(sc); 1612 1613 ifp = sc->xn_ifp; 1614 1615 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1616 return; 1617 1618 xn_stop(sc); 1619 1620 network_alloc_rx_buffers(sc); 1621 sc->rx.sring->rsp_event = sc->rx.rsp_cons + 1; 1622 1623 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1624 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1625 1626 callout_reset(&sc->xn_stat_ch, hz, xn_tick, sc); 1627 1628 } 1629 1630 1631 static void 1632 xn_ifinit(void *xsc) 1633 { 1634 struct netfront_info *sc = xsc; 1635 1636 XN_LOCK(sc); 1637 xn_ifinit_locked(sc); 1638 XN_UNLOCK(sc); 1639 1640 } 1641 1642 1643 static int 1644 
xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1645 { 1646 struct netfront_info *sc = ifp->if_softc; 1647 struct ifreq *ifr = (struct ifreq *) data; 1648 struct ifaddr *ifa = (struct ifaddr *)data; 1649 1650 int mask, error = 0; 1651 switch(cmd) { 1652 case SIOCSIFADDR: 1653 case SIOCGIFADDR: 1654 XN_LOCK(sc); 1655 if (ifa->ifa_addr->sa_family == AF_INET) { 1656 ifp->if_flags |= IFF_UP; 1657 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1658 xn_ifinit_locked(sc); 1659 arp_ifinit(ifp, ifa); 1660 XN_UNLOCK(sc); 1661 } else { 1662 XN_UNLOCK(sc); 1663 error = ether_ioctl(ifp, cmd, data); 1664 } 1665 break; 1666 case SIOCSIFMTU: 1667 /* XXX can we alter the MTU on a VN ?*/ 1668 #ifdef notyet 1669 if (ifr->ifr_mtu > XN_JUMBO_MTU) 1670 error = EINVAL; 1671 else 1672 #endif 1673 { 1674 ifp->if_mtu = ifr->ifr_mtu; 1675 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1676 xn_ifinit(sc); 1677 } 1678 break; 1679 case SIOCSIFFLAGS: 1680 XN_LOCK(sc); 1681 if (ifp->if_flags & IFF_UP) { 1682 /* 1683 * If only the state of the PROMISC flag changed, 1684 * then just use the 'set promisc mode' command 1685 * instead of reinitializing the entire NIC. Doing 1686 * a full re-init means reloading the firmware and 1687 * waiting for it to start up, which may take a 1688 * second or two. 1689 */ 1690 #ifdef notyet 1691 /* No promiscuous mode with Xen */ 1692 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1693 ifp->if_flags & IFF_PROMISC && 1694 !(sc->xn_if_flags & IFF_PROMISC)) { 1695 XN_SETBIT(sc, XN_RX_MODE, 1696 XN_RXMODE_RX_PROMISC); 1697 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1698 !(ifp->if_flags & IFF_PROMISC) && 1699 sc->xn_if_flags & IFF_PROMISC) { 1700 XN_CLRBIT(sc, XN_RX_MODE, 1701 XN_RXMODE_RX_PROMISC); 1702 } else 1703 #endif 1704 xn_ifinit_locked(sc); 1705 } else { 1706 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1707 xn_stop(sc); 1708 } 1709 } 1710 sc->xn_if_flags = ifp->if_flags; 1711 XN_UNLOCK(sc); 1712 error = 0; 1713 break; 1714 case SIOCSIFCAP: 1715 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1716 if (mask & IFCAP_TXCSUM) { 1717 if (IFCAP_TXCSUM & ifp->if_capenable) { 1718 ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); 1719 ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP 1720 | CSUM_IP | CSUM_TSO); 1721 } else { 1722 ifp->if_capenable |= IFCAP_TXCSUM; 1723 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP 1724 | CSUM_IP); 1725 } 1726 } 1727 if (mask & IFCAP_RXCSUM) { 1728 ifp->if_capenable ^= IFCAP_RXCSUM; 1729 } 1730 #if __FreeBSD_version >= 700000 1731 if (mask & IFCAP_TSO4) { 1732 if (IFCAP_TSO4 & ifp->if_capenable) { 1733 ifp->if_capenable &= ~IFCAP_TSO4; 1734 ifp->if_hwassist &= ~CSUM_TSO; 1735 } else if (IFCAP_TXCSUM & ifp->if_capenable) { 1736 ifp->if_capenable |= IFCAP_TSO4; 1737 ifp->if_hwassist |= CSUM_TSO; 1738 } else { 1739 IPRINTK("Xen requires tx checksum offload" 1740 " be enabled to use TSO\n"); 1741 error = EINVAL; 1742 } 1743 } 1744 if (mask & IFCAP_LRO) { 1745 ifp->if_capenable ^= IFCAP_LRO; 1746 1747 } 1748 #endif 1749 error = 0; 1750 break; 1751 case SIOCADDMULTI: 1752 case SIOCDELMULTI: 1753 #ifdef notyet 1754 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1755 XN_LOCK(sc); 1756 xn_setmulti(sc); 1757 XN_UNLOCK(sc); 1758 error = 0; 1759 } 1760 #endif 1761 /* FALLTHROUGH */ 1762 case SIOCSIFMEDIA: 1763 case SIOCGIFMEDIA: 1764 error = EINVAL; 1765 break; 1766 default: 1767 error = ether_ioctl(ifp, cmd, data); 1768 } 1769 1770 return (error); 1771 } 1772 1773 static void 1774 xn_stop(struct netfront_info *sc) 1775 { 1776 struct ifnet *ifp; 1777 1778 XN_LOCK_ASSERT(sc); 1779 1780 ifp = 
sc->xn_ifp; 1781 1782 callout_stop(&sc->xn_stat_ch); 1783 1784 xn_free_rx_ring(sc); 1785 xn_free_tx_ring(sc); 1786 1787 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1788 } 1789 1790 /* START of Xenolinux helper functions adapted to FreeBSD */ 1791 int 1792 network_connect(struct netfront_info *np) 1793 { 1794 int i, requeue_idx, error; 1795 grant_ref_t ref; 1796 netif_rx_request_t *req; 1797 u_int feature_rx_copy, feature_rx_flip; 1798 1799 error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev), 1800 "feature-rx-copy", NULL, "%u", &feature_rx_copy); 1801 if (error) 1802 feature_rx_copy = 0; 1803 error = xenbus_scanf(XBT_NIL, xenbus_get_otherend_path(np->xbdev), 1804 "feature-rx-flip", NULL, "%u", &feature_rx_flip); 1805 if (error) 1806 feature_rx_flip = 1; 1807 1808 /* 1809 * Copy packets on receive path if: 1810 * (a) This was requested by user, and the backend supports it; or 1811 * (b) Flipping was requested, but this is unsupported by the backend. 1812 */ 1813 np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) || 1814 (MODPARM_rx_flip && !feature_rx_flip)); 1815 1816 XN_LOCK(np); 1817 /* Recovery procedure: */ 1818 error = talk_to_backend(np->xbdev, np); 1819 if (error) 1820 return (error); 1821 1822 /* Step 1: Reinitialise variables. */ 1823 netif_release_tx_bufs(np); 1824 1825 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 1826 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 1827 struct mbuf *m; 1828 u_long pfn; 1829 1830 if (np->rx_mbufs[i] == NULL) 1831 continue; 1832 1833 m = np->rx_mbufs[requeue_idx] = xennet_get_rx_mbuf(np, i); 1834 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); 1835 req = RING_GET_REQUEST(&np->rx, requeue_idx); 1836 pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT; 1837 1838 if (!np->copying_receiver) { 1839 gnttab_grant_foreign_transfer_ref(ref, 1840 xenbus_get_otherend_id(np->xbdev), 1841 pfn); 1842 } else { 1843 gnttab_grant_foreign_access_ref(ref, 1844 xenbus_get_otherend_id(np->xbdev), 1845 PFNTOMFN(pfn), 0); 1846 } 1847 req->gref = ref; 1848 req->id = requeue_idx; 1849 1850 requeue_idx++; 1851 } 1852 1853 np->rx.req_prod_pvt = requeue_idx; 1854 1855 /* Step 3: All public and private state should now be sane. Get 1856 * ready to start sending and receiving packets and give the driver 1857 * domain a kick because we've probably just requeued some 1858 * packets. 1859 */ 1860 netfront_carrier_on(np); 1861 notify_remote_via_irq(np->irq); 1862 XN_TX_LOCK(np); 1863 xn_txeof(np); 1864 XN_TX_UNLOCK(np); 1865 network_alloc_rx_buffers(np); 1866 XN_UNLOCK(np); 1867 1868 return (0); 1869 } 1870 1871 static void 1872 show_device(struct netfront_info *sc) 1873 { 1874 #ifdef DEBUG 1875 if (sc) { 1876 IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n", 1877 sc->xn_ifno, 1878 be_state_name[sc->xn_backend_state], 1879 sc->xn_user_state ? "open" : "closed", 1880 sc->xn_evtchn, 1881 sc->xn_irq, 1882 sc->xn_tx_if, 1883 sc->xn_rx_if); 1884 } else { 1885 IPRINTK("<vif NULL>\n"); 1886 } 1887 #endif 1888 } 1889 1890 /** Create a network device. 
1891 * @param handle device handle 1892 */ 1893 int 1894 create_netdev(device_t dev) 1895 { 1896 int i; 1897 struct netfront_info *np; 1898 int err; 1899 struct ifnet *ifp; 1900 1901 np = device_get_softc(dev); 1902 1903 np->xbdev = dev; 1904 1905 XN_LOCK_INIT(np, xennetif); 1906 np->rx_target = RX_MIN_TARGET; 1907 np->rx_min_target = RX_MIN_TARGET; 1908 np->rx_max_target = RX_MAX_TARGET; 1909 1910 /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */ 1911 for (i = 0; i <= NET_TX_RING_SIZE; i++) { 1912 np->tx_mbufs[i] = (void *) ((u_long) i+1); 1913 np->grant_tx_ref[i] = GRANT_INVALID_REF; 1914 } 1915 for (i = 0; i <= NET_RX_RING_SIZE; i++) { 1916 np->rx_mbufs[i] = NULL; 1917 np->grant_rx_ref[i] = GRANT_INVALID_REF; 1918 } 1919 /* A grant for every tx ring slot */ 1920 if (gnttab_alloc_grant_references(TX_MAX_TARGET, 1921 &np->gref_tx_head) < 0) { 1922 printf("#### netfront can't alloc tx grant refs\n"); 1923 err = ENOMEM; 1924 goto exit; 1925 } 1926 /* A grant for every rx ring slot */ 1927 if (gnttab_alloc_grant_references(RX_MAX_TARGET, 1928 &np->gref_rx_head) < 0) { 1929 printf("#### netfront can't alloc rx grant refs\n"); 1930 gnttab_free_grant_references(np->gref_tx_head); 1931 err = ENOMEM; 1932 goto exit; 1933 } 1934 1935 err = xen_net_read_mac(dev, np->mac); 1936 if (err) { 1937 xenbus_dev_fatal(dev, err, "parsing %s/mac", 1938 xenbus_get_node(dev)); 1939 goto out; 1940 } 1941 1942 /* Set up ifnet structure */ 1943 ifp = np->xn_ifp = if_alloc(IFT_ETHER); 1944 ifp->if_softc = np; 1945 if_initname(ifp, "xn", device_get_unit(dev)); 1946 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1947 ifp->if_ioctl = xn_ioctl; 1948 ifp->if_output = ether_output; 1949 ifp->if_start = xn_start; 1950 #ifdef notyet 1951 ifp->if_watchdog = xn_watchdog; 1952 #endif 1953 ifp->if_init = xn_ifinit; 1954 ifp->if_mtu = ETHERMTU; 1955 ifp->if_snd.ifq_maxlen = NET_TX_RING_SIZE - 1; 1956 1957 ifp->if_hwassist = XN_CSUM_FEATURES; 1958 ifp->if_capabilities = IFCAP_HWCSUM; 1959 #if __FreeBSD_version >= 700000 1960 ifp->if_capabilities |= IFCAP_TSO4; 1961 if (xn_enable_lro) { 1962 int err = tcp_lro_init(&np->xn_lro); 1963 if (err) { 1964 device_printf(dev, "LRO initialization failed\n"); 1965 goto exit; 1966 } 1967 np->xn_lro.ifp = ifp; 1968 ifp->if_capabilities |= IFCAP_LRO; 1969 } 1970 #endif 1971 ifp->if_capenable = ifp->if_capabilities; 1972 1973 ether_ifattach(ifp, np->mac); 1974 callout_init(&np->xn_stat_ch, CALLOUT_MPSAFE); 1975 netfront_carrier_off(np); 1976 1977 return (0); 1978 1979 exit: 1980 gnttab_free_grant_references(np->gref_tx_head); 1981 out: 1982 panic("do something smart"); 1983 1984 } 1985 1986 /** 1987 * Handle the change of state of the backend to Closing. We must delete our 1988 * device-layer structures now, to ensure that writes are flushed through to 1989 * the backend. Once is this done, we can switch to Closed in 1990 * acknowledgement. 
1991 */ 1992 #if 0 1993 static void netfront_closing(device_t dev) 1994 { 1995 #if 0 1996 struct netfront_info *info = dev->dev_driver_data; 1997 1998 DPRINTK("netfront_closing: %s removed\n", dev->nodename); 1999 2000 close_netdev(info); 2001 #endif 2002 xenbus_switch_state(dev, XenbusStateClosed); 2003 } 2004 #endif 2005 2006 static int netfront_detach(device_t dev) 2007 { 2008 struct netfront_info *info = device_get_softc(dev); 2009 2010 DPRINTK("%s\n", xenbus_get_node(dev)); 2011 2012 netif_free(info); 2013 2014 return 0; 2015 } 2016 2017 2018 static void netif_free(struct netfront_info *info) 2019 { 2020 netif_disconnect_backend(info); 2021 #if 0 2022 close_netdev(info); 2023 #endif 2024 } 2025 2026 static void netif_disconnect_backend(struct netfront_info *info) 2027 { 2028 XN_RX_LOCK(info); 2029 XN_TX_LOCK(info); 2030 netfront_carrier_off(info); 2031 XN_TX_UNLOCK(info); 2032 XN_RX_UNLOCK(info); 2033 2034 end_access(info->tx_ring_ref, info->tx.sring); 2035 end_access(info->rx_ring_ref, info->rx.sring); 2036 info->tx_ring_ref = GRANT_INVALID_REF; 2037 info->rx_ring_ref = GRANT_INVALID_REF; 2038 info->tx.sring = NULL; 2039 info->rx.sring = NULL; 2040 2041 if (info->irq) 2042 unbind_from_irqhandler(info->irq); 2043 2044 info->irq = 0; 2045 } 2046 2047 2048 static void end_access(int ref, void *page) 2049 { 2050 if (ref != GRANT_INVALID_REF) 2051 gnttab_end_foreign_access(ref, page); 2052 } 2053 2054 /* ** Driver registration ** */ 2055 static device_method_t netfront_methods[] = { 2056 /* Device interface */ 2057 DEVMETHOD(device_probe, netfront_probe), 2058 DEVMETHOD(device_attach, netfront_attach), 2059 DEVMETHOD(device_detach, netfront_detach), 2060 DEVMETHOD(device_shutdown, bus_generic_shutdown), 2061 DEVMETHOD(device_suspend, bus_generic_suspend), 2062 DEVMETHOD(device_resume, netfront_resume), 2063 2064 /* Xenbus interface */ 2065 DEVMETHOD(xenbus_backend_changed, netfront_backend_changed), 2066 2067 { 0, 0 } 2068 }; 2069 2070 static driver_t netfront_driver = { 2071 "xn", 2072 netfront_methods, 2073 sizeof(struct netfront_info), 2074 }; 2075 devclass_t netfront_devclass; 2076 2077 DRIVER_MODULE(xe, xenbus, netfront_driver, netfront_devclass, 0, 0); 2078
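
/*
 * Driver attach path, for reference: the xenbus bus code matches this driver
 * against XenStore nodes of type "vif" (netfront_probe), netfront_attach()
 * creates the ifnet via create_netdev(), and the shared rings and event
 * channel are set up in talk_to_backend()/setup_device() once the backend
 * reaches XenbusStateInitWait (see netfront_backend_changed() and
 * network_connect()).
 */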