/*-
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \file netback.c
 *
 * \brief Device driver supporting the vending of network access
 *        from this FreeBSD domain to other domains.
 */
#include "opt_inet.h"
#include "opt_inet6.h"

#include "opt_sctp.h"

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/bus.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#if __FreeBSD_version >= 700000
#include <netinet/tcp.h>
#endif
#include <netinet/ip_icmp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/_inttypes.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/interface/io/netif.h>
#include <xen/xenbus/xenbusvar.h>

/*--------------------------- Compile-time Tunables --------------------------*/

/*---------------------------------- Macros ----------------------------------*/
/**
 * Custom malloc type for all driver allocations.
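 *
 * The short description ("xnb") is what malloc(9) statistics report,
 * so driver allocations can be audited with vmstat -m.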
 */
static MALLOC_DEFINE(M_XENNETBACK, "xnb", "Xen Net Back Driver Data");

#define	XNB_SG		1	/* netback driver supports feature-sg */
#define	XNB_GSO_TCPV4	0	/* netback driver supports feature-gso-tcpv4 */
#define	XNB_RX_COPY	1	/* netback driver supports feature-rx-copy */
#define	XNB_RX_FLIP	0	/* netback driver does not support feature-rx-flip */

#undef XNB_DEBUG
#define	XNB_DEBUG /* hardcode on during development */

#ifdef XNB_DEBUG
#define	DPRINTF(fmt, args...) \
	printf("xnb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define	DPRINTF(fmt, args...) do {} while (0)
#endif

/* Default length for stack-allocated grant tables */
#define	GNTTAB_LEN	(64)

/* Features supported by all backends.  TSO and LRO can be negotiated */
#define	XNB_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

#define	NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define	NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)

/**
 * Two argument version of the standard macro.  Second argument is a
 * tentative value of req_cons
 */
#define	RING_HAS_UNCONSUMED_REQUESTS_2(_r, cons) ({			\
	unsigned int req = (_r)->sring->req_prod - cons;		\
	unsigned int rsp = RING_SIZE(_r) -				\
	    (cons - (_r)->rsp_prod_pvt);				\
	req < rsp ? req : rsp;						\
})

#define	virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
#define	virt_to_offset(x) ((x) & (PAGE_SIZE - 1))

/**
 * Predefined array type of grant table copy descriptors.  Used to pass around
 * statically allocated memory structures.
 */
typedef struct gnttab_copy gnttab_copy_table[GNTTAB_LEN];

/*--------------------------- Forward Declarations ---------------------------*/
struct xnb_softc;
struct xnb_pkt;

static void	xnb_attach_failed(struct xnb_softc *xnb,
			int err, const char *fmt, ...)
			__printflike(3,4);
static int	xnb_shutdown(struct xnb_softc *xnb);
static int	create_netdev(device_t dev);
static int	xnb_detach(device_t dev);
static int	xnb_ifmedia_upd(struct ifnet *ifp);
static void	xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	xnb_intr(void *arg);
static int	xnb_send(netif_rx_back_ring_t *rxb, domid_t otherend,
			 const struct mbuf *mbufc, gnttab_copy_table gnttab);
static int	xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend,
			 struct mbuf **mbufc, struct ifnet *ifnet,
			 gnttab_copy_table gnttab);
static int	xnb_ring2pkt(struct xnb_pkt *pkt,
			     const netif_tx_back_ring_t *tx_ring,
			     RING_IDX start);
static void	xnb_txpkt2rsp(const struct xnb_pkt *pkt,
			      netif_tx_back_ring_t *ring, int error);
static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp);
static int	xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
				 struct mbuf *mbufc,
				 gnttab_copy_table gnttab,
				 const netif_tx_back_ring_t *txb,
				 domid_t otherend_id);
static void	xnb_update_mbufc(struct mbuf *mbufc,
				 const gnttab_copy_table gnttab, int n_entries);
static int	xnb_mbufc2pkt(const struct mbuf *mbufc,
			      struct xnb_pkt *pkt,
			      RING_IDX start, int space);
static int	xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
				 const struct mbuf *mbufc,
				 gnttab_copy_table gnttab,
				 const netif_rx_back_ring_t *rxb,
				 domid_t otherend_id);
static int	xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
			      const gnttab_copy_table gnttab, int n_entries,
			      netif_rx_back_ring_t *ring);
static void	xnb_stop(struct xnb_softc*);
static int	xnb_ioctl(struct ifnet*, u_long, caddr_t);
static void	xnb_start_locked(struct ifnet*);
static void	xnb_start(struct ifnet*);
static void	xnb_ifinit_locked(struct xnb_softc*);
static void	xnb_ifinit(void*);
#ifdef XNB_DEBUG
static int	xnb_unit_test_main(SYSCTL_HANDLER_ARGS);
static int	xnb_dump_rings(SYSCTL_HANDLER_ARGS);
#endif
#if defined(INET) || defined(INET6)
static void	xnb_add_mbuf_cksum(struct mbuf *mbufc);
#endif
/*------------------------------ Data Structures -----------------------------*/


/**
 * Representation of a xennet packet.  Simplified version of a packet as
 * stored in the Xen tx ring.  Applicable to both RX and TX packets.
 */
struct xnb_pkt {
	/**
	 * Array index of the first data-bearing (e.g., not extra info) entry
	 * for this packet
	 */
	RING_IDX	car;

	/**
	 * Array index of the second data-bearing entry for this packet.
	 * Invalid if the packet has only one data-bearing entry.  If the
	 * packet has more than two data-bearing entries, then the second
	 * through the last will be sequential modulo the ring size
	 */
	RING_IDX	cdr;

	/**
	 * Optional extra info.  Only valid if flags contains
	 * NETTXF_extra_info.  Note that extra.type will always be
	 * XEN_NETIF_EXTRA_TYPE_GSO.  Currently, no known netfront or netback
	 * driver will ever set XEN_NETIF_EXTRA_TYPE_MCAST_*
	 */
	netif_extra_info_t extra;

	/** Size of entire packet in bytes. */
	uint16_t	size;

	/** The size of the first entry's data in bytes */
	uint16_t	car_size;

	/**
	 * Either NETTXF_ or NETRXF_ flags.
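	 * NETTXF_* flags describe requests read from the TX ring, while
	 * NETRXF_* flags describe responses written to the RX ring.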
	 * Note that the flag values are not the same for TX and RX
	 * packets.
	 */
	uint16_t	flags;

	/**
	 * The number of valid data-bearing entries (either netif_tx_request's
	 * or netif_rx_response's) in the packet.  If this is 0, it means the
	 * entire packet is invalid.
	 */
	uint16_t	list_len;

	/** There was an error processing the packet */
	uint8_t		error;
};

/** xnb_pkt method: initialize it */
static inline void
xnb_pkt_initialize(struct xnb_pkt *pxnb)
{
	bzero(pxnb, sizeof(*pxnb));
}

/** xnb_pkt method: mark the packet as valid */
static inline void
xnb_pkt_validate(struct xnb_pkt *pxnb)
{
	pxnb->error = 0;
}

/** xnb_pkt method: mark the packet as invalid */
static inline void
xnb_pkt_invalidate(struct xnb_pkt *pxnb)
{
	pxnb->error = 1;
}

/** xnb_pkt method: Check whether the packet is valid */
static inline int
xnb_pkt_is_valid(const struct xnb_pkt *pxnb)
{
	return (! pxnb->error);
}

#ifdef XNB_DEBUG
/** xnb_pkt method: print the packet's contents in human-readable format */
static void __unused
xnb_dump_pkt(const struct xnb_pkt *pkt) {
	if (pkt == NULL) {
		DPRINTF("Was passed a null pointer.\n");
		return;
	}
	DPRINTF("pkt address= %p\n", pkt);
	DPRINTF("pkt->size=%d\n", pkt->size);
	DPRINTF("pkt->car_size=%d\n", pkt->car_size);
	DPRINTF("pkt->flags=0x%04x\n", pkt->flags);
	DPRINTF("pkt->list_len=%d\n", pkt->list_len);
	/* DPRINTF("pkt->extra");  TODO */
	DPRINTF("pkt->car=%d\n", pkt->car);
	DPRINTF("pkt->cdr=%d\n", pkt->cdr);
	DPRINTF("pkt->error=%d\n", pkt->error);
}
#endif /* XNB_DEBUG */

static void
xnb_dump_txreq(RING_IDX idx, const struct netif_tx_request *txreq)
{
	if (txreq != NULL) {
		DPRINTF("netif_tx_request index =%u\n", idx);
		DPRINTF("netif_tx_request.gref  =%u\n", txreq->gref);
		DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset);
		DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags);
		DPRINTF("netif_tx_request.id    =%hu\n", txreq->id);
		DPRINTF("netif_tx_request.size  =%hu\n", txreq->size);
	}
}


/**
 * \brief Configuration data for a shared memory request ring
 *        used to communicate with the front-end client of this
 *        driver.
 */
struct xnb_ring_config {
	/**
	 * Runtime structures for ring access.  Unfortunately, TX and RX rings
	 * use different data structures, and that cannot be changed since it
	 * is part of the interdomain protocol.
	 */
	union {
		netif_rx_back_ring_t	  rx_ring;
		netif_tx_back_ring_t	  tx_ring;
	} back_ring;

	/**
	 * The device bus address returned by the hypervisor when
	 * mapping the ring and required to unmap it when a connection
	 * is torn down.
	 */
	uint64_t	bus_addr;

	/** The pseudo-physical address where ring memory is mapped.*/
	uint64_t	gnt_addr;

	/** KVA address where ring memory is mapped. */
	vm_offset_t	va;

	/**
	 * Grant table handles, one per-ring page, returned by the
	 * hypervisor upon mapping of the ring and required to
	 * unmap it when a connection is torn down.
	 */
	grant_handle_t	handle;

	/** The number of ring pages mapped for the current connection.
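	 * This driver currently uses single-page rings: xnb_attach()
	 * initializes ring_pages to 1 for both ring types.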
	 */
	unsigned	ring_pages;

	/**
	 * The grant references, one per-ring page, supplied by the
	 * front-end, allowing us to reference the ring pages in the
	 * front-end's domain and to map these pages into our own domain.
	 */
	grant_ref_t	ring_ref;
};

/**
 * Per-instance connection state flags.
 */
typedef enum
{
	/** Communication with the front-end has been established. */
	XNBF_RING_CONNECTED	= 0x01,

	/**
	 * Front-end requests exist in the ring and are waiting for
	 * xnb_xen_req objects to free up.
	 */
	XNBF_RESOURCE_SHORTAGE	= 0x02,

	/** Connection teardown has started. */
	XNBF_SHUTDOWN		= 0x04,

	/** A thread is already performing shutdown processing. */
	XNBF_IN_SHUTDOWN	= 0x08
} xnb_flag_t;

/**
 * Types of rings.  Used for array indices and to identify a ring's control
 * data structure type.
 */
typedef enum {
	XNB_RING_TYPE_TX = 0,	/* ID of TX rings, used for array indices */
	XNB_RING_TYPE_RX = 1,	/* ID of RX rings, used for array indices */
	XNB_NUM_RING_TYPES
} xnb_ring_type_t;

/**
 * Per-instance configuration data.
 */
struct xnb_softc {
	/** NewBus device corresponding to this instance. */
	device_t		dev;

	/* Media related fields */

	/** Generic network media state */
	struct ifmedia		sc_media;

	/** Media carrier info */
	struct ifnet		*xnb_ifp;

	/** Our own private carrier state */
	unsigned		carrier;

	/** Device MAC Address */
	uint8_t			mac[ETHER_ADDR_LEN];

	/* Xen related fields */

	/**
	 * \brief The netif protocol abi in effect.
	 *
	 * There are situations where the back and front ends can
	 * have a different, native abi (e.g. intel x86_64 and
	 * 32bit x86 domains on the same machine).  The back-end
	 * always accommodates the front-end's native abi.  That
	 * value is pulled from the XenStore and recorded here.
	 */
	int			abi;

	/**
	 * Name of the bridge to which this VIF is connected, if any.
	 * This field is dynamically allocated by xenbus and must be free()ed
	 * when no longer needed.
	 */
	char			*bridge;

	/** The interrupt driven event channel used to signal ring events. */
	evtchn_port_t		evtchn;

	/** Xen device handle.*/
	long			handle;

	/** Handle to the communication ring event channel. */
	xen_intr_handle_t	xen_intr_handle;

	/**
	 * \brief Cached value of the front-end's domain id.
	 *
	 * This value is used once for each mapped page in
	 * a transaction.  We cache it to avoid incurring the
	 * cost of an ivar access every time this is needed.
	 */
	domid_t			otherend_id;

	/**
	 * Undocumented frontend feature.  Has something to do with
	 * scatter/gather IO
	 */
	uint8_t			can_sg;
	/** Undocumented frontend feature */
	uint8_t			gso;
	/** Undocumented frontend feature */
	uint8_t			gso_prefix;
	/** Can checksum TCP/UDP over IPv4 */
	uint8_t			ip_csum;

	/* Implementation related fields */
	/**
	 * Preallocated grant table copy descriptor for RX operations.
	 * Access must be protected by rx_lock.
	 */
	gnttab_copy_table	rx_gnttab;

	/**
	 * Preallocated grant table copy descriptor for TX operations.
	 * Access must be protected by tx_lock.
	 */
	gnttab_copy_table	tx_gnttab;

	/**
	 * Resource representing allocated physical address space
	 * associated with our per-instance kva region.
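	 * Obtained via xenmem_alloc() in xnb_alloc_communication_mem()
	 * and released via xenmem_free() when the connection is torn down.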
	 */
	struct resource		*pseudo_phys_res;

	/** Resource id for allocated physical address space. */
	int			pseudo_phys_res_id;

	/** Ring mapping and interrupt configuration data. */
	struct xnb_ring_config	ring_configs[XNB_NUM_RING_TYPES];

	/**
	 * Global pool of kva used for mapping remote domain ring
	 * and I/O transaction data.
	 */
	vm_offset_t		kva;

	/** Pseudo-physical address corresponding to kva. */
	uint64_t		gnt_base_addr;

	/** Various configuration and state bit flags. */
	xnb_flag_t		flags;

	/** Mutex protecting per-instance data in the receive path. */
	struct mtx		rx_lock;

	/** Mutex protecting per-instance data in the softc structure. */
	struct mtx		sc_lock;

	/** Mutex protecting per-instance data in the transmit path. */
	struct mtx		tx_lock;

	/** The size of the global kva pool. */
	int			kva_size;

	/** Name of the interface */
	char			if_name[IFNAMSIZ];
};

/*---------------------------- Debugging functions ---------------------------*/
#ifdef XNB_DEBUG
static void __unused
xnb_dump_gnttab_copy(const struct gnttab_copy *entry)
{
	if (entry == NULL) {
		printf("NULL grant table pointer\n");
		return;
	}

	if (entry->flags & GNTCOPY_dest_gref)
		printf("gnttab dest ref=\t%u\n", entry->dest.u.ref);
	else
		printf("gnttab dest gmfn=\t%"PRI_xen_pfn"\n",
		       entry->dest.u.gmfn);
	printf("gnttab dest offset=\t%hu\n", entry->dest.offset);
	printf("gnttab dest domid=\t%hu\n", entry->dest.domid);
	if (entry->flags & GNTCOPY_source_gref)
		printf("gnttab source ref=\t%u\n", entry->source.u.ref);
	else
		printf("gnttab source gmfn=\t%"PRI_xen_pfn"\n",
		       entry->source.u.gmfn);
	printf("gnttab source offset=\t%hu\n", entry->source.offset);
	printf("gnttab source domid=\t%hu\n", entry->source.domid);
	printf("gnttab len=\t%hu\n", entry->len);
	printf("gnttab flags=\t%hu\n", entry->flags);
	printf("gnttab status=\t%hd\n", entry->status);
}

static int
xnb_dump_rings(SYSCTL_HANDLER_ARGS)
{
	static char results[720];
	struct xnb_softc const* xnb = (struct xnb_softc*)arg1;
	netif_rx_back_ring_t const* rxb =
		&xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;
	netif_tx_back_ring_t const* txb =
		&xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

	/* empty the result strings */
	results[0] = 0;

	if (!txb || !txb->sring || !rxb || !rxb->sring)
		return (SYSCTL_OUT(req, results, strnlen(results, 720)));

	snprintf(results, 720,
	    "\n\t%35s %18s\n"		/* TX, RX */
	    "\t%16s %18d %18d\n"	/* req_cons */
	    "\t%16s %18d %18d\n"	/* nr_ents */
	    "\t%16s %18d %18d\n"	/* rsp_prod_pvt */
	    "\t%16s %18p %18p\n"	/* sring */
	    "\t%16s %18d %18d\n"	/* req_prod */
	    "\t%16s %18d %18d\n"	/* req_event */
	    "\t%16s %18d %18d\n"	/* rsp_prod */
	    "\t%16s %18d %18d\n",	/* rsp_event */
	    "TX", "RX",
	    "req_cons", txb->req_cons, rxb->req_cons,
	    "nr_ents", txb->nr_ents, rxb->nr_ents,
	    "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt,
	    "sring", txb->sring, rxb->sring,
	    "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod,
	    "sring->req_event", txb->sring->req_event, rxb->sring->req_event,
	    "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod,
	    "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event);

	return (SYSCTL_OUT(req, results, strnlen(results, 720)));
}

static void __unused
xnb_dump_mbuf(const struct mbuf *m)
{
	int len;
	uint8_t *d;
	if (m == NULL)
		return;

	printf("xnb_dump_mbuf:\n");
	if (m->m_flags & M_PKTHDR) {
		printf(" flowid=%10d, csum_flags=%#8x, csum_data=%#8x, "
		       "tso_segsz=%5hd\n",
		       m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags,
		       m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz);
		printf(" rcvif=%16p, len=%19d\n",
		       m->m_pkthdr.rcvif, m->m_pkthdr.len);
	}
	printf(" m_next=%16p, m_nextpkt=%16p, m_data=%16p\n",
	       m->m_next, m->m_nextpkt, m->m_data);
	printf(" m_len=%17d, m_flags=%#15x, m_type=%18u\n",
	       m->m_len, m->m_flags, m->m_type);

	len = m->m_len;
	d = mtod(m, uint8_t*);
	while (len > 0) {
		int i;
		printf(" ");
		for (i = 0; (i < 16) && (len > 0); i++, len--) {
			printf("%02hhx ", *(d++));
		}
		printf("\n");
	}
}
#endif /* XNB_DEBUG */

/*------------------------ Inter-Domain Communication ------------------------*/
/**
 * Free dynamically allocated KVA or pseudo-physical address allocations.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_free_communication_mem(struct xnb_softc *xnb)
{
	if (xnb->kva != 0) {
		if (xnb->pseudo_phys_res != NULL) {
			xenmem_free(xnb->dev, xnb->pseudo_phys_res_id,
			    xnb->pseudo_phys_res);
			xnb->pseudo_phys_res = NULL;
		}
	}
	xnb->kva = 0;
	xnb->gnt_base_addr = 0;
}

/**
 * Cleanup all inter-domain communication mechanisms.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_disconnect(struct xnb_softc *xnb)
{
	struct gnttab_unmap_grant_ref gnts[XNB_NUM_RING_TYPES];
	int error;
	int i;

	if (xnb->xen_intr_handle != NULL)
		xen_intr_unbind(&xnb->xen_intr_handle);

	/*
	 * We may still have another thread currently processing requests.  We
	 * must acquire the rx and tx locks to make sure those threads are done,
	 * but we can release those locks as soon as we acquire them, because no
	 * more interrupts will be arriving.
	 */
	mtx_lock(&xnb->tx_lock);
	mtx_unlock(&xnb->tx_lock);
	mtx_lock(&xnb->rx_lock);
	mtx_unlock(&xnb->rx_lock);

	/* Free malloc'd softc member variables */
	if (xnb->bridge != NULL) {
		free(xnb->bridge, M_XENSTORE);
		xnb->bridge = NULL;
	}

	/* All request processing has stopped, so unmap the rings */
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		gnts[i].host_addr = xnb->ring_configs[i].gnt_addr;
		gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr;
		gnts[i].handle = xnb->ring_configs[i].handle;
	}
	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, gnts,
					  XNB_NUM_RING_TYPES);
	KASSERT(error == 0, ("Grant table unmap op failed (%d)", error));

	xnb_free_communication_mem(xnb);
	/*
	 * Zero the ring config structs because the pointers, handles, and
	 * grant refs contained therein are no longer valid.
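	 * A subsequent reconnect will repopulate them in
	 * xnb_connect_ring().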
	 */
	bzero(&xnb->ring_configs[XNB_RING_TYPE_TX],
	    sizeof(struct xnb_ring_config));
	bzero(&xnb->ring_configs[XNB_RING_TYPE_RX],
	    sizeof(struct xnb_ring_config));

	xnb->flags &= ~XNBF_RING_CONNECTED;
	return (0);
}

/**
 * Map a single shared memory ring into domain local address space and
 * initialize its control structure.
 *
 * \param xnb		Per-instance xnb configuration structure
 * \param ring_type	Array index of this ring in the xnb's array of rings
 * \return		An errno
 */
static int
xnb_connect_ring(struct xnb_softc *xnb, xnb_ring_type_t ring_type)
{
	struct gnttab_map_grant_ref gnt;
	struct xnb_ring_config *ring = &xnb->ring_configs[ring_type];
	int error;

	/* TX ring type = 0, RX = 1 */
	ring->va = xnb->kva + ring_type * PAGE_SIZE;
	ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE;

	gnt.host_addr = ring->gnt_addr;
	gnt.flags = GNTMAP_host_map;
	gnt.ref = ring->ring_ref;
	gnt.dom = xnb->otherend_id;

	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &gnt, 1);
	if (error != 0)
		panic("netback: Ring page grant table op failed (%d)", error);

	if (gnt.status != 0) {
		ring->va = 0;
		error = EACCES;
		xenbus_dev_fatal(xnb->dev, error,
				 "Ring shared page mapping failed. "
				 "Status %d.", gnt.status);
	} else {
		ring->handle = gnt.handle;
		ring->bus_addr = gnt.dev_bus_addr;

		if (ring_type == XNB_RING_TYPE_TX) {
			BACK_RING_INIT(&ring->back_ring.tx_ring,
			    (netif_tx_sring_t*)ring->va,
			    ring->ring_pages * PAGE_SIZE);
		} else if (ring_type == XNB_RING_TYPE_RX) {
			BACK_RING_INIT(&ring->back_ring.rx_ring,
			    (netif_rx_sring_t*)ring->va,
			    ring->ring_pages * PAGE_SIZE);
		} else {
			xenbus_dev_fatal(xnb->dev, error,
				 "Unknown ring type %d", ring_type);
		}
	}

	return error;
}

/**
 * Setup the shared memory rings and bind an interrupt to the event channel
 * used to notify us of ring changes.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_connect_comms(struct xnb_softc *xnb)
{
	int	error;
	xnb_ring_type_t i;

	if ((xnb->flags & XNBF_RING_CONNECTED) != 0)
		return (0);

	/*
	 * KVA for our rings is at the tail of the region of kva allocated
	 * by xnb_alloc_communication_mem().
	 */
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		error = xnb_connect_ring(xnb, i);
		if (error != 0)
			return error;
	}

	xnb->flags |= XNBF_RING_CONNECTED;

	error = xen_intr_bind_remote_port(xnb->dev,
					  xnb->otherend_id,
					  xnb->evtchn,
					  /*filter*/NULL,
					  xnb_intr, /*arg*/xnb,
					  INTR_TYPE_BIO | INTR_MPSAFE,
					  &xnb->xen_intr_handle);
	if (error != 0) {
		(void)xnb_disconnect(xnb);
		xenbus_dev_fatal(xnb->dev, error, "binding event channel");
		return (error);
	}

	DPRINTF("rings connected!\n");

	return (0);
}

/**
 * Size KVA and pseudo-physical address allocations based on negotiated
 * values for the size and number of I/O requests, and the size of our
 * communication ring.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * These address spaces are used to dynamically map pages in the
 * front-end's domain into our own.
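 *
 * For example, with the single-page rings established by xnb_attach()
 * (ring_pages == 1 for both ring types), the loop below sizes the pool
 * to 2 * PAGE_SIZE: one page for the TX ring plus one for the RX ring.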
 */
static int
xnb_alloc_communication_mem(struct xnb_softc *xnb)
{
	xnb_ring_type_t i;

	xnb->kva_size = 0;
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE;
	}

	/*
	 * Reserve a range of pseudo physical memory that we can map
	 * into kva.  These pages will only be backed by machine
	 * pages ("real memory") during the lifetime of front-end requests
	 * via grant table operations.  We will map the netif tx and rx rings
	 * into this space.
	 */
	xnb->pseudo_phys_res_id = 0;
	xnb->pseudo_phys_res = xenmem_alloc(xnb->dev, &xnb->pseudo_phys_res_id,
	    xnb->kva_size);
	if (xnb->pseudo_phys_res == NULL) {
		xnb->kva = 0;
		return (ENOMEM);
	}
	xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res);
	xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res);
	return (0);
}

/**
 * Collect information from the XenStore related to our device and its
 * frontend.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_collect_xenstore_info(struct xnb_softc *xnb)
{
	/**
	 * \todo Linux collects the following info.  We should collect most
	 * of this, too:
	 * "feature-rx-notify"
	 */
	const char *otherend_path;
	const char *our_path;
	int err;
	unsigned int rx_copy, bridge_len;
	uint8_t no_csum_offload;

	otherend_path = xenbus_get_otherend_path(xnb->dev);
	our_path = xenbus_get_node(xnb->dev);

	/* Collect the critical communication parameters */
	err = xs_gather(XST_NIL, otherend_path,
	    "tx-ring-ref", "%l" PRIu32,
		&xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref,
	    "rx-ring-ref", "%l" PRIu32,
		&xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref,
	    "event-channel", "%" PRIu32, &xnb->evtchn,
	    NULL);
	if (err != 0) {
		xenbus_dev_fatal(xnb->dev, err,
				 "Unable to retrieve ring information from "
				 "frontend %s.  Unable to connect.",
				 otherend_path);
		return (err);
	}

	/* Collect the handle from xenstore */
	err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle);
	if (err != 0) {
		xenbus_dev_fatal(xnb->dev, err,
		    "Error reading handle from frontend %s.  "
		    "Unable to connect.", otherend_path);
	}

	/*
	 * Collect the bridgename, if any.  We do not need bridge_len; we just
	 * throw it away.
	 */
	err = xs_read(XST_NIL, our_path, "bridge", &bridge_len,
		      (void**)&xnb->bridge);
	if (err != 0)
		xnb->bridge = NULL;

	/*
	 * Does the frontend request that we use rx copy?  If not, return an
	 * error because this driver only supports rx copy.
	 */
	err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL,
		       "%" PRIu32, &rx_copy);
	if (err == ENOENT) {
		err = 0;
		rx_copy = 0;
	}
	if (err < 0) {
		xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy",
				 otherend_path);
		return err;
	}
	/**
	 * \todo: figure out the exact meaning of this feature, and when
	 * the frontend will set it to true.  It should be set to true
	 * at some point
	 */
	/* if (!rx_copy)*/
	/*	return EOPNOTSUPP;*/

	/** \todo Collect the rx notify feature */

	/* Collect the feature-sg.
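	 * In the netif protocol this is generally understood to mean that
	 * the frontend can handle packets whose data spans multiple ring
	 * slots (chained via NETTXF_more_data).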
	 */
	if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL,
		     "%hhu", &xnb->can_sg) < 0)
		xnb->can_sg = 0;

	/* Collect remaining frontend features */
	if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL,
		     "%hhu", &xnb->gso) < 0)
		xnb->gso = 0;

	if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL,
		     "%hhu", &xnb->gso_prefix) < 0)
		xnb->gso_prefix = 0;

	if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL,
		     "%hhu", &no_csum_offload) < 0)
		no_csum_offload = 0;
	xnb->ip_csum = (no_csum_offload == 0);

	return (0);
}

/**
 * Supply information about the physical device to the frontend
 * via XenBus.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static int
xnb_publish_backend_info(struct xnb_softc *xnb)
{
	struct xs_transaction xst;
	const char *our_path;
	int error;

	our_path = xenbus_get_node(xnb->dev);

	do {
		error = xs_transaction_start(&xst);
		if (error != 0) {
			xenbus_dev_fatal(xnb->dev, error,
					 "Error publishing backend info "
					 "(start transaction)");
			break;
		}

		error = xs_printf(xst, our_path, "feature-sg",
				  "%d", XNB_SG);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-gso-tcpv4",
				  "%d", XNB_GSO_TCPV4);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-rx-copy",
				  "%d", XNB_RX_COPY);
		if (error != 0)
			break;

		error = xs_printf(xst, our_path, "feature-rx-flip",
				  "%d", XNB_RX_FLIP);
		if (error != 0)
			break;

		error = xs_transaction_end(xst, 0);
		if (error != 0 && error != EAGAIN) {
			xenbus_dev_fatal(xnb->dev, error, "ending transaction");
			break;
		}

	} while (error == EAGAIN);

	return (error);
}

/**
 * Connect to our netfront peer now that it has completed publishing
 * its configuration into the XenStore.
 *
 * \param xnb  Per-instance xnb configuration structure.
 */
static void
xnb_connect(struct xnb_softc *xnb)
{
	int	error;

	if (xenbus_get_state(xnb->dev) == XenbusStateConnected)
		return;

	if (xnb_collect_xenstore_info(xnb) != 0)
		return;

	xnb->flags &= ~XNBF_SHUTDOWN;

	/* Read front end configuration. */

	/* Allocate resources whose size depends on front-end configuration. */
	error = xnb_alloc_communication_mem(xnb);
	if (error != 0) {
		xenbus_dev_fatal(xnb->dev, error,
				 "Unable to allocate communication memory");
		return;
	}

	/*
	 * Connect communication channel.
	 */
	error = xnb_connect_comms(xnb);
	if (error != 0) {
		/* Specific errors are reported by xnb_connect_comms(). */
		return;
	}
	xnb->carrier = 1;

	/* Ready for I/O. */
	xenbus_set_state(xnb->dev, XenbusStateConnected);
}

/*-------------------------- Device Teardown Support -------------------------*/
/**
 * Perform device shutdown functions.
 *
 * \param xnb  Per-instance xnb configuration structure.
 *
 * Mark this instance as shutting down, wait for any active requests
 * to drain, disconnect from the front-end, and notify any waiters (e.g.
 * a thread invoking our detach method) that detach can now proceed.
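 *
 * Returns EAGAIN when another thread is already mid-shutdown; callers
 * such as xnb_detach() sleep on the softc and retry until this
 * function returns 0.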
 */
static int
xnb_shutdown(struct xnb_softc *xnb)
{
	/*
	 * Due to the need to drop our mutex during some
	 * xenbus operations, it is possible for two threads
	 * to attempt to close out shutdown processing at
	 * the same time.  Tell the caller that hits this
	 * race to try back later.
	 */
	if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0)
		return (EAGAIN);

	xnb->flags |= XNBF_SHUTDOWN;

	xnb->flags |= XNBF_IN_SHUTDOWN;

	mtx_unlock(&xnb->sc_lock);
	/* Free the network interface */
	xnb->carrier = 0;
	if (xnb->xnb_ifp != NULL) {
		ether_ifdetach(xnb->xnb_ifp);
		if_free(xnb->xnb_ifp);
		xnb->xnb_ifp = NULL;
	}
	mtx_lock(&xnb->sc_lock);

	xnb_disconnect(xnb);

	mtx_unlock(&xnb->sc_lock);
	if (xenbus_get_state(xnb->dev) < XenbusStateClosing)
		xenbus_set_state(xnb->dev, XenbusStateClosing);
	mtx_lock(&xnb->sc_lock);

	xnb->flags &= ~XNBF_IN_SHUTDOWN;


	/* Indicate to xnb_detach() that it is safe to proceed. */
	wakeup(xnb);

	return (0);
}

/**
 * Report an attach time error to the console and Xen, and cleanup
 * this instance by forcing immediate detach processing.
 *
 * \param xnb  Per-instance xnb configuration structure.
 * \param err  Errno describing the error.
 * \param fmt  Printf style format and arguments
 */
static void
xnb_attach_failed(struct xnb_softc *xnb, int err, const char *fmt, ...)
{
	va_list ap;
	va_list ap_hotplug;

	va_start(ap, fmt);
	va_copy(ap_hotplug, ap);
	xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev),
		  "hotplug-error", fmt, ap_hotplug);
	va_end(ap_hotplug);
	(void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
		  "hotplug-status", "error");

	xenbus_dev_vfatal(xnb->dev, err, fmt, ap);
	va_end(ap);

	(void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "online", "0");
	xnb_detach(xnb->dev);
}

/*---------------------------- NewBus Entrypoints ----------------------------*/
/**
 * Inspect a XenBus device and claim it if it is of the appropriate type.
 *
 * \param dev  NewBus device object representing a candidate XenBus device.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_probe(device_t dev)
{
	if (!strcmp(xenbus_get_type(dev), "vif")) {
		DPRINTF("Claiming device %d, %s\n", device_get_unit(dev),
		    devclass_get_name(device_get_devclass(dev)));
		device_set_desc(dev, "Backend Virtual Network Device");
		device_quiet(dev);
		return (0);
	}
	return (ENXIO);
}

/**
 * Setup sysctl variables to control various Network Back parameters.
 *
 * \param xnb  Xen Net Back softc.
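 *
 * When XNB_DEBUG is defined, this registers two read-only string
 * nodes, "unit_test_results" and "dump_rings", under the device's
 * sysctl tree.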
 *
 */
static void
xnb_setup_sysctl(struct xnb_softc *xnb)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;

	sysctl_ctx = device_get_sysctl_ctx(xnb->dev);
	if (sysctl_ctx == NULL)
		return;

	sysctl_tree = device_get_sysctl_tree(xnb->dev);
	if (sysctl_tree == NULL)
		return;

#ifdef XNB_DEBUG
	SYSCTL_ADD_PROC(sysctl_ctx,
			SYSCTL_CHILDREN(sysctl_tree),
			OID_AUTO,
			"unit_test_results",
			CTLTYPE_STRING | CTLFLAG_RD,
			xnb,
			0,
			xnb_unit_test_main,
			"A",
			"Results of builtin unit tests");

	SYSCTL_ADD_PROC(sysctl_ctx,
			SYSCTL_CHILDREN(sysctl_tree),
			OID_AUTO,
			"dump_rings",
			CTLTYPE_STRING | CTLFLAG_RD,
			xnb,
			0,
			xnb_dump_rings,
			"A",
			"Xennet Back Rings");
#endif /* XNB_DEBUG */
}

/**
 * Create a network device.
 *
 * \param dev  NewBus device representing this virtual interface.
 */
static int
create_netdev(device_t dev)
{
	struct ifnet *ifp;
	struct xnb_softc *xnb;
	int err = 0;
	uint32_t handle;

	xnb = device_get_softc(dev);
	mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF);
	mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF);
	mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF);

	xnb->dev = dev;

	ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts);
	ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
	ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL);

	/*
	 * Set the MAC address to a dummy value (00:00:00:00:00:00).  If
	 * the MAC address of the host-facing interface were set to the
	 * same value as the guest-facing one (the value found in
	 * xenstore), the bridge would stop delivering packets to
	 * us because it would see that the destination address of
	 * the packet is the same as the interface's, and so the bridge
	 * would expect the packet to have already been delivered locally
	 * (and just drop it).
	 */
	bzero(&xnb->mac[0], sizeof(xnb->mac));

	/*
	 * The interface will be named using the following nomenclature:
	 *
	 *   xnb<domid>.<handle>
	 *
	 * where handle is the order of the interface as referred to by
	 * the guest.
	 */
	err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL,
		       "%" PRIu32, &handle);
	if (err != 0)
		return (err);
	snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32,
	    xenbus_get_otherend_id(dev), handle);

	if (err == 0) {
		/* Set up ifnet structure */
		ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER);
		ifp->if_softc = xnb;
		if_initname(ifp, xnb->if_name, IF_DUNIT_NONE);
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = xnb_ioctl;
		ifp->if_output = ether_output;
		ifp->if_start = xnb_start;
#ifdef notyet
		ifp->if_watchdog = xnb_watchdog;
#endif
		ifp->if_init = xnb_ifinit;
		ifp->if_mtu = ETHERMTU;
		ifp->if_snd.ifq_maxlen = NET_RX_RING_SIZE - 1;

		ifp->if_hwassist = XNB_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_capenable = IFCAP_HWCSUM;

		ether_ifattach(ifp, xnb->mac);
		xnb->carrier = 0;
	}

	return err;
}

/**
 * Attach to a XenBus device that has been claimed by our probe routine.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_attach(device_t dev)
{
	struct xnb_softc *xnb;
	int	error;
	xnb_ring_type_t	i;

	error = create_netdev(dev);
	if (error != 0) {
		xenbus_dev_fatal(dev, error, "creating netdev");
		return (error);
	}

	DPRINTF("Attaching to %s\n", xenbus_get_node(dev));

	/*
	 * Basic initialization.
	 * After this block it is safe to call xnb_detach()
	 * to clean up any allocated data for this instance.
	 */
	xnb = device_get_softc(dev);
	xnb->otherend_id = xenbus_get_otherend_id(dev);
	for (i = 0; i < XNB_NUM_RING_TYPES; i++) {
		xnb->ring_configs[i].ring_pages = 1;
	}

	/*
	 * Setup sysctl variables.
	 */
	xnb_setup_sysctl(xnb);

	/* Update hot-plug status to satisfy xend. */
	error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev),
			  "hotplug-status", "connected");
	if (error != 0) {
		xnb_attach_failed(xnb, error, "writing %s/hotplug-status",
				  xenbus_get_node(xnb->dev));
		return (error);
	}

	if ((error = xnb_publish_backend_info(xnb)) != 0) {
		/*
		 * If we can't publish our data, we cannot participate
		 * in this connection, and waiting for a front-end state
		 * change will not help the situation.
		 */
		xnb_attach_failed(xnb, error,
		    "Publishing backend status for %s",
		    xenbus_get_node(xnb->dev));
		return error;
	}

	/* Tell the front end that we are ready to connect. */
	xenbus_set_state(dev, XenbusStateInitWait);

	return (0);
}

/**
 * Detach from a net back device instance.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 *
 * \note A net back device may be detached at any time in its life-cycle,
 *       including part way through the attach process.  For this reason,
 *       initialization order and the initialization state checks in this
 *       routine must be carefully coupled so that attach time failures
 *       are gracefully handled.
 */
static int
xnb_detach(device_t dev)
{
	struct xnb_softc *xnb;

	DPRINTF("\n");

	xnb = device_get_softc(dev);
	mtx_lock(&xnb->sc_lock);
	while (xnb_shutdown(xnb) == EAGAIN) {
		msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0,
		       "xnb_shutdown", 0);
	}
	mtx_unlock(&xnb->sc_lock);
	DPRINTF("\n");

	mtx_destroy(&xnb->tx_lock);
	mtx_destroy(&xnb->rx_lock);
	mtx_destroy(&xnb->sc_lock);
	return (0);
}

/**
 * Prepare this net back device for suspension of this VM.
 *
 * \param dev  NewBus device object representing this Xen net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_suspend(device_t dev)
{
	return (0);
}

/**
 * Perform any processing required to recover from a suspended state.
 *
 * \param dev  NewBus device object representing this Xen Net Back instance.
 *
 * \return  0 for success, errno codes for failure.
 */
static int
xnb_resume(device_t dev)
{
	return (0);
}

/**
 * Handle state changes expressed via the XenStore by our front-end peer.
 *
 * \param dev             NewBus device object representing this Xen
 *                        Net Back instance.
 * \param frontend_state  The new state of the front-end.
 */
static void
xnb_frontend_changed(device_t dev, XenbusState frontend_state)
{
	struct xnb_softc *xnb;

	xnb = device_get_softc(dev);

	DPRINTF("frontend_state=%s, xnb_state=%s\n",
	        xenbus_strstate(frontend_state),
	        xenbus_strstate(xenbus_get_state(xnb->dev)));

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;
	case XenbusStateInitialised:
	case XenbusStateConnected:
		xnb_connect(xnb);
		break;
	case XenbusStateClosing:
	case XenbusStateClosed:
		mtx_lock(&xnb->sc_lock);
		xnb_shutdown(xnb);
		mtx_unlock(&xnb->sc_lock);
		if (frontend_state == XenbusStateClosed)
			xenbus_set_state(xnb->dev, XenbusStateClosed);
		break;
	default:
		xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}


/*---------------------------- Request Processing ----------------------------*/
/**
 * Interrupt handler bound to the shared ring's event channel.
 * Entry point for the xennet transmit path in netback.
 * Transfers packets from the Xen ring to the host's generic networking stack.
 *
 * \param arg  Callback argument registered during event channel
 *             binding - the xnb_softc for this instance.
 */
static void
xnb_intr(void *arg)
{
	struct xnb_softc *xnb;
	struct ifnet *ifp;
	netif_tx_back_ring_t *txb;
	RING_IDX req_prod_local;

	xnb = (struct xnb_softc *)arg;
	ifp = xnb->xnb_ifp;
	txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring;

	mtx_lock(&xnb->tx_lock);
	do {
		int notify;
		req_prod_local = txb->sring->req_prod;
		xen_rmb();

		for (;;) {
			struct mbuf *mbufc;
			int err;

			err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp,
			    	       xnb->tx_gnttab);
			if (err || (mbufc == NULL))
				break;

			/* Send the packet to the generic network stack */
			(*xnb->xnb_ifp->if_input)(xnb->xnb_ifp, mbufc);
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(txb, notify);
		if (notify != 0)
			xen_intr_signal(xnb->xen_intr_handle);

		txb->sring->req_event = txb->req_cons + 1;
		xen_mb();
	} while (txb->sring->req_prod != req_prod_local);
	mtx_unlock(&xnb->tx_lock);

	xnb_start(ifp);
}


/**
 * Build a struct xnb_pkt based on netif_tx_request's from a netif tx ring.
 * Will read exactly 0 or 1 packets from the ring; never a partial packet.
 * \param[out] pkt     The returned packet.  If there is an error building
 *                     the packet, pkt.list_len will be set to 0.
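 *                     Callers should test the result with
 *                     xnb_pkt_is_valid() before relying on it.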
 * \param[in]  tx_ring Pointer to the Ring that is the input to this function
 * \param[in]  start   The ring index of the first potential request
 * \return             The number of requests consumed to build this packet
 */
static int
xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring,
	     RING_IDX start)
{
	/*
	 * Outline:
	 * 1) Initialize pkt
	 * 2) Read the first request of the packet
	 * 3) Read the extras
	 * 4) Set cdr
	 * 5) Loop on the remainder of the packet
	 * 6) Finalize pkt (stuff like car_size and list_len)
	 */
	int idx = start;
	int discard = 0;	/* whether to discard the packet */
	int more_data = 0;	/* there are more requests past the last one */
	uint16_t cdr_size = 0;	/* accumulated size of requests 2 through n */

	xnb_pkt_initialize(pkt);

	/* Read the first request */
	if (RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
		pkt->size = tx->size;
		pkt->flags = tx->flags & ~NETTXF_more_data;
		more_data = tx->flags & NETTXF_more_data;
		pkt->list_len++;
		pkt->car = idx;
		idx++;
	}

	/* Read the extra info */
	if ((pkt->flags & NETTXF_extra_info) &&
	    RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_extra_info_t *ext =
		    (netif_extra_info_t*) RING_GET_REQUEST(tx_ring, idx);
		pkt->extra.type = ext->type;
		switch (pkt->extra.type) {
		case XEN_NETIF_EXTRA_TYPE_GSO:
			pkt->extra.u.gso = ext->u.gso;
			break;
		default:
			/*
			 * The reference Linux netfront driver will
			 * never set any other extra.type.  So we don't
			 * know what to do with it.  Let's print an
			 * error, then consume and discard the packet
			 */
			printf("xnb(%s:%d): Unknown extra info type %d."
			       "  Discarding packet\n",
			       __func__, __LINE__, pkt->extra.type);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring,
			    start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring,
			    idx));
			discard = 1;
			break;
		}

		pkt->extra.flags = ext->flags;
		if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) {
			/*
			 * The reference linux netfront driver never sets this
			 * flag (nor does any other known netfront).  So we
			 * will discard the packet.
			 */
			printf("xnb(%s:%d): Request sets "
			    "XEN_NETIF_EXTRA_FLAG_MORE, but we can't handle "
			    "that\n", __func__, __LINE__);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
			discard = 1;
		}

		idx++;
	}

	/* Set cdr.
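	 * cdr now indexes the slot just past the first request and any
	 * extra-info slot, i.e. the packet's second data-bearing entry.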
	 * If there is no more data, cdr is invalid */
	pkt->cdr = idx;

	/* Loop on remainder of packet */
	while (more_data && RING_HAS_UNCONSUMED_REQUESTS_2(tx_ring, idx)) {
		netif_tx_request_t *tx = RING_GET_REQUEST(tx_ring, idx);
		pkt->list_len++;
		cdr_size += tx->size;
		if (tx->flags & ~NETTXF_more_data) {
			/* There should be no other flags set at this point */
			printf("xnb(%s:%d): Request sets unknown flags %d "
			    "after the 1st request in the packet.\n",
			    __func__, __LINE__, tx->flags);
			xnb_dump_txreq(start, RING_GET_REQUEST(tx_ring, start));
			xnb_dump_txreq(idx, RING_GET_REQUEST(tx_ring, idx));
		}

		more_data = tx->flags & NETTXF_more_data;
		idx++;
	}

	/* Finalize packet */
	if (more_data != 0) {
		/* The ring ran out of requests before finishing the packet */
		xnb_pkt_invalidate(pkt);
		idx = start;	/* tell caller that we consumed no requests */
	} else {
		/* Calculate car_size */
		pkt->car_size = pkt->size - cdr_size;
	}
	if (discard != 0) {
		xnb_pkt_invalidate(pkt);
	}

	return idx - start;
}


/**
 * Respond to all the requests that constituted pkt.  Builds the responses and
 * writes them to the ring, but doesn't push them to the shared ring.
 * \param[in] pkt	the packet that needs a response
 * \param[in] error	true if there was an error handling the packet, such
 * 			as in the hypervisor copy op or mbuf allocation
 * \param[out] ring	Responses go here
 */
static void
xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring,
	      int error)
{
	/*
	 * Outline:
	 * 1) Respond to the first request
	 * 2) Respond to the extra info request
	 * Loop through every remaining request in the packet, generating
	 * responses that copy those requests' ids and set the status
	 * appropriately.
	 */
	netif_tx_request_t *tx;
	netif_tx_response_t *rsp;
	int i;
	uint16_t status;

	status = (xnb_pkt_is_valid(pkt) == 0) || error ?
	    NETIF_RSP_ERROR : NETIF_RSP_OKAY;
	KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car),
	    ("Cannot respond to ring requests out of order"));

	if (pkt->list_len >= 1) {
		uint16_t id;
		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;

		if (pkt->flags & NETTXF_extra_info) {
			rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
			rsp->status = NETIF_RSP_NULL;
			ring->rsp_prod_pvt++;
		}
	}

	for (i = 0; i < pkt->list_len - 1; i++) {
		uint16_t id;
		tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt);
		id = tx->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = status;
		ring->rsp_prod_pvt++;
	}
}

/**
 * Create an mbuf chain to represent a packet.  Initializes all of the headers
 * in the mbuf chain, but does not copy the data.  The returned chain must be
 * free()'d when no longer needed.
 * \param[in] pkt	A packet to model the mbuf chain after
 * \return	A newly allocated mbuf chain, possibly with clusters attached.
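 *		(m_getm(9) allocates enough mbufs and clusters to hold
 *		pkt->size bytes.)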
 *		NULL on failure
 */
static struct mbuf*
xnb_pkt2mbufc(const struct xnb_pkt *pkt, struct ifnet *ifp)
{
	/**
	 * \todo consider using a memory pool for mbufs instead of
	 * reallocating them for every packet
	 */
	/** \todo handle extra data */
	struct mbuf *m;

	m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA);

	if (m != NULL) {
		m->m_pkthdr.rcvif = ifp;
		if (pkt->flags & NETTXF_data_validated) {
			/*
			 * We lie to the host OS and always tell it that the
			 * checksums are ok, because the packet is unlikely to
			 * get corrupted going across domains.
			 */
			m->m_pkthdr.csum_flags = (
				CSUM_IP_CHECKED |
				CSUM_IP_VALID   |
				CSUM_DATA_VALID |
				CSUM_PSEUDO_HDR
				);
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	return m;
}

/**
 * Build a gnttab_copy table that can be used to copy data from a pkt
 * to an mbufc.  Does not actually perform the copy.  Always uses gref's on
 * the packet side.
 * \param[in]	pkt	pkt's associated requests form the src for
 * 			the copy operation
 * \param[in]	mbufc	mbufc's storage forms the dest for the copy operation
 * \param[out]	gnttab	Storage for the returned grant table
 * \param[in]	txb	Pointer to the backend ring structure
 * \param[in]	otherend_id	The domain ID of the other end of the copy
 * \return		The number of gnttab entries filled
 */
static int
xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc,
		 gnttab_copy_table gnttab, const netif_tx_back_ring_t *txb,
		 domid_t otherend_id)
{

	struct mbuf *mbuf = mbufc;	/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into tx ring buffer */
	int r_ofs = 0;	/* offset of next data within tx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining = pkt->size;

	while (size_remaining > 0) {
		const netif_tx_request_t *txq = RING_GET_REQUEST(txb, r_idx);
		const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs;
		const size_t req_size =
			r_idx == pkt->car ? pkt->car_size : txq->size;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].source.u.ref = txq->gref;
		gnttab[gnt_idx].source.domid = otherend_id;
		gnttab[gnt_idx].source.offset = txq->offset + r_ofs;
		gnttab[gnt_idx].dest.u.gmfn = virt_to_mfn(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.offset = virt_to_offset(
		    mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].dest.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_source_gref;

		gnt_idx++;
		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next tx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ?
			    pkt->cdr : r_idx + 1;
		}
		if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}

/**
 * Check the status of the grant copy operations, and update the mbuf
 * chain's various non-data fields to reflect the data present.
 * \param[in,out] mbufc	mbuf chain to update.  The chain must be valid and of
 * 			the correct length, and data should already be present
 * \param[in] gnttab	A grant table for a just completed copy op
 * \param[in] n_entries The number of valid entries in the grant table
 */
static void
xnb_update_mbufc(struct mbuf *mbufc, const gnttab_copy_table gnttab,
		 int n_entries)
{
	struct mbuf *mbuf = mbufc;
	int i;
	size_t total_size = 0;

	for (i = 0; i < n_entries; i++) {
		KASSERT(gnttab[i].status == GNTST_okay,
		    ("Some gnttab_copy entry had error status %hd\n",
		    gnttab[i].status));

		mbuf->m_len += gnttab[i].len;
		total_size += gnttab[i].len;
		if (M_TRAILINGSPACE(mbuf) <= 0) {
			mbuf = mbuf->m_next;
		}
	}
	mbufc->m_pkthdr.len = total_size;

#if defined(INET) || defined(INET6)
	xnb_add_mbuf_cksum(mbufc);
#endif
}

/**
 * Dequeue at most one packet from the shared ring.
 * \param[in,out] txb	Netif tx ring.  A packet will be removed from it, and
 * 			its private indices will be updated.  But the indices
 * 			will not be pushed to the shared ring.
 * \param[in] ifnet	Interface to which the packet will be sent
 * \param[in] otherend	Domain ID of the other end of the ring
 * \param[out] mbufc	The assembled mbuf chain, ready to send to the generic
 * 			networking stack
 * \param[in,out] gnttab Pointer to enough memory for a grant table.  We make
 * 			this a function parameter so that we will take less
 * 			stack space.
 * \return		An error code
 */
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
	 struct ifnet *ifnet, gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	/* number of tx requests consumed to build the last packet */
	int num_consumed;
	int nr_ents;

	*mbufc = NULL;
	num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
	if (num_consumed == 0)
		return 0;	/* Nothing to receive */

	/* update statistics independent of errors */
	if_inc_counter(ifnet, IFCOUNTER_IPACKETS, 1);

	/*
	 * if we got here, then 1 or more requests was consumed, but the packet
	 * is not necessarily valid.
	 */
	if (xnb_pkt_is_valid(&pkt) == 0) {
		/* got a garbage packet, respond and drop it */
		xnb_txpkt2rsp(&pkt, txb, 1);
		txb->req_cons += num_consumed;
		DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
				num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1);
		return EINVAL;
	}

	*mbufc = xnb_pkt2mbufc(&pkt, ifnet);

	if (*mbufc == NULL) {
		/*
		 * Couldn't allocate mbufs.  Respond and drop the packet.  Do
/**
 * Dequeue at most one packet from the shared ring
 * \param[in,out] txb	Netif tx ring.  A packet will be removed from it, and
 *			its private indices will be updated.  But the indices
 *			will not be pushed to the shared ring.
 * \param[in] otherend	Domain ID of the other end of the ring
 * \param[out] mbufc	The assembled mbuf chain, ready to send to the generic
 *			networking stack
 * \param[in] ifnet	Interface to which the packet will be sent
 * \param[in,out] gnttab Pointer to enough memory for a grant table.  We make
 *			this a function parameter so that we will take less
 *			stack space.
 * \return	An error code
 */
static int
xnb_recv(netif_tx_back_ring_t *txb, domid_t otherend, struct mbuf **mbufc,
	 struct ifnet *ifnet, gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	/* number of tx requests consumed to build the last packet */
	int num_consumed;
	int nr_ents;

	*mbufc = NULL;
	num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons);
	if (num_consumed == 0)
		return 0;	/* Nothing to receive */

	/* update statistics independent of errors */
	if_inc_counter(ifnet, IFCOUNTER_IPACKETS, 1);

	/*
	 * if we got here, then 1 or more requests were consumed, but the
	 * packet is not necessarily valid.
	 */
	if (xnb_pkt_is_valid(&pkt) == 0) {
		/* got a garbage packet, respond and drop it */
		xnb_txpkt2rsp(&pkt, txb, 1);
		txb->req_cons += num_consumed;
		DPRINTF("xnb_intr: garbage packet, num_consumed=%d\n",
				num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IERRORS, 1);
		return EINVAL;
	}

	*mbufc = xnb_pkt2mbufc(&pkt, ifnet);

	if (*mbufc == NULL) {
		/*
		 * Couldn't allocate mbufs.  Respond and drop the packet.  Do
		 * not consume the requests
		 */
		xnb_txpkt2rsp(&pkt, txb, 1);
		DPRINTF("xnb_intr: Couldn't allocate mbufs, num_consumed=%d\n",
		    num_consumed);
		if_inc_counter(ifnet, IFCOUNTER_IQDROPS, 1);
		return ENOMEM;
	}

	nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend);

	if (nr_ents > 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, nr_ents);
		KASSERT(hv_ret == 0,
		    ("HYPERVISOR_grant_table_op returned %d\n", hv_ret));
		xnb_update_mbufc(*mbufc, gnttab, nr_ents);
	}

	xnb_txpkt2rsp(&pkt, txb, 0);
	txb->req_cons += num_consumed;
	return 0;
}

/**
 * Create an xnb_pkt based on the contents of an mbuf chain.
 * \param[in] mbufc	mbuf chain to transform into a packet
 * \param[out] pkt	Storage for the newly generated xnb_pkt
 * \param[in] start	The ring index of the first available slot in the rx
 *			ring
 * \param[in] space	The number of free slots in the rx ring
 * \retval 0		Success
 * \retval EINVAL	mbufc was corrupt or not convertible into a pkt
 * \retval EAGAIN	There was not enough space in the ring to queue the
 *			packet
 */
static int
xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt,
	      RING_IDX start, int space)
{

	int retval = 0;

	if ((mbufc == NULL) ||
	    ((mbufc->m_flags & M_PKTHDR) == 0) ||
	    (mbufc->m_pkthdr.len == 0)) {
		xnb_pkt_invalidate(pkt);
		retval = EINVAL;
	} else {
		int slots_required;

		xnb_pkt_validate(pkt);
		pkt->flags = 0;
		pkt->size = mbufc->m_pkthdr.len;
		pkt->car = start;
		pkt->car_size = mbufc->m_len;

		if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) {
			pkt->flags |= NETRXF_extra_info;
			pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz;
			pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			pkt->extra.u.gso.pad = 0;
			pkt->extra.u.gso.features = 0;
			pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO;
			pkt->extra.flags = 0;
			pkt->cdr = start + 2;
		} else {
			pkt->cdr = start + 1;
		}
		if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) {
			pkt->flags |=
			    (NETRXF_csum_blank | NETRXF_data_validated);
		}

		/*
		 * Each ring response can have up to PAGE_SIZE of data.
		 * Assume that we can defragment the mbuf chain efficiently
		 * into responses so that each response but the last uses all
		 * PAGE_SIZE bytes.
		 */
		pkt->list_len = howmany(pkt->size, PAGE_SIZE);

		if (pkt->list_len > 1) {
			pkt->flags |= NETRXF_more_data;
		}

		slots_required = pkt->list_len +
			(pkt->flags & NETRXF_extra_info ? 1 : 0);
		if (slots_required > space) {
			xnb_pkt_invalidate(pkt);
			retval = EAGAIN;
		}
	}

	return retval;
}
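
/*
 * Worked example of the slot arithmetic above (hypothetical numbers): a
 * 6000-byte TSO mbuf chain whose first mbuf holds 1500 bytes yields
 *	pkt->size     = 6000, pkt->car_size = 1500,
 *	pkt->list_len = howmany(6000, PAGE_SIZE) = 2	(with 4KB pages),
 * plus one more slot for the NETRXF_extra_info GSO descriptor, so three free
 * rx ring slots are required.  With space < 3 the packet is rejected whole
 * with EAGAIN rather than partially queued.
 */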
/**
 * Build a gnttab_copy table that can be used to copy data from an mbuf chain
 * to the frontend's shared buffers.  Does not actually perform the copy.
 * Always uses gref's on the other end's side.
 * \param[in]	pkt	pkt's associated responses form the dest for the copy
 *			operation
 * \param[in]	mbufc	The source for the copy operation
 * \param[out]	gnttab	Storage for the returned grant table
 * \param[in]	rxb	Pointer to the backend ring structure
 * \param[in]	otherend_id	The domain ID of the other end of the copy
 * \return	The number of gnttab entries filled
 */
static int
xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc,
		 gnttab_copy_table gnttab, const netif_rx_back_ring_t *rxb,
		 domid_t otherend_id)
{

	const struct mbuf *mbuf = mbufc;/* current mbuf within the chain */
	int gnt_idx = 0;		/* index into grant table */
	RING_IDX r_idx = pkt->car;	/* index into rx ring buffer */
	int r_ofs = 0;	/* offset of next data within rx request's data area */
	int m_ofs = 0;	/* offset of next data within mbuf's data area */
	/* size in bytes that still needs to be represented in the table */
	uint16_t size_remaining;

	size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0;

	while (size_remaining > 0) {
		const netif_rx_request_t *rxq = RING_GET_REQUEST(rxb, r_idx);
		const size_t mbuf_space = mbuf->m_len - m_ofs;
		/* Xen shared pages have an implied size of PAGE_SIZE */
		const size_t req_size = PAGE_SIZE;
		const size_t pkt_space = req_size - r_ofs;
		/*
		 * space is the largest amount of data that can be copied in
		 * the grant table's next entry
		 */
		const size_t space = MIN(pkt_space, mbuf_space);

		/* TODO: handle this error condition without panicking */
		KASSERT(gnt_idx < GNTTAB_LEN, ("Grant table is too short"));

		gnttab[gnt_idx].dest.u.ref = rxq->gref;
		gnttab[gnt_idx].dest.domid = otherend_id;
		gnttab[gnt_idx].dest.offset = r_ofs;
		gnttab[gnt_idx].source.u.gmfn = virt_to_mfn(
			mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.offset = virt_to_offset(
			mtod(mbuf, vm_offset_t) + m_ofs);
		gnttab[gnt_idx].source.domid = DOMID_SELF;
		gnttab[gnt_idx].len = space;
		gnttab[gnt_idx].flags = GNTCOPY_dest_gref;

		gnt_idx++;

		r_ofs += space;
		m_ofs += space;
		size_remaining -= space;
		if (req_size - r_ofs <= 0) {
			/* Must move to the next rx request */
			r_ofs = 0;
			r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1;
		}
		if (mbuf->m_len - m_ofs <= 0) {
			/* Must move to the next mbuf */
			m_ofs = 0;
			mbuf = mbuf->m_next;
		}
	}

	return gnt_idx;
}
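
/*
 * Illustrative contrast with xnb_txpkt2gnttab(), again with hypothetical
 * numbers: copying a 5000-byte chain of two 2500-byte mbufs would emit
 *	entry 0: 2500 bytes into the gref of rxq[pkt->car], offset 0
 *	entry 1: 1596 bytes into the same gref, offset 2500 (page now full)
 *	entry 2:  904 bytes into the gref of rxq[pkt->cdr], offset 0
 * because on the rx side every request implicitly offers exactly PAGE_SIZE
 * bytes, whereas tx requests advertise their own sizes.
 */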
/**
 * Generates responses for all the requests that constituted pkt.  Builds
 * responses and writes them to the ring, but doesn't push the shared ring
 * indices.
 * \param[in] pkt	the packet that needs a response
 * \param[in] gnttab	The grant copy table corresponding to this packet.
 *			Used to determine how many netif_rx_response_t's to
 *			generate.
 * \param[in] n_entries	Number of relevant entries in the grant table
 * \param[out] ring	Responses go here
 * \return	The number of RX requests that were consumed to generate
 *		the responses
 */
static int
xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab,
	      int n_entries, netif_rx_back_ring_t *ring)
{
	/*
	 * This code makes the following assumptions:
	 *	* All entries in gnttab set GNTCOPY_dest_gref
	 *	* The entries in gnttab are grouped by their grefs: any two
	 *	  entries with the same gref must be adjacent
	 */
	int error = 0;
	int gnt_idx, i;
	int n_responses = 0;
	grant_ref_t last_gref = GRANT_REF_INVALID;
	RING_IDX r_idx;

	KASSERT(gnttab != NULL, ("Received a null grant table copy"));

	/*
	 * In the event of an error, we only need to send one response to the
	 * netfront.  In that case, we mustn't write any data to the responses
	 * after the one we send.  So we must loop all the way through gnttab
	 * looking for errors before we generate any responses
	 *
	 * Since we're looping through the grant table anyway, we'll count the
	 * number of different gref's in it, which will tell us how many
	 * responses to generate
	 */
	for (gnt_idx = 0; gnt_idx < n_entries; gnt_idx++) {
		int16_t status = gnttab[gnt_idx].status;
		if (status != GNTST_okay) {
			DPRINTF(
			    "Got error %d for hypervisor gnttab_copy status\n",
			    status);
			error = 1;
			break;
		}
		if (gnttab[gnt_idx].dest.u.ref != last_gref) {
			n_responses++;
			last_gref = gnttab[gnt_idx].dest.u.ref;
		}
	}

	if (error != 0) {
		uint16_t id;
		netif_rx_response_t *rsp;

		id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id;
		rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
		rsp->id = id;
		rsp->status = NETIF_RSP_ERROR;
		n_responses = 1;
	} else {
		const int has_extra = pkt->flags & NETRXF_extra_info;

		gnt_idx = 0;
		if (has_extra != 0)
			n_responses++;

		for (i = 0; i < n_responses; i++) {
			netif_rx_request_t rxq;
			netif_rx_response_t *rsp;

			r_idx = ring->rsp_prod_pvt + i;
			/*
			 * We copy the structure of rxq instead of making a
			 * pointer because it shares the same memory as rsp.
			 */
			rxq = *(RING_GET_REQUEST(ring, r_idx));
			rsp = RING_GET_RESPONSE(ring, r_idx);
			if (has_extra && (i == 1)) {
				netif_extra_info_t *ext =
				    (netif_extra_info_t*)rsp;
				ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
				ext->flags = 0;
				ext->u.gso.size = pkt->extra.u.gso.size;
				ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
				ext->u.gso.pad = 0;
				ext->u.gso.features = 0;
			} else {
				rsp->id = rxq.id;
				rsp->offset = 0;
				rsp->flags = 0;
				if (i < pkt->list_len - 1)
					rsp->flags |= NETRXF_more_data;
				if ((i == 0) && has_extra)
					rsp->flags |= NETRXF_extra_info;
				if ((i == 0) &&
					(pkt->flags & NETRXF_data_validated)) {
					rsp->flags |= NETRXF_data_validated;
					rsp->flags |= NETRXF_csum_blank;
				}
				/*
				 * On success the response's status carries
				 * the number of bytes placed in this gref.
				 * Bound the scan so we never read past the
				 * valid portion of the table.
				 */
				rsp->status = 0;
				for (; gnt_idx < n_entries &&
				     gnttab[gnt_idx].dest.u.ref == rxq.gref;
				     gnt_idx++) {
					rsp->status += gnttab[gnt_idx].len;
				}
			}
		}
	}

	ring->req_cons += n_responses;
	ring->rsp_prod_pvt += n_responses;
	return n_responses;
}
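
/*
 * Example of the status accumulation above (hypothetical gnttab contents):
 * entries of 2500 and 1596 bytes for gref A followed by 904 bytes for gref B
 * produce two data responses with status 4096 and 904 respectively.  A
 * successful netif_rx_response_t carries the byte count in its status field;
 * only failures use the negative NETIF_RSP_* codes, as in the error path
 * above.
 */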
2103 */ 2104 rxq = *(RING_GET_REQUEST(ring, r_idx)); 2105 rsp = RING_GET_RESPONSE(ring, r_idx); 2106 if (has_extra && (i == 1)) { 2107 netif_extra_info_t *ext = 2108 (netif_extra_info_t*)rsp; 2109 ext->type = XEN_NETIF_EXTRA_TYPE_GSO; 2110 ext->flags = 0; 2111 ext->u.gso.size = pkt->extra.u.gso.size; 2112 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; 2113 ext->u.gso.pad = 0; 2114 ext->u.gso.features = 0; 2115 } else { 2116 rsp->id = rxq.id; 2117 rsp->status = GNTST_okay; 2118 rsp->offset = 0; 2119 rsp->flags = 0; 2120 if (i < pkt->list_len - 1) 2121 rsp->flags |= NETRXF_more_data; 2122 if ((i == 0) && has_extra) 2123 rsp->flags |= NETRXF_extra_info; 2124 if ((i == 0) && 2125 (pkt->flags & NETRXF_data_validated)) { 2126 rsp->flags |= NETRXF_data_validated; 2127 rsp->flags |= NETRXF_csum_blank; 2128 } 2129 rsp->status = 0; 2130 for (; gnttab[gnt_idx].dest.u.ref == rxq.gref; 2131 gnt_idx++) { 2132 rsp->status += gnttab[gnt_idx].len; 2133 } 2134 } 2135 } 2136 } 2137 2138 ring->req_cons += n_responses; 2139 ring->rsp_prod_pvt += n_responses; 2140 return n_responses; 2141 } 2142 2143 #if defined(INET) || defined(INET6) 2144 /** 2145 * Add IP, TCP, and/or UDP checksums to every mbuf in a chain. The first mbuf 2146 * in the chain must start with a struct ether_header. 2147 * 2148 * XXX This function will perform incorrectly on UDP packets that are split up 2149 * into multiple ethernet frames. 2150 */ 2151 static void 2152 xnb_add_mbuf_cksum(struct mbuf *mbufc) 2153 { 2154 struct ether_header *eh; 2155 struct ip *iph; 2156 uint16_t ether_type; 2157 2158 eh = mtod(mbufc, struct ether_header*); 2159 ether_type = ntohs(eh->ether_type); 2160 if (ether_type != ETHERTYPE_IP) { 2161 /* Nothing to calculate */ 2162 return; 2163 } 2164 2165 iph = (struct ip*)(eh + 1); 2166 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { 2167 iph->ip_sum = 0; 2168 iph->ip_sum = in_cksum_hdr(iph); 2169 } 2170 2171 switch (iph->ip_p) { 2172 case IPPROTO_TCP: 2173 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { 2174 size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip); 2175 struct tcphdr *th = (struct tcphdr*)(iph + 1); 2176 th->th_sum = in_pseudo(iph->ip_src.s_addr, 2177 iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen)); 2178 th->th_sum = in_cksum_skip(mbufc, 2179 sizeof(struct ether_header) + ntohs(iph->ip_len), 2180 sizeof(struct ether_header) + (iph->ip_hl << 2)); 2181 } 2182 break; 2183 case IPPROTO_UDP: 2184 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { 2185 size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip); 2186 struct udphdr *uh = (struct udphdr*)(iph + 1); 2187 uh->uh_sum = in_pseudo(iph->ip_src.s_addr, 2188 iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen)); 2189 uh->uh_sum = in_cksum_skip(mbufc, 2190 sizeof(struct ether_header) + ntohs(iph->ip_len), 2191 sizeof(struct ether_header) + (iph->ip_hl << 2)); 2192 } 2193 break; 2194 default: 2195 break; 2196 } 2197 } 2198 #endif /* INET || INET6 */ 2199 2200 static void 2201 xnb_stop(struct xnb_softc *xnb) 2202 { 2203 struct ifnet *ifp; 2204 2205 mtx_assert(&xnb->sc_lock, MA_OWNED); 2206 ifp = xnb->xnb_ifp; 2207 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2208 if_link_state_change(ifp, LINK_STATE_DOWN); 2209 } 2210 2211 static int 2212 xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2213 { 2214 struct xnb_softc *xnb = ifp->if_softc; 2215 struct ifreq *ifr = (struct ifreq*) data; 2216 #ifdef INET 2217 struct ifaddr *ifa = (struct ifaddr*)data; 2218 #endif 2219 int error = 0; 2220 2221 switch (cmd) { 2222 case SIOCSIFFLAGS: 2223 
static void
xnb_stop(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);
	ifp = xnb->xnb_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

static int
xnb_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xnb_softc *xnb = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq*) data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr*)data;
#endif
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		mtx_lock(&xnb->sc_lock);
		if (ifp->if_flags & IFF_UP) {
			xnb_ifinit_locked(xnb);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				xnb_stop(xnb);
			}
		}
		/*
		 * Note: netfront sets a variable named xn_if_flags
		 * here, but that variable is never read
		 */
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFADDR:
#ifdef INET
		mtx_lock(&xnb->sc_lock);
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ifp->if_drv_flags &= ~(IFF_DRV_RUNNING |
						IFF_DRV_OACTIVE);
				if_link_state_change(ifp,
						LINK_STATE_DOWN);
				ifp->if_drv_flags |= IFF_DRV_RUNNING;
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				if_link_state_change(ifp,
						LINK_STATE_UP);
			}
			arp_ifinit(ifp, ifa);
			mtx_unlock(&xnb->sc_lock);
		} else {
			mtx_unlock(&xnb->sc_lock);
#endif
			error = ether_ioctl(ifp, cmd, data);
#ifdef INET
		}
#endif
		break;
	case SIOCSIFCAP:
		mtx_lock(&xnb->sc_lock);
		if (ifr->ifr_reqcap & IFCAP_TXCSUM) {
			ifp->if_capenable |= IFCAP_TXCSUM;
			ifp->if_hwassist |= XNB_CSUM_FEATURES;
		} else {
			ifp->if_capenable &= ~(IFCAP_TXCSUM);
			ifp->if_hwassist &= ~(XNB_CSUM_FEATURES);
		}
		if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) {
			ifp->if_capenable |= IFCAP_RXCSUM;
		} else {
			ifp->if_capenable &= ~(IFCAP_RXCSUM);
		}
		/*
		 * TODO enable TSO4 and LRO once we no longer need
		 * to calculate checksums in software
		 */
#if 0
		if (ifr->ifr_reqcap & IFCAP_TSO4) {
			if (!(IFCAP_TXCSUM & ifp->if_capenable)) {
				printf("xnb: Xen netif requires that "
					"TXCSUM be enabled in order "
					"to use TSO4\n");
				error = EINVAL;
			} else {
				ifp->if_capenable |= IFCAP_TSO4;
				ifp->if_hwassist |= CSUM_TSO;
			}
		} else {
			ifp->if_capenable &= ~(IFCAP_TSO4);
			ifp->if_hwassist &= ~(CSUM_TSO);
		}
		if (ifr->ifr_reqcap & IFCAP_LRO) {
			ifp->if_capenable |= IFCAP_LRO;
		} else {
			ifp->if_capenable &= ~(IFCAP_LRO);
		}
#endif
		mtx_unlock(&xnb->sc_lock);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		xnb_ifinit(xnb);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
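
/*
 * Usage note (illustrative; assumes a device instance named xnb0): the
 * SIOCSIFCAP case above is what runs when an administrator toggles offloads
 * from userland, e.g.
 *	ifconfig xnb0 txcsum		# sets IFCAP_TXCSUM and enables
 *					# XNB_CSUM_FEATURES in if_hwassist
 *	ifconfig xnb0 -txcsum -rxcsum	# clears both capabilities
 * TSO4/LRO remain compiled out (#if 0) until checksums no longer have to be
 * computed in software.
 */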
static void
xnb_start_locked(struct ifnet *ifp)
{
	netif_rx_back_ring_t *rxb;
	struct xnb_softc *xnb;
	struct mbuf *mbufc;
	RING_IDX req_prod_local;

	xnb = ifp->if_softc;
	rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring;

	if (!xnb->carrier)
		return;

	do {
		int out_of_space = 0;
		int notify;
		req_prod_local = rxb->sring->req_prod;
		xen_rmb();
		for (;;) {
			int error;

			IF_DEQUEUE(&ifp->if_snd, mbufc);
			if (mbufc == NULL)
				break;
			error = xnb_send(rxb, xnb->otherend_id, mbufc,
					 xnb->rx_gnttab);
			switch (error) {
			case EAGAIN:
				/*
				 * Insufficient space in the ring.
				 * Requeue pkt and send when space is
				 * available.
				 */
				IF_PREPEND(&ifp->if_snd, mbufc);
				/*
				 * Perhaps the frontend missed an IRQ
				 * and went to sleep.  Notify it to wake
				 * it up.
				 */
				out_of_space = 1;
				break;

			case EINVAL:
				/* OS gave a corrupt packet.  Drop it.*/
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				/* FALLTHROUGH */
			default:
				/* Send succeeded, or packet had error.
				 * Free the packet */
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
				if (mbufc)
					m_freem(mbufc);
				break;
			}
			if (out_of_space != 0)
				break;
		}

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(rxb, notify);
		if ((notify != 0) || (out_of_space != 0))
			xen_intr_signal(xnb->xen_intr_handle);
		rxb->sring->req_event = req_prod_local + 1;
		xen_mb();
	} while (rxb->sring->req_prod != req_prod_local);
}

/**
 * Sends one packet to the ring.  Does not block; if the ring lacks space
 * for the whole packet, nothing is queued and EAGAIN is returned.
 * \param[in]	mbufc	Contains one packet to send.  Caller must free
 * \param[in,out] ring	The packet will be pushed onto this ring, but the
 *			otherend will not be notified.
 * \param[in]	otherend The domain ID of the other end of the connection
 * \param[in,out] gnttab Pointer to enough memory for a grant table.  We make
 *			this a function parameter so that we will take less
 *			stack space.
 * \retval	EAGAIN	The ring did not have enough space for the packet.
 *			The ring has not been modified
 * \retval	EINVAL	mbufc was corrupt or not convertible into a pkt
 */
static int
xnb_send(netif_rx_back_ring_t *ring, domid_t otherend, const struct mbuf *mbufc,
	 gnttab_copy_table gnttab)
{
	struct xnb_pkt pkt;
	int error, n_entries;
	RING_IDX space;

	space = ring->sring->req_prod - ring->req_cons;
	error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space);
	if (error != 0)
		return error;
	n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend);
	if (n_entries != 0) {
		int __unused hv_ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
		    gnttab, n_entries);
		KASSERT(hv_ret == 0, ("HYPERVISOR_grant_table_op returned %d\n",
		    hv_ret));
	}

	(void)xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring);

	return 0;
}

static void
xnb_start(struct ifnet *ifp)
{
	struct xnb_softc *xnb;

	xnb = ifp->if_softc;
	mtx_lock(&xnb->rx_lock);
	xnb_start_locked(ifp);
	mtx_unlock(&xnb->rx_lock);
}

/* equivalent of network_open() in Linux */
static void
xnb_ifinit_locked(struct xnb_softc *xnb)
{
	struct ifnet *ifp;

	ifp = xnb->xnb_ifp;

	mtx_assert(&xnb->sc_lock, MA_OWNED);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	xnb_stop(xnb);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
xnb_ifinit(void *xsc)
{
	struct xnb_softc *xnb = xsc;

	mtx_lock(&xnb->sc_lock);
	xnb_ifinit_locked(xnb);
	mtx_unlock(&xnb->sc_lock);
}

/**
 * Callback used by the generic networking code to tell us when our carrier
 * state has changed.  Since we don't have a physical carrier, we don't care
 */
static int
xnb_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}
/**
 * Callback used by the generic networking code to ask us what our carrier
 * state is.  Since we don't have a physical carrier, this is very simple.
 */
static void
xnb_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*---------------------------- NewBus Registration ---------------------------*/
static device_method_t xnb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xnb_probe),
	DEVMETHOD(device_attach,	xnb_attach),
	DEVMETHOD(device_detach,	xnb_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	xnb_suspend),
	DEVMETHOD(device_resume,	xnb_resume),

	/* Xenbus interface */
	DEVMETHOD(xenbus_otherend_changed, xnb_frontend_changed),

	{ 0, 0 }
};

static driver_t xnb_driver = {
	"xnb",
	xnb_methods,
	sizeof(struct xnb_softc),
};
devclass_t xnb_devclass;

DRIVER_MODULE(xnb, xenbusb_back, xnb_driver, xnb_devclass, 0, 0);

/*-------------------------- Unit Tests -------------------------------------*/
#ifdef XNB_DEBUG
#include "netback_unit_tests.c"
#endif