/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/select.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <machine/atomic.h>
#include <net/ethernet.h>
#ifndef NETMAP_WITH_LIBS
#define NETMAP_WITH_LIBS
#endif
#include <net/netmap_user.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <md5.h>
#include <pthread.h>
#include <pthread_np.h>

#include "bhyverun.h"
#include "pci_emul.h"
#include "mevent.h"
#include "virtio.h"

#define	VTNET_RINGSZ	1024

#define	VTNET_MAXSEGS	256

/*
 * Host capabilities.  Note that we only offer a few of these.
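 * VTNET_S_HOSTCAPS below advertises only F_MAC, F_MRG_RXBUF and F_STATUS
 * (plus the generic ring/notify features): the tap and netmap backends
 * pass frames through unmodified, so none of the checksum/TSO/UFO
 * offloads are offered to the guest.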
 */
#define	VIRTIO_NET_F_CSUM	(1 << 0)  /* host handles partial cksum */
#define	VIRTIO_NET_F_GUEST_CSUM	(1 << 1)  /* guest handles partial cksum */
#define	VIRTIO_NET_F_MAC	(1 << 5)  /* host supplies MAC */
#define	VIRTIO_NET_F_GSO_DEPREC	(1 << 6)  /* deprecated: host handles GSO */
#define	VIRTIO_NET_F_GUEST_TSO4	(1 << 7)  /* guest can rcv TSOv4 */
#define	VIRTIO_NET_F_GUEST_TSO6	(1 << 8)  /* guest can rcv TSOv6 */
#define	VIRTIO_NET_F_GUEST_ECN	(1 << 9)  /* guest can rcv TSO with ECN */
#define	VIRTIO_NET_F_GUEST_UFO	(1 << 10) /* guest can rcv UFO */
#define	VIRTIO_NET_F_HOST_TSO4	(1 << 11) /* host can rcv TSOv4 */
#define	VIRTIO_NET_F_HOST_TSO6	(1 << 12) /* host can rcv TSOv6 */
#define	VIRTIO_NET_F_HOST_ECN	(1 << 13) /* host can rcv TSO with ECN */
#define	VIRTIO_NET_F_HOST_UFO	(1 << 14) /* host can rcv UFO */
#define	VIRTIO_NET_F_MRG_RXBUF	(1 << 15) /* host can merge RX buffers */
#define	VIRTIO_NET_F_STATUS	(1 << 16) /* config status field available */
#define	VIRTIO_NET_F_CTRL_VQ	(1 << 17) /* control channel available */
#define	VIRTIO_NET_F_CTRL_RX	(1 << 18) /* control channel RX mode support */
#define	VIRTIO_NET_F_CTRL_VLAN	(1 << 19) /* control channel VLAN filtering */
#define	VIRTIO_NET_F_GUEST_ANNOUNCE \
				(1 << 21) /* guest can send gratuitous pkts */

#define	VTNET_S_HOSTCAPS \
	(VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \
	 VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC)

/*
 * PCI config-space "registers"
 */
struct virtio_net_config {
	uint8_t  mac[6];
	uint16_t status;
} __packed;

/*
 * Queue definitions.
 */
#define	VTNET_RXQ	0
#define	VTNET_TXQ	1
#define	VTNET_CTLQ	2	/* NB: not yet supported */

#define	VTNET_MAXQ	3

/*
 * Fixed network header size
 */
struct virtio_net_rxhdr {
	uint8_t		vrh_flags;
	uint8_t		vrh_gso_type;
	uint16_t	vrh_hdr_len;
	uint16_t	vrh_gso_size;
	uint16_t	vrh_csum_start;
	uint16_t	vrh_csum_offset;
	uint16_t	vrh_bufs;
} __packed;

/*
 * Debug printf
 */
static int pci_vtnet_debug;
#define	DPRINTF(params) if (pci_vtnet_debug) printf params
#define	WPRINTF(params) printf params

/*
 * Per-device softc
 */
struct pci_vtnet_softc {
	struct virtio_softc vsc_vs;
	struct vqueue_info vsc_queues[VTNET_MAXQ - 1];
	pthread_mutex_t vsc_mtx;
	struct mevent	*vsc_mevp;

	int		vsc_tapfd;
	struct nm_desc	*vsc_nmd;

	int		vsc_rx_ready;
	volatile int	resetting;	/* set and checked outside lock */

	uint64_t	vsc_features;	/* negotiated features */

	struct virtio_net_config vsc_config;

	pthread_mutex_t	rx_mtx;
	int		rx_in_progress;
	int		rx_vhdrlen;
	int		rx_merge;	/* merged rx bufs in use */

	pthread_t	tx_tid;
	pthread_mutex_t	tx_mtx;
	pthread_cond_t	tx_cond;
	int		tx_in_progress;

	void (*pci_vtnet_rx)(struct pci_vtnet_softc *sc);
	void (*pci_vtnet_tx)(struct pci_vtnet_softc *sc, struct iovec *iov,
	    int iovcnt, int len);
};

static void pci_vtnet_reset(void *);
/* static void pci_vtnet_notify(void *, struct vqueue_info *); */
static int pci_vtnet_cfgread(void *, int, int, uint32_t *);
static int pci_vtnet_cfgwrite(void *, int, int, uint32_t);
static void pci_vtnet_neg_features(void *, uint64_t);

static struct virtio_consts vtnet_vi_consts = {
	"vtnet",		/* our name */
	VTNET_MAXQ - 1,		/* we currently support 2 virtqueues */
	sizeof(struct virtio_net_config), /* config reg size */
	pci_vtnet_reset,	/* reset */
	NULL,			/* device-wide qnotify -- not used */
	pci_vtnet_cfgread,	/* read PCI config */
	pci_vtnet_cfgwrite,	/* write PCI config */
	pci_vtnet_neg_features,	/* apply negotiated features */
	VTNET_S_HOSTCAPS,	/* our capabilities */
};

/*
 * If the transmit thread is active then stall until it is done.
 */
static void
pci_vtnet_txwait(struct pci_vtnet_softc *sc)
{

	pthread_mutex_lock(&sc->tx_mtx);
	while (sc->tx_in_progress) {
		pthread_mutex_unlock(&sc->tx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->tx_mtx);
	}
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * If the receive thread is active then stall until it is done.
 */
static void
pci_vtnet_rxwait(struct pci_vtnet_softc *sc)
{

	pthread_mutex_lock(&sc->rx_mtx);
	while (sc->rx_in_progress) {
		pthread_mutex_unlock(&sc->rx_mtx);
		usleep(10000);
		pthread_mutex_lock(&sc->rx_mtx);
	}
	pthread_mutex_unlock(&sc->rx_mtx);
}

static void
pci_vtnet_reset(void *vsc)
{
	struct pci_vtnet_softc *sc = vsc;

	DPRINTF(("vtnet: device reset requested !\n"));

	sc->resetting = 1;

	/*
	 * Wait for the transmit and receive threads to finish their
	 * processing.
	 */
	pci_vtnet_txwait(sc);
	pci_vtnet_rxwait(sc);

	sc->vsc_rx_ready = 0;
	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);

	/* now reset rings, MSI-X vectors, and negotiated capabilities */
	vi_reset_dev(&sc->vsc_vs);

	sc->resetting = 0;
}

/*
 * Called to send a buffer chain out to the tap device
 */
static void
pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
    int len)
{
	static char pad[60]; /* all zero bytes */

	if (sc->vsc_tapfd == -1)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov. It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) writev(sc->vsc_tapfd, iov, iovcnt);
}

/*
 * Called when there is read activity on the tap file descriptor.
 * Each buffer posted by the guest is assumed to be able to contain
 * an entire ethernet frame + rx header.
 *  MP note: the dummybuf is only used for discarding frames, so there
 * is no need for it to be per-vtnet or locked.
271 */ 272 static uint8_t dummybuf[2048]; 273 274 static __inline struct iovec * 275 rx_iov_trim(struct iovec *iov, int *niov, int tlen) 276 { 277 struct iovec *riov; 278 279 /* XXX short-cut: assume first segment is >= tlen */ 280 assert(iov[0].iov_len >= tlen); 281 282 iov[0].iov_len -= tlen; 283 if (iov[0].iov_len == 0) { 284 assert(*niov > 1); 285 *niov -= 1; 286 riov = &iov[1]; 287 } else { 288 iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen); 289 riov = &iov[0]; 290 } 291 292 return (riov); 293 } 294 295 static void 296 pci_vtnet_tap_rx(struct pci_vtnet_softc *sc) 297 { 298 struct iovec iov[VTNET_MAXSEGS], *riov; 299 struct vqueue_info *vq; 300 void *vrx; 301 int len, n; 302 uint16_t idx; 303 304 /* 305 * Should never be called without a valid tap fd 306 */ 307 assert(sc->vsc_tapfd != -1); 308 309 /* 310 * But, will be called when the rx ring hasn't yet 311 * been set up or the guest is resetting the device. 312 */ 313 if (!sc->vsc_rx_ready || sc->resetting) { 314 /* 315 * Drop the packet and try later. 316 */ 317 (void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf)); 318 return; 319 } 320 321 /* 322 * Check for available rx buffers 323 */ 324 vq = &sc->vsc_queues[VTNET_RXQ]; 325 if (!vq_has_descs(vq)) { 326 /* 327 * Drop the packet and try later. Interrupt on 328 * empty, if that's negotiated. 329 */ 330 (void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf)); 331 vq_endchains(vq, 1); 332 return; 333 } 334 335 do { 336 /* 337 * Get descriptor chain. 338 */ 339 n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL); 340 assert(n >= 1 && n <= VTNET_MAXSEGS); 341 342 /* 343 * Get a pointer to the rx header, and use the 344 * data immediately following it for the packet buffer. 345 */ 346 vrx = iov[0].iov_base; 347 riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen); 348 349 len = readv(sc->vsc_tapfd, riov, n); 350 351 if (len < 0 && errno == EWOULDBLOCK) { 352 /* 353 * No more packets, but still some avail ring 354 * entries. Interrupt if needed/appropriate. 355 */ 356 vq_retchain(vq); 357 vq_endchains(vq, 0); 358 return; 359 } 360 361 /* 362 * The only valid field in the rx packet header is the 363 * number of buffers if merged rx bufs were negotiated. 364 */ 365 memset(vrx, 0, sc->rx_vhdrlen); 366 367 if (sc->rx_merge) { 368 struct virtio_net_rxhdr *vrxh; 369 370 vrxh = vrx; 371 vrxh->vrh_bufs = 1; 372 } 373 374 /* 375 * Release this chain and handle more chains. 376 */ 377 vq_relchain(vq, idx, len + sc->rx_vhdrlen); 378 } while (vq_has_descs(vq)); 379 380 /* Interrupt if needed, including for NOTIFY_ON_EMPTY. 
	 */
	vq_endchains(vq, 1);
}

/*
 * Copy the guest's buffer chain into the next available netmap TX slot
 * and kick the ring.  Returns the number of bytes written, or 0 if no
 * TX slot was available.
 */
static int
pci_vtnet_netmap_writev(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int r, i;
	int len = 0;

	for (r = nmd->cur_tx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_TXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_tx_ring)
				r = nmd->first_tx_ring;
			if (r == nmd->cur_tx_ring)	/* wrapped: no room */
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);

		for (i = 0; i < iovcnt; i++) {
			memcpy(&buf[len], iov[i].iov_base, iov[i].iov_len);
			len += iov[i].iov_len;
		}
		ring->slot[cur].len = len;
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_tx_ring = r;
		ioctl(nmd->fd, NIOCTXSYNC, NULL);
		break;
	}

	return (len);
}

/*
 * Copy the payload of the next received netmap slot into the iovec,
 * clipping segments to what is left in the slot.  Returns the number
 * of bytes read, or 0 if no packet was available.
 */
static inline int
pci_vtnet_netmap_readv(struct nm_desc *nmd, struct iovec *iov, int iovcnt)
{
	int len = 0;
	int i = 0;
	int r;

	for (r = nmd->cur_rx_ring; ; ) {
		struct netmap_ring *ring = NETMAP_RXRING(nmd->nifp, r);
		uint32_t cur, idx;
		char *buf;
		size_t left;

		if (nm_ring_empty(ring)) {
			r++;
			if (r > nmd->last_rx_ring)
				r = nmd->first_rx_ring;
			if (r == nmd->cur_rx_ring)
				break;
			continue;
		}
		cur = ring->cur;
		idx = ring->slot[cur].buf_idx;
		buf = NETMAP_BUF(ring, idx);
		left = ring->slot[cur].len;

		for (i = 0; i < iovcnt && left > 0; i++) {
			if (iov[i].iov_len > left)
				iov[i].iov_len = left;
			memcpy(iov[i].iov_base, &buf[len], iov[i].iov_len);
			len += iov[i].iov_len;
			left -= iov[i].iov_len;
		}
		ring->head = ring->cur = nm_ring_next(ring, cur);
		nmd->cur_rx_ring = r;
		ioctl(nmd->fd, NIOCRXSYNC, NULL);
		break;
	}
	for (; i < iovcnt; i++)
		iov[i].iov_len = 0;

	return (len);
}

/*
 * Called to send a buffer chain out to the vale port
 */
static void
pci_vtnet_netmap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt,
    int len)
{
	static char pad[60]; /* all zero bytes */

	if (sc->vsc_nmd == NULL)
		return;

	/*
	 * If the length is < 60, pad out to that and add the
	 * extra zero'd segment to the iov. It is guaranteed that
	 * there is always an extra iov available by the caller.
	 */
	if (len < 60) {
		iov[iovcnt].iov_base = pad;
		iov[iovcnt].iov_len = 60 - len;
		iovcnt++;
	}
	(void) pci_vtnet_netmap_writev(sc->vsc_nmd, iov, iovcnt);
}

static void
pci_vtnet_netmap_rx(struct pci_vtnet_softc *sc)
{
	struct iovec iov[VTNET_MAXSEGS], *riov;
	struct vqueue_info *vq;
	void *vrx;
	int len, n;
	uint16_t idx;

	/*
	 * Should never be called without a valid netmap descriptor
	 */
	assert(sc->vsc_nmd != NULL);

	/*
	 * But, will be called when the rx ring hasn't yet
	 * been set up or the guest is resetting the device.
	 */
	if (!sc->vsc_rx_ready || sc->resetting) {
		/*
		 * Drop the packet and try later.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		return;
	}

	/*
	 * Check for available rx buffers
	 */
	vq = &sc->vsc_queues[VTNET_RXQ];
	if (!vq_has_descs(vq)) {
		/*
		 * Drop the packet and try later.  Interrupt on
		 * empty, if that's negotiated.
		 */
		(void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf);
		vq_endchains(vq, 1);
		return;
	}

	do {
		/*
		 * Get descriptor chain.
		 */
		n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
		assert(n >= 1 && n <= VTNET_MAXSEGS);

		/*
		 * Get a pointer to the rx header, and use the
		 * data immediately following it for the packet buffer.
		 */
		vrx = iov[0].iov_base;
		riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen);

		len = pci_vtnet_netmap_readv(sc->vsc_nmd, riov, n);

		if (len == 0) {
			/*
			 * No more packets, but still some avail ring
			 * entries.  Return the unused chain (as the tap
			 * path does) and interrupt if needed/appropriate.
			 */
			vq_retchain(vq);
			vq_endchains(vq, 0);
			return;
		}

		/*
		 * The only valid field in the rx packet header is the
		 * number of buffers if merged rx bufs were negotiated.
		 */
		memset(vrx, 0, sc->rx_vhdrlen);

		if (sc->rx_merge) {
			struct virtio_net_rxhdr *vrxh;

			vrxh = vrx;
			vrxh->vrh_bufs = 1;
		}

		/*
		 * Release this chain and handle more chains.
		 */
		vq_relchain(vq, idx, len + sc->rx_vhdrlen);
	} while (vq_has_descs(vq));

	/* Interrupt if needed, including for NOTIFY_ON_EMPTY. */
	vq_endchains(vq, 1);
}

static void
pci_vtnet_rx_callback(int fd, enum ev_type type, void *param)
{
	struct pci_vtnet_softc *sc = param;

	pthread_mutex_lock(&sc->rx_mtx);
	sc->rx_in_progress = 1;
	sc->pci_vtnet_rx(sc);
	sc->rx_in_progress = 0;
	pthread_mutex_unlock(&sc->rx_mtx);
}

static void
pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * A qnotify means that the rx process can now begin
	 */
	if (sc->vsc_rx_ready == 0) {
		sc->vsc_rx_ready = 1;
		vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
	}
}

static void
pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq)
{
	struct iovec iov[VTNET_MAXSEGS + 1];
	int i, n;
	int plen, tlen;
	uint16_t idx;

	/*
	 * Obtain chain of descriptors.  The first one is
	 * really the header descriptor, so we need to sum
	 * up two lengths: packet length and transfer length.
	 */
	n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL);
	assert(n >= 1 && n <= VTNET_MAXSEGS);
	plen = 0;
	tlen = iov[0].iov_len;
	for (i = 1; i < n; i++) {
		plen += iov[i].iov_len;
		tlen += iov[i].iov_len;
	}

	DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, n));
	sc->pci_vtnet_tx(sc, &iov[1], n - 1, plen);

	/* chain is processed, release it and set tlen */
	vq_relchain(vq, idx, tlen);
}

static void
pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtnet_softc *sc = vsc;

	/*
	 * Any ring entries to process?
	 */
	if (!vq_has_descs(vq))
		return;

	/* Signal the tx thread for processing */
	pthread_mutex_lock(&sc->tx_mtx);
	vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
	if (sc->tx_in_progress == 0)
		pthread_cond_signal(&sc->tx_cond);
	pthread_mutex_unlock(&sc->tx_mtx);
}

/*
 * Thread which will handle processing of TX desc
 */
static void *
pci_vtnet_tx_thread(void *param)
{
	struct pci_vtnet_softc *sc = param;
	struct vqueue_info *vq;
	int error;

	vq = &sc->vsc_queues[VTNET_TXQ];

	/*
	 * Let us wait till the tx queue pointers get initialised &
	 * first tx signaled
	 */
	pthread_mutex_lock(&sc->tx_mtx);
	error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
	assert(error == 0);

	for (;;) {
		/* note - tx mutex is locked here */
		while (sc->resetting || !vq_has_descs(vq)) {
			vq->vq_used->vu_flags &= ~VRING_USED_F_NO_NOTIFY;
			mb();
			if (!sc->resetting && vq_has_descs(vq))
				break;

			sc->tx_in_progress = 0;
			error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx);
			assert(error == 0);
		}
		vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY;
		sc->tx_in_progress = 1;
		pthread_mutex_unlock(&sc->tx_mtx);

		do {
			/*
			 * Run through entries, placing them into
			 * iovecs and sending when an end-of-packet
			 * is found
			 */
			pci_vtnet_proctx(sc, vq);
		} while (vq_has_descs(vq));

		/*
		 * Generate an interrupt if needed.
		 */
		vq_endchains(vq, 1);

		pthread_mutex_lock(&sc->tx_mtx);
	}
}

#ifdef notyet
static void
pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq)
{

	DPRINTF(("vtnet: control qnotify!\n\r"));
}
#endif

static int
pci_vtnet_parsemac(char *mac_str, uint8_t *mac_addr)
{
	struct ether_addr *ea;
	char *tmpstr;
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	tmpstr = strsep(&mac_str, "=");

	if ((mac_str != NULL) && (!strcmp(tmpstr, "mac"))) {
		ea = ether_aton(mac_str);

		if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
		    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
			fprintf(stderr, "Invalid MAC %s\n", mac_str);
			return (EINVAL);
		} else
			memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
	}

	return (0);
}

static void
pci_vtnet_tap_setup(struct pci_vtnet_softc *sc, char *devname)
{
	char tbuf[80];
	int opt = 1;

	strcpy(tbuf, "/dev/");
	strlcat(tbuf, devname, sizeof(tbuf));

	sc->pci_vtnet_rx = pci_vtnet_tap_rx;
	sc->pci_vtnet_tx = pci_vtnet_tap_tx;

	sc->vsc_tapfd = open(tbuf, O_RDWR);
	if (sc->vsc_tapfd == -1) {
		WPRINTF(("open of tap device %s failed\n", tbuf));
		return;
	}

	/*
	 * Set non-blocking and register for read
	 * notifications with the event loop
	 */
	if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) {
		WPRINTF(("tap device O_NONBLOCK failed\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
		return;
	}

	sc->vsc_mevp = mevent_add(sc->vsc_tapfd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		close(sc->vsc_tapfd);
		sc->vsc_tapfd = -1;
	}
}

static void
pci_vtnet_netmap_setup(struct pci_vtnet_softc *sc, char *ifname)
{
	sc->pci_vtnet_rx = pci_vtnet_netmap_rx;
	sc->pci_vtnet_tx = pci_vtnet_netmap_tx;

	sc->vsc_nmd = nm_open(ifname, NULL, 0, 0);
	if (sc->vsc_nmd == NULL) {
		WPRINTF(("open of netmap device %s failed\n", ifname));
		return;
	}

	sc->vsc_mevp = mevent_add(sc->vsc_nmd->fd,
				  EVF_READ,
				  pci_vtnet_rx_callback,
				  sc);
	if (sc->vsc_mevp == NULL) {
		WPRINTF(("Could not register event\n"));
		nm_close(sc->vsc_nmd);
		sc->vsc_nmd = NULL;
	}
}

static int
pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	MD5_CTX mdctx;
	unsigned char digest[16];
	char nstr[80];
	char tname[MAXCOMLEN + 1];
	struct pci_vtnet_softc *sc;
	char *devname;
	char *vtopts;
	int mac_provided;

	sc = calloc(1, sizeof(struct pci_vtnet_softc));

	pthread_mutex_init(&sc->vsc_mtx, NULL);

	vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues);
	sc->vsc_vs.vs_mtx = &sc->vsc_mtx;

	sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq;
	sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq;
#ifdef notyet
	sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ;
	sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq;
#endif

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified
	 */
	mac_provided = 0;
	sc->vsc_tapfd = -1;
	sc->vsc_nmd = NULL;
	if (opts != NULL) {
		int err;

		devname = vtopts = strdup(opts);
		(void) strsep(&vtopts, ",");

		if (vtopts != NULL) {
			err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac);
			if (err != 0) {
				free(devname);
				return (err);
			}
			mac_provided = 1;
		}

		if (strncmp(devname, "vale", 4) == 0)
			pci_vtnet_netmap_setup(sc, devname);
		if (strncmp(devname, "tap", 3) == 0 ||
		    strncmp(devname, "vmnet", 5) == 0)
			pci_vtnet_tap_setup(sc, devname);

		free(devname);
	}

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and dev name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);

		MD5Init(&mdctx);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);

		sc->vsc_config.mac[0] = 0x00;
		sc->vsc_config.mac[1] = 0xa0;
		sc->vsc_config.mac[2] = 0x98;
		sc->vsc_config.mac[3] = digest[0];
		sc->vsc_config.mac[4] = digest[1];
		sc->vsc_config.mac[5] = digest[2];
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	/* Link is up if we managed to open tap device. */
	sc->vsc_config.status = (opts == NULL || sc->vsc_tapfd >= 0);

	/* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */
	if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
		return (1);

	/* use BAR 0 to map config regs in IO space */
	vi_set_io_bar(&sc->vsc_vs, 0);

	sc->resetting = 0;

	sc->rx_merge = 1;
	sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr);
	sc->rx_in_progress = 0;
	pthread_mutex_init(&sc->rx_mtx, NULL);

	/*
	 * Initialize tx semaphore & spawn TX processing thread.
	 * As of now, only one thread for TX desc processing is
	 * spawned.
908 */ 909 sc->tx_in_progress = 0; 910 pthread_mutex_init(&sc->tx_mtx, NULL); 911 pthread_cond_init(&sc->tx_cond, NULL); 912 pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc); 913 snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot, 914 pi->pi_func); 915 pthread_set_name_np(sc->tx_tid, tname); 916 917 return (0); 918 } 919 920 static int 921 pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value) 922 { 923 struct pci_vtnet_softc *sc = vsc; 924 void *ptr; 925 926 if (offset < 6) { 927 assert(offset + size <= 6); 928 /* 929 * The driver is allowed to change the MAC address 930 */ 931 ptr = &sc->vsc_config.mac[offset]; 932 memcpy(ptr, &value, size); 933 } else { 934 /* silently ignore other writes */ 935 DPRINTF(("vtnet: write to readonly reg %d\n\r", offset)); 936 } 937 938 return (0); 939 } 940 941 static int 942 pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval) 943 { 944 struct pci_vtnet_softc *sc = vsc; 945 void *ptr; 946 947 ptr = (uint8_t *)&sc->vsc_config + offset; 948 memcpy(retval, ptr, size); 949 return (0); 950 } 951 952 static void 953 pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features) 954 { 955 struct pci_vtnet_softc *sc = vsc; 956 957 sc->vsc_features = negotiated_features; 958 959 if (!(sc->vsc_features & VIRTIO_NET_F_MRG_RXBUF)) { 960 sc->rx_merge = 0; 961 /* non-merge rx header is 2 bytes shorter */ 962 sc->rx_vhdrlen -= 2; 963 } 964 } 965 966 struct pci_devemu pci_de_vnet = { 967 .pe_emu = "virtio-net", 968 .pe_init = pci_vtnet_init, 969 .pe_barwrite = vi_pci_write, 970 .pe_barread = vi_pci_read 971 }; 972 PCI_EMUL_SET(pci_de_vnet); 973