1 /*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/linker_set.h> 34 #include <sys/select.h> 35 #include <sys/uio.h> 36 #include <sys/ioctl.h> 37 #include <machine/atomic.h> 38 #include <net/ethernet.h> 39 #ifndef NETMAP_WITH_LIBS 40 #define NETMAP_WITH_LIBS 41 #endif 42 #include <net/netmap_user.h> 43 44 #include <errno.h> 45 #include <fcntl.h> 46 #include <stdio.h> 47 #include <stdlib.h> 48 #include <stdint.h> 49 #include <string.h> 50 #include <strings.h> 51 #include <unistd.h> 52 #include <assert.h> 53 #include <md5.h> 54 #include <pthread.h> 55 #include <pthread_np.h> 56 57 #include "bhyverun.h" 58 #include "pci_emul.h" 59 #include "mevent.h" 60 #include "virtio.h" 61 62 #define VTNET_RINGSZ 1024 63 64 #define VTNET_MAXSEGS 256 65 66 /* 67 * Host capabilities. Note that we only offer a few of these. 68 */ 69 #define VIRTIO_NET_F_CSUM (1 << 0) /* host handles partial cksum */ 70 #define VIRTIO_NET_F_GUEST_CSUM (1 << 1) /* guest handles partial cksum */ 71 #define VIRTIO_NET_F_MAC (1 << 5) /* host supplies MAC */ 72 #define VIRTIO_NET_F_GSO_DEPREC (1 << 6) /* deprecated: host handles GSO */ 73 #define VIRTIO_NET_F_GUEST_TSO4 (1 << 7) /* guest can rcv TSOv4 */ 74 #define VIRTIO_NET_F_GUEST_TSO6 (1 << 8) /* guest can rcv TSOv6 */ 75 #define VIRTIO_NET_F_GUEST_ECN (1 << 9) /* guest can rcv TSO with ECN */ 76 #define VIRTIO_NET_F_GUEST_UFO (1 << 10) /* guest can rcv UFO */ 77 #define VIRTIO_NET_F_HOST_TSO4 (1 << 11) /* host can rcv TSOv4 */ 78 #define VIRTIO_NET_F_HOST_TSO6 (1 << 12) /* host can rcv TSOv6 */ 79 #define VIRTIO_NET_F_HOST_ECN (1 << 13) /* host can rcv TSO with ECN */ 80 #define VIRTIO_NET_F_HOST_UFO (1 << 14) /* host can rcv UFO */ 81 #define VIRTIO_NET_F_MRG_RXBUF (1 << 15) /* host can merge RX buffers */ 82 #define VIRTIO_NET_F_STATUS (1 << 16) /* config status field available */ 83 #define VIRTIO_NET_F_CTRL_VQ (1 << 17) /* control channel available */ 84 #define VIRTIO_NET_F_CTRL_RX (1 << 18) /* control channel RX mode support */ 85 #define VIRTIO_NET_F_CTRL_VLAN (1 << 19) /* control channel VLAN filtering */ 86 #define VIRTIO_NET_F_GUEST_ANNOUNCE \ 87 (1 << 21) /* guest can send gratuitous pkts */ 88 89 #define VTNET_S_HOSTCAPS \ 90 ( VIRTIO_NET_F_MAC | VIRTIO_NET_F_MRG_RXBUF | VIRTIO_NET_F_STATUS | \ 91 VIRTIO_F_NOTIFY_ON_EMPTY | VIRTIO_RING_F_INDIRECT_DESC) 92 93 /* 94 * PCI config-space "registers" 95 */ 96 struct virtio_net_config { 97 uint8_t mac[6]; 98 uint16_t status; 99 } __packed; 100 101 /* 102 * Queue definitions. 103 */ 104 #define VTNET_RXQ 0 105 #define VTNET_TXQ 1 106 #define VTNET_CTLQ 2 /* NB: not yet supported */ 107 108 #define VTNET_MAXQ 3 109 110 /* 111 * Fixed network header size 112 */ 113 struct virtio_net_rxhdr { 114 uint8_t vrh_flags; 115 uint8_t vrh_gso_type; 116 uint16_t vrh_hdr_len; 117 uint16_t vrh_gso_size; 118 uint16_t vrh_csum_start; 119 uint16_t vrh_csum_offset; 120 uint16_t vrh_bufs; 121 } __packed; 122 123 /* 124 * Debug printf 125 */ 126 static int pci_vtnet_debug; 127 #define DPRINTF(params) if (pci_vtnet_debug) printf params 128 #define WPRINTF(params) printf params 129 130 /* 131 * Per-device softc 132 */ 133 struct pci_vtnet_softc { 134 struct virtio_softc vsc_vs; 135 struct vqueue_info vsc_queues[VTNET_MAXQ - 1]; 136 pthread_mutex_t vsc_mtx; 137 struct mevent *vsc_mevp; 138 139 int vsc_tapfd; 140 struct nm_desc *vsc_nmd; 141 142 int vsc_rx_ready; 143 volatile int resetting; /* set and checked outside lock */ 144 145 uint64_t vsc_features; /* negotiated features */ 146 147 struct virtio_net_config vsc_config; 148 149 pthread_mutex_t rx_mtx; 150 int rx_in_progress; 151 int rx_vhdrlen; 152 int rx_merge; /* merged rx bufs in use */ 153 154 pthread_t tx_tid; 155 pthread_mutex_t tx_mtx; 156 pthread_cond_t tx_cond; 157 int tx_in_progress; 158 159 void (*pci_vtnet_rx)(struct pci_vtnet_softc *sc); 160 void (*pci_vtnet_tx)(struct pci_vtnet_softc *sc, struct iovec *iov, 161 int iovcnt, int len); 162 }; 163 164 static void pci_vtnet_reset(void *); 165 /* static void pci_vtnet_notify(void *, struct vqueue_info *); */ 166 static int pci_vtnet_cfgread(void *, int, int, uint32_t *); 167 static int pci_vtnet_cfgwrite(void *, int, int, uint32_t); 168 static void pci_vtnet_neg_features(void *, uint64_t); 169 170 static struct virtio_consts vtnet_vi_consts = { 171 "vtnet", /* our name */ 172 VTNET_MAXQ - 1, /* we currently support 2 virtqueues */ 173 sizeof(struct virtio_net_config), /* config reg size */ 174 pci_vtnet_reset, /* reset */ 175 NULL, /* device-wide qnotify -- not used */ 176 pci_vtnet_cfgread, /* read PCI config */ 177 pci_vtnet_cfgwrite, /* write PCI config */ 178 pci_vtnet_neg_features, /* apply negotiated features */ 179 VTNET_S_HOSTCAPS, /* our capabilities */ 180 }; 181 182 /* 183 * If the transmit thread is active then stall until it is done. 184 */ 185 static void 186 pci_vtnet_txwait(struct pci_vtnet_softc *sc) 187 { 188 189 pthread_mutex_lock(&sc->tx_mtx); 190 while (sc->tx_in_progress) { 191 pthread_mutex_unlock(&sc->tx_mtx); 192 usleep(10000); 193 pthread_mutex_lock(&sc->tx_mtx); 194 } 195 pthread_mutex_unlock(&sc->tx_mtx); 196 } 197 198 /* 199 * If the receive thread is active then stall until it is done. 200 */ 201 static void 202 pci_vtnet_rxwait(struct pci_vtnet_softc *sc) 203 { 204 205 pthread_mutex_lock(&sc->rx_mtx); 206 while (sc->rx_in_progress) { 207 pthread_mutex_unlock(&sc->rx_mtx); 208 usleep(10000); 209 pthread_mutex_lock(&sc->rx_mtx); 210 } 211 pthread_mutex_unlock(&sc->rx_mtx); 212 } 213 214 static void 215 pci_vtnet_reset(void *vsc) 216 { 217 struct pci_vtnet_softc *sc = vsc; 218 219 DPRINTF(("vtnet: device reset requested !\n")); 220 221 sc->resetting = 1; 222 223 /* 224 * Wait for the transmit and receive threads to finish their 225 * processing. 226 */ 227 pci_vtnet_txwait(sc); 228 pci_vtnet_rxwait(sc); 229 230 sc->vsc_rx_ready = 0; 231 sc->rx_merge = 1; 232 sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr); 233 234 /* now reset rings, MSI-X vectors, and negotiated capabilities */ 235 vi_reset_dev(&sc->vsc_vs); 236 237 sc->resetting = 0; 238 } 239 240 /* 241 * Called to send a buffer chain out to the tap device 242 */ 243 static void 244 pci_vtnet_tap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt, 245 int len) 246 { 247 static char pad[60]; /* all zero bytes */ 248 249 if (sc->vsc_tapfd == -1) 250 return; 251 252 /* 253 * If the length is < 60, pad out to that and add the 254 * extra zero'd segment to the iov. It is guaranteed that 255 * there is always an extra iov available by the caller. 256 */ 257 if (len < 60) { 258 iov[iovcnt].iov_base = pad; 259 iov[iovcnt].iov_len = 60 - len; 260 iovcnt++; 261 } 262 (void) writev(sc->vsc_tapfd, iov, iovcnt); 263 } 264 265 /* 266 * Called when there is read activity on the tap file descriptor. 267 * Each buffer posted by the guest is assumed to be able to contain 268 * an entire ethernet frame + rx header. 269 * MP note: the dummybuf is only used for discarding frames, so there 270 * is no need for it to be per-vtnet or locked. 271 */ 272 static uint8_t dummybuf[2048]; 273 274 static __inline struct iovec * 275 rx_iov_trim(struct iovec *iov, int *niov, int tlen) 276 { 277 struct iovec *riov; 278 279 /* XXX short-cut: assume first segment is >= tlen */ 280 assert(iov[0].iov_len >= tlen); 281 282 iov[0].iov_len -= tlen; 283 if (iov[0].iov_len == 0) { 284 assert(*niov > 1); 285 *niov -= 1; 286 riov = &iov[1]; 287 } else { 288 iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + tlen); 289 riov = &iov[0]; 290 } 291 292 return (riov); 293 } 294 295 static void 296 pci_vtnet_tap_rx(struct pci_vtnet_softc *sc) 297 { 298 struct iovec iov[VTNET_MAXSEGS], *riov; 299 struct vqueue_info *vq; 300 void *vrx; 301 int len, n; 302 uint16_t idx; 303 304 /* 305 * Should never be called without a valid tap fd 306 */ 307 assert(sc->vsc_tapfd != -1); 308 309 /* 310 * But, will be called when the rx ring hasn't yet 311 * been set up or the guest is resetting the device. 312 */ 313 if (!sc->vsc_rx_ready || sc->resetting) { 314 /* 315 * Drop the packet and try later. 316 */ 317 (void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf)); 318 return; 319 } 320 321 /* 322 * Check for available rx buffers 323 */ 324 vq = &sc->vsc_queues[VTNET_RXQ]; 325 if (!vq_has_descs(vq)) { 326 /* 327 * Drop the packet and try later. Interrupt on 328 * empty, if that's negotiated. 329 */ 330 (void) read(sc->vsc_tapfd, dummybuf, sizeof(dummybuf)); 331 vq_endchains(vq, 1); 332 return; 333 } 334 335 do { 336 /* 337 * Get descriptor chain. 338 */ 339 n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL); 340 assert(n >= 1 && n <= VTNET_MAXSEGS); 341 342 /* 343 * Get a pointer to the rx header, and use the 344 * data immediately following it for the packet buffer. 345 */ 346 vrx = iov[0].iov_base; 347 riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen); 348 349 len = readv(sc->vsc_tapfd, riov, n); 350 351 if (len < 0 && errno == EWOULDBLOCK) { 352 /* 353 * No more packets, but still some avail ring 354 * entries. Interrupt if needed/appropriate. 355 */ 356 vq_retchain(vq); 357 vq_endchains(vq, 0); 358 return; 359 } 360 361 /* 362 * The only valid field in the rx packet header is the 363 * number of buffers if merged rx bufs were negotiated. 364 */ 365 memset(vrx, 0, sc->rx_vhdrlen); 366 367 if (sc->rx_merge) { 368 struct virtio_net_rxhdr *vrxh; 369 370 vrxh = vrx; 371 vrxh->vrh_bufs = 1; 372 } 373 374 /* 375 * Release this chain and handle more chains. 376 */ 377 vq_relchain(vq, idx, len + sc->rx_vhdrlen); 378 } while (vq_has_descs(vq)); 379 380 /* Interrupt if needed, including for NOTIFY_ON_EMPTY. */ 381 vq_endchains(vq, 1); 382 } 383 384 static __inline int 385 pci_vtnet_netmap_writev(struct nm_desc *nmd, struct iovec *iov, int iovcnt) 386 { 387 int r, i; 388 int len = 0; 389 390 for (r = nmd->cur_tx_ring; ; ) { 391 struct netmap_ring *ring = NETMAP_TXRING(nmd->nifp, r); 392 uint32_t cur, idx; 393 char *buf; 394 395 if (nm_ring_empty(ring)) { 396 r++; 397 if (r > nmd->last_tx_ring) 398 r = nmd->first_tx_ring; 399 if (r == nmd->cur_tx_ring) 400 break; 401 continue; 402 } 403 cur = ring->cur; 404 idx = ring->slot[cur].buf_idx; 405 buf = NETMAP_BUF(ring, idx); 406 407 for (i = 0; i < iovcnt; i++) { 408 if (len + iov[i].iov_len > 2048) 409 break; 410 memcpy(&buf[len], iov[i].iov_base, iov[i].iov_len); 411 len += iov[i].iov_len; 412 } 413 ring->slot[cur].len = len; 414 ring->head = ring->cur = nm_ring_next(ring, cur); 415 nmd->cur_tx_ring = r; 416 ioctl(nmd->fd, NIOCTXSYNC, NULL); 417 break; 418 } 419 420 return (len); 421 } 422 423 static __inline int 424 pci_vtnet_netmap_readv(struct nm_desc *nmd, struct iovec *iov, int iovcnt) 425 { 426 int len = 0; 427 int i = 0; 428 int r; 429 430 for (r = nmd->cur_rx_ring; ; ) { 431 struct netmap_ring *ring = NETMAP_RXRING(nmd->nifp, r); 432 uint32_t cur, idx; 433 char *buf; 434 size_t left; 435 436 if (nm_ring_empty(ring)) { 437 r++; 438 if (r > nmd->last_rx_ring) 439 r = nmd->first_rx_ring; 440 if (r == nmd->cur_rx_ring) 441 break; 442 continue; 443 } 444 cur = ring->cur; 445 idx = ring->slot[cur].buf_idx; 446 buf = NETMAP_BUF(ring, idx); 447 left = ring->slot[cur].len; 448 449 for (i = 0; i < iovcnt && left > 0; i++) { 450 if (iov[i].iov_len > left) 451 iov[i].iov_len = left; 452 memcpy(iov[i].iov_base, &buf[len], iov[i].iov_len); 453 len += iov[i].iov_len; 454 left -= iov[i].iov_len; 455 } 456 ring->head = ring->cur = nm_ring_next(ring, cur); 457 nmd->cur_rx_ring = r; 458 ioctl(nmd->fd, NIOCRXSYNC, NULL); 459 break; 460 } 461 for (; i < iovcnt; i++) 462 iov[i].iov_len = 0; 463 464 return (len); 465 } 466 467 /* 468 * Called to send a buffer chain out to the vale port 469 */ 470 static void 471 pci_vtnet_netmap_tx(struct pci_vtnet_softc *sc, struct iovec *iov, int iovcnt, 472 int len) 473 { 474 static char pad[60]; /* all zero bytes */ 475 476 if (sc->vsc_nmd == NULL) 477 return; 478 479 /* 480 * If the length is < 60, pad out to that and add the 481 * extra zero'd segment to the iov. It is guaranteed that 482 * there is always an extra iov available by the caller. 483 */ 484 if (len < 60) { 485 iov[iovcnt].iov_base = pad; 486 iov[iovcnt].iov_len = 60 - len; 487 iovcnt++; 488 } 489 (void) pci_vtnet_netmap_writev(sc->vsc_nmd, iov, iovcnt); 490 } 491 492 static void 493 pci_vtnet_netmap_rx(struct pci_vtnet_softc *sc) 494 { 495 struct iovec iov[VTNET_MAXSEGS], *riov; 496 struct vqueue_info *vq; 497 void *vrx; 498 int len, n; 499 uint16_t idx; 500 501 /* 502 * Should never be called without a valid netmap descriptor 503 */ 504 assert(sc->vsc_nmd != NULL); 505 506 /* 507 * But, will be called when the rx ring hasn't yet 508 * been set up or the guest is resetting the device. 509 */ 510 if (!sc->vsc_rx_ready || sc->resetting) { 511 /* 512 * Drop the packet and try later. 513 */ 514 (void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf); 515 return; 516 } 517 518 /* 519 * Check for available rx buffers 520 */ 521 vq = &sc->vsc_queues[VTNET_RXQ]; 522 if (!vq_has_descs(vq)) { 523 /* 524 * Drop the packet and try later. Interrupt on 525 * empty, if that's negotiated. 526 */ 527 (void) nm_nextpkt(sc->vsc_nmd, (void *)dummybuf); 528 vq_endchains(vq, 1); 529 return; 530 } 531 532 do { 533 /* 534 * Get descriptor chain. 535 */ 536 n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL); 537 assert(n >= 1 && n <= VTNET_MAXSEGS); 538 539 /* 540 * Get a pointer to the rx header, and use the 541 * data immediately following it for the packet buffer. 542 */ 543 vrx = iov[0].iov_base; 544 riov = rx_iov_trim(iov, &n, sc->rx_vhdrlen); 545 546 len = pci_vtnet_netmap_readv(sc->vsc_nmd, riov, n); 547 548 if (len == 0) { 549 /* 550 * No more packets, but still some avail ring 551 * entries. Interrupt if needed/appropriate. 552 */ 553 vq_retchain(vq); 554 vq_endchains(vq, 0); 555 return; 556 } 557 558 /* 559 * The only valid field in the rx packet header is the 560 * number of buffers if merged rx bufs were negotiated. 561 */ 562 memset(vrx, 0, sc->rx_vhdrlen); 563 564 if (sc->rx_merge) { 565 struct virtio_net_rxhdr *vrxh; 566 567 vrxh = vrx; 568 vrxh->vrh_bufs = 1; 569 } 570 571 /* 572 * Release this chain and handle more chains. 573 */ 574 vq_relchain(vq, idx, len + sc->rx_vhdrlen); 575 } while (vq_has_descs(vq)); 576 577 /* Interrupt if needed, including for NOTIFY_ON_EMPTY. */ 578 vq_endchains(vq, 1); 579 } 580 581 static void 582 pci_vtnet_rx_callback(int fd, enum ev_type type, void *param) 583 { 584 struct pci_vtnet_softc *sc = param; 585 586 pthread_mutex_lock(&sc->rx_mtx); 587 sc->rx_in_progress = 1; 588 sc->pci_vtnet_rx(sc); 589 sc->rx_in_progress = 0; 590 pthread_mutex_unlock(&sc->rx_mtx); 591 592 } 593 594 static void 595 pci_vtnet_ping_rxq(void *vsc, struct vqueue_info *vq) 596 { 597 struct pci_vtnet_softc *sc = vsc; 598 599 /* 600 * A qnotify means that the rx process can now begin 601 */ 602 if (sc->vsc_rx_ready == 0) { 603 sc->vsc_rx_ready = 1; 604 vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY; 605 } 606 } 607 608 static void 609 pci_vtnet_proctx(struct pci_vtnet_softc *sc, struct vqueue_info *vq) 610 { 611 struct iovec iov[VTNET_MAXSEGS + 1]; 612 int i, n; 613 int plen, tlen; 614 uint16_t idx; 615 616 /* 617 * Obtain chain of descriptors. The first one is 618 * really the header descriptor, so we need to sum 619 * up two lengths: packet length and transfer length. 620 */ 621 n = vq_getchain(vq, &idx, iov, VTNET_MAXSEGS, NULL); 622 assert(n >= 1 && n <= VTNET_MAXSEGS); 623 plen = 0; 624 tlen = iov[0].iov_len; 625 for (i = 1; i < n; i++) { 626 plen += iov[i].iov_len; 627 tlen += iov[i].iov_len; 628 } 629 630 DPRINTF(("virtio: packet send, %d bytes, %d segs\n\r", plen, n)); 631 sc->pci_vtnet_tx(sc, &iov[1], n - 1, plen); 632 633 /* chain is processed, release it and set tlen */ 634 vq_relchain(vq, idx, tlen); 635 } 636 637 static void 638 pci_vtnet_ping_txq(void *vsc, struct vqueue_info *vq) 639 { 640 struct pci_vtnet_softc *sc = vsc; 641 642 /* 643 * Any ring entries to process? 644 */ 645 if (!vq_has_descs(vq)) 646 return; 647 648 /* Signal the tx thread for processing */ 649 pthread_mutex_lock(&sc->tx_mtx); 650 vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY; 651 if (sc->tx_in_progress == 0) 652 pthread_cond_signal(&sc->tx_cond); 653 pthread_mutex_unlock(&sc->tx_mtx); 654 } 655 656 /* 657 * Thread which will handle processing of TX desc 658 */ 659 static void * 660 pci_vtnet_tx_thread(void *param) 661 { 662 struct pci_vtnet_softc *sc = param; 663 struct vqueue_info *vq; 664 int error; 665 666 vq = &sc->vsc_queues[VTNET_TXQ]; 667 668 /* 669 * Let us wait till the tx queue pointers get initialised & 670 * first tx signaled 671 */ 672 pthread_mutex_lock(&sc->tx_mtx); 673 error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx); 674 assert(error == 0); 675 676 for (;;) { 677 /* note - tx mutex is locked here */ 678 while (sc->resetting || !vq_has_descs(vq)) { 679 vq->vq_used->vu_flags &= ~VRING_USED_F_NO_NOTIFY; 680 mb(); 681 if (!sc->resetting && vq_has_descs(vq)) 682 break; 683 684 sc->tx_in_progress = 0; 685 error = pthread_cond_wait(&sc->tx_cond, &sc->tx_mtx); 686 assert(error == 0); 687 } 688 vq->vq_used->vu_flags |= VRING_USED_F_NO_NOTIFY; 689 sc->tx_in_progress = 1; 690 pthread_mutex_unlock(&sc->tx_mtx); 691 692 do { 693 /* 694 * Run through entries, placing them into 695 * iovecs and sending when an end-of-packet 696 * is found 697 */ 698 pci_vtnet_proctx(sc, vq); 699 } while (vq_has_descs(vq)); 700 701 /* 702 * Generate an interrupt if needed. 703 */ 704 vq_endchains(vq, 1); 705 706 pthread_mutex_lock(&sc->tx_mtx); 707 } 708 } 709 710 #ifdef notyet 711 static void 712 pci_vtnet_ping_ctlq(void *vsc, struct vqueue_info *vq) 713 { 714 715 DPRINTF(("vtnet: control qnotify!\n\r")); 716 } 717 #endif 718 719 static int 720 pci_vtnet_parsemac(char *mac_str, uint8_t *mac_addr) 721 { 722 struct ether_addr *ea; 723 char *tmpstr; 724 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 725 726 tmpstr = strsep(&mac_str,"="); 727 728 if ((mac_str != NULL) && (!strcmp(tmpstr,"mac"))) { 729 ea = ether_aton(mac_str); 730 731 if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) || 732 memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) { 733 fprintf(stderr, "Invalid MAC %s\n", mac_str); 734 return (EINVAL); 735 } else 736 memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN); 737 } 738 739 return (0); 740 } 741 742 static void 743 pci_vtnet_tap_setup(struct pci_vtnet_softc *sc, char *devname) 744 { 745 char tbuf[80]; 746 747 strcpy(tbuf, "/dev/"); 748 strlcat(tbuf, devname, sizeof(tbuf)); 749 750 sc->pci_vtnet_rx = pci_vtnet_tap_rx; 751 sc->pci_vtnet_tx = pci_vtnet_tap_tx; 752 753 sc->vsc_tapfd = open(tbuf, O_RDWR); 754 if (sc->vsc_tapfd == -1) { 755 WPRINTF(("open of tap device %s failed\n", tbuf)); 756 return; 757 } 758 759 /* 760 * Set non-blocking and register for read 761 * notifications with the event loop 762 */ 763 int opt = 1; 764 if (ioctl(sc->vsc_tapfd, FIONBIO, &opt) < 0) { 765 WPRINTF(("tap device O_NONBLOCK failed\n")); 766 close(sc->vsc_tapfd); 767 sc->vsc_tapfd = -1; 768 } 769 770 sc->vsc_mevp = mevent_add(sc->vsc_tapfd, 771 EVF_READ, 772 pci_vtnet_rx_callback, 773 sc); 774 if (sc->vsc_mevp == NULL) { 775 WPRINTF(("Could not register event\n")); 776 close(sc->vsc_tapfd); 777 sc->vsc_tapfd = -1; 778 } 779 } 780 781 static void 782 pci_vtnet_netmap_setup(struct pci_vtnet_softc *sc, char *ifname) 783 { 784 sc->pci_vtnet_rx = pci_vtnet_netmap_rx; 785 sc->pci_vtnet_tx = pci_vtnet_netmap_tx; 786 787 sc->vsc_nmd = nm_open(ifname, NULL, 0, 0); 788 if (sc->vsc_nmd == NULL) { 789 WPRINTF(("open of netmap device %s failed\n", ifname)); 790 return; 791 } 792 793 sc->vsc_mevp = mevent_add(sc->vsc_nmd->fd, 794 EVF_READ, 795 pci_vtnet_rx_callback, 796 sc); 797 if (sc->vsc_mevp == NULL) { 798 WPRINTF(("Could not register event\n")); 799 nm_close(sc->vsc_nmd); 800 sc->vsc_nmd = NULL; 801 } 802 } 803 804 static int 805 pci_vtnet_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 806 { 807 MD5_CTX mdctx; 808 unsigned char digest[16]; 809 char nstr[80]; 810 char tname[MAXCOMLEN + 1]; 811 struct pci_vtnet_softc *sc; 812 char *devname; 813 char *vtopts; 814 int mac_provided; 815 816 sc = calloc(1, sizeof(struct pci_vtnet_softc)); 817 818 pthread_mutex_init(&sc->vsc_mtx, NULL); 819 820 vi_softc_linkup(&sc->vsc_vs, &vtnet_vi_consts, sc, pi, sc->vsc_queues); 821 sc->vsc_vs.vs_mtx = &sc->vsc_mtx; 822 823 sc->vsc_queues[VTNET_RXQ].vq_qsize = VTNET_RINGSZ; 824 sc->vsc_queues[VTNET_RXQ].vq_notify = pci_vtnet_ping_rxq; 825 sc->vsc_queues[VTNET_TXQ].vq_qsize = VTNET_RINGSZ; 826 sc->vsc_queues[VTNET_TXQ].vq_notify = pci_vtnet_ping_txq; 827 #ifdef notyet 828 sc->vsc_queues[VTNET_CTLQ].vq_qsize = VTNET_RINGSZ; 829 sc->vsc_queues[VTNET_CTLQ].vq_notify = pci_vtnet_ping_ctlq; 830 #endif 831 832 /* 833 * Attempt to open the tap device and read the MAC address 834 * if specified 835 */ 836 mac_provided = 0; 837 sc->vsc_tapfd = -1; 838 sc->vsc_nmd = NULL; 839 if (opts != NULL) { 840 int err; 841 842 devname = vtopts = strdup(opts); 843 (void) strsep(&vtopts, ","); 844 845 if (vtopts != NULL) { 846 err = pci_vtnet_parsemac(vtopts, sc->vsc_config.mac); 847 if (err != 0) { 848 free(devname); 849 return (err); 850 } 851 mac_provided = 1; 852 } 853 854 if (strncmp(devname, "vale", 4) == 0) 855 pci_vtnet_netmap_setup(sc, devname); 856 if (strncmp(devname, "tap", 3) == 0 || 857 strncmp(devname, "vmnet", 5) == 0) 858 pci_vtnet_tap_setup(sc, devname); 859 860 free(devname); 861 } 862 863 /* 864 * The default MAC address is the standard NetApp OUI of 00-a0-98, 865 * followed by an MD5 of the PCI slot/func number and dev name 866 */ 867 if (!mac_provided) { 868 snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot, 869 pi->pi_func, vmname); 870 871 MD5Init(&mdctx); 872 MD5Update(&mdctx, nstr, strlen(nstr)); 873 MD5Final(digest, &mdctx); 874 875 sc->vsc_config.mac[0] = 0x00; 876 sc->vsc_config.mac[1] = 0xa0; 877 sc->vsc_config.mac[2] = 0x98; 878 sc->vsc_config.mac[3] = digest[0]; 879 sc->vsc_config.mac[4] = digest[1]; 880 sc->vsc_config.mac[5] = digest[2]; 881 } 882 883 /* initialize config space */ 884 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_NET); 885 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR); 886 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK); 887 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_NET); 888 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR); 889 890 /* Link is up if we managed to open tap device or vale port. */ 891 sc->vsc_config.status = (opts == NULL || sc->vsc_tapfd >= 0 || 892 sc->vsc_nmd != NULL); 893 894 /* use BAR 1 to map MSI-X table and PBA, if we're using MSI-X */ 895 if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix())) 896 return (1); 897 898 /* use BAR 0 to map config regs in IO space */ 899 vi_set_io_bar(&sc->vsc_vs, 0); 900 901 sc->resetting = 0; 902 903 sc->rx_merge = 1; 904 sc->rx_vhdrlen = sizeof(struct virtio_net_rxhdr); 905 sc->rx_in_progress = 0; 906 pthread_mutex_init(&sc->rx_mtx, NULL); 907 908 /* 909 * Initialize tx semaphore & spawn TX processing thread. 910 * As of now, only one thread for TX desc processing is 911 * spawned. 912 */ 913 sc->tx_in_progress = 0; 914 pthread_mutex_init(&sc->tx_mtx, NULL); 915 pthread_cond_init(&sc->tx_cond, NULL); 916 pthread_create(&sc->tx_tid, NULL, pci_vtnet_tx_thread, (void *)sc); 917 snprintf(tname, sizeof(tname), "vtnet-%d:%d tx", pi->pi_slot, 918 pi->pi_func); 919 pthread_set_name_np(sc->tx_tid, tname); 920 921 return (0); 922 } 923 924 static int 925 pci_vtnet_cfgwrite(void *vsc, int offset, int size, uint32_t value) 926 { 927 struct pci_vtnet_softc *sc = vsc; 928 void *ptr; 929 930 if (offset < 6) { 931 assert(offset + size <= 6); 932 /* 933 * The driver is allowed to change the MAC address 934 */ 935 ptr = &sc->vsc_config.mac[offset]; 936 memcpy(ptr, &value, size); 937 } else { 938 /* silently ignore other writes */ 939 DPRINTF(("vtnet: write to readonly reg %d\n\r", offset)); 940 } 941 942 return (0); 943 } 944 945 static int 946 pci_vtnet_cfgread(void *vsc, int offset, int size, uint32_t *retval) 947 { 948 struct pci_vtnet_softc *sc = vsc; 949 void *ptr; 950 951 ptr = (uint8_t *)&sc->vsc_config + offset; 952 memcpy(retval, ptr, size); 953 return (0); 954 } 955 956 static void 957 pci_vtnet_neg_features(void *vsc, uint64_t negotiated_features) 958 { 959 struct pci_vtnet_softc *sc = vsc; 960 961 sc->vsc_features = negotiated_features; 962 963 if (!(sc->vsc_features & VIRTIO_NET_F_MRG_RXBUF)) { 964 sc->rx_merge = 0; 965 /* non-merge rx header is 2 bytes shorter */ 966 sc->rx_vhdrlen -= 2; 967 } 968 } 969 970 struct pci_devemu pci_de_vnet = { 971 .pe_emu = "virtio-net", 972 .pe_init = pci_vtnet_init, 973 .pe_barwrite = vi_pci_write, 974 .pe_barread = vi_pci_read 975 }; 976 PCI_EMUL_SET(pci_de_vnet); 977