/*
 * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>		/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

/*
 * Return 1 if the queue identified by 't' and 'idx' is in netmap mode.
 */
static int
vtnet_netmap_queue_on(struct vtnet_softc *sc, enum txrx t, int idx)
{
	struct netmap_adapter *na = NA(sc->vtnet_ifp);

	if (!nm_native_on(na))
		return 0;

	if (t == NR_RX)
		return !!(idx < na->num_rx_rings &&
			na->rx_rings[idx]->nr_mode == NKR_NETMAP_ON);

	return !!(idx < na->num_tx_rings &&
		na->tx_rings[idx]->nr_mode == NKR_NETMAP_ON);
}

static void
vtnet_free_used(struct virtqueue *vq, int netmap_bufs, enum txrx t, int idx)
{
	void *cookie;
	int deq = 0;

	while ((cookie = virtqueue_dequeue(vq, NULL)) != NULL) {
		if (netmap_bufs) {
			/* These are netmap buffers: there is nothing to do. */
		} else {
			/* These are mbufs that we need to free. */
			struct mbuf *m;

			if (t == NR_TX) {
				struct vtnet_tx_header *txhdr = cookie;
				m = txhdr->vth_mbuf;
				m_freem(m);
				uma_zfree(vtnet_tx_header_zone, txhdr);
			} else {
				m = cookie;
				m_freem(m);
			}
		}
		deq++;
	}

	if (deq)
		nm_prinf("%d sgs dequeued from %s-%d (netmap=%d)",
			deq, nm_txrx2str(t), idx, netmap_bufs);
}

/* Register and unregister. */
static int
vtnet_netmap_reg(struct netmap_adapter *na, int state)
{
	struct ifnet *ifp = na->ifp;
	struct vtnet_softc *sc = ifp->if_softc;
	int success;
	int i;

	/* Drain the taskqueues to make sure that there are no worker threads
	 * accessing the virtqueues. */
	vtnet_drain_taskqueues(sc);

	VTNET_CORE_LOCK(sc);

	/* We need nm_netmap_on() to return true when called by
	 * vtnet_init_locked() below. */
	if (state)
		nm_set_native_flags(na);

	/* We need to trigger a device reset in order to unexpose guest buffers
	 * published to the host. */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* Get pending used buffers. The way they are freed depends on whether
	 * they are netmap buffers or mbufs. We can tell the two cases apart
	 * by looking at kring->nr_mode, before it is possibly updated in the
	 * loop below. */
	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct netmap_kring *kring;

		VTNET_TXQ_LOCK(txq);
		kring = NMR(na, NR_TX)[i];
		vtnet_free_used(txq->vtntx_vq,
				kring->nr_mode == NKR_NETMAP_ON, NR_TX, i);
		VTNET_TXQ_UNLOCK(txq);

		VTNET_RXQ_LOCK(rxq);
		kring = NMR(na, NR_RX)[i];
		vtnet_free_used(rxq->vtnrx_vq,
				kring->nr_mode == NKR_NETMAP_ON, NR_RX, i);
		VTNET_RXQ_UNLOCK(rxq);
	}
	vtnet_init_locked(sc);
	success = (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 0 : ENXIO;

	if (state) {
		netmap_krings_mode_commit(na, state);
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
		netmap_krings_mode_commit(na, state);
	}

	VTNET_CORE_UNLOCK(sc);

	return success;
}
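
/*
 * Note on the datapath below (a summary of the code, not an addition to its
 * behavior): both directions describe each netmap buffer with a 2-element
 * sglist,
 *
 *	sg[0] = shared virtio-net header (vtntx_shrhdr or vtnrx_shrhdr,
 *		sc->vtnet_hdr_size bytes)
 *	sg[1] = the netmap buffer itself (physical address from PNMB())
 *
 * and pass the queue pointer (txq or rxq) as the virtqueue_enqueue() cookie,
 * so that entries returned by virtqueue_dequeue() can be recognized as ours.
 */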

/* Reconcile kernel and user view of the transmit ring. */
static int
vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr];
	struct virtqueue *vq = txq->vtntx_vq;
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);
	u_int n;

	/*
	 * First part: process new packets to send.
	 */
	rmb();

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct sglist *sg = txq->vtntx_sg;

		for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
			/* we use an empty header here */
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int err;

			NM_CHECK_ADDR_LEN(na, addr, len);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			/* Initialize the scatterlist, expose it to the
			 * hypervisor, and kick the hypervisor (if necessary).
			 */
			sglist_reset(sg); // cheap
			err = sglist_append(sg, &txq->vtntx_shrhdr,
					sc->vtnet_hdr_size);
			err |= sglist_append_phys(sg, paddr, len);
			KASSERT(err == 0, ("%s: cannot append to sglist %d",
					__func__, err));
			err = virtqueue_enqueue(vq, /*cookie=*/txq, sg,
					/*readable=*/sg->sg_nseg,
					/*writeable=*/0);
			if (unlikely(err)) {
				if (err != ENOSPC)
					nm_prerr("virtqueue_enqueue(%s) failed: %d",
							kring->name, err);
				break;
			}
		}

		virtqueue_notify(vq);

		/* Update hwcur depending on where we stopped. */
		kring->nr_hwcur = nm_i;	/* note we might break early */
	}

	/* Free used slots. We only consider our own used buffers, recognized
	 * by the token we passed to virtqueue_enqueue.
	 */
	n = 0;
	for (;;) {
		void *token = virtqueue_dequeue(vq, NULL);
		if (token == NULL)
			break;
		if (unlikely(token != (void *)txq))
			nm_prerr("BUG: TX token mismatch");
		else
			n++;
	}
	if (n > 0) {
		kring->nr_hwtail += n;
		if (kring->nr_hwtail > lim)
			kring->nr_hwtail -= lim + 1;
	}

	if (interrupts && virtqueue_nfree(vq) < 32)
		virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG);

	return 0;
}

static int
vtnet_netmap_kring_refill(struct netmap_kring *kring, u_int nm_i, u_int head)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int const lim = kring->nkr_num_slots - 1;

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	/* use a local sglist, default might be short */
	struct sglist_seg ss[2];
	struct sglist sg = { ss, 0, 0, 2 };

	for (; nm_i != head; nm_i = nm_next(nm_i, lim)) {
		struct netmap_slot *slot = &ring->slot[nm_i];
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);
		int err;

		if (addr == NETMAP_BUF_BASE(na)) { /* bad buf */
			if (netmap_ring_reinit(kring))
				return -1;
		}

		slot->flags &= ~NS_BUF_CHANGED;
		sglist_reset(&sg);
		err = sglist_append(&sg, &rxq->vtnrx_shrhdr,
				sc->vtnet_hdr_size);
		err |= sglist_append_phys(&sg, paddr, NETMAP_BUF_SIZE(na));
		KASSERT(err == 0, ("%s: cannot append to sglist %d",
				__func__, err));
		/* writable for the host */
		err = virtqueue_enqueue(vq, /*cookie=*/rxq, &sg,
				/*readable=*/0, /*writeable=*/sg.sg_nseg);
		if (unlikely(err)) {
			if (err != ENOSPC)
				nm_prerr("virtqueue_enqueue(%s) failed: %d",
						kring->name, err);
			break;
		}
	}

	return nm_i;
}

/*
 * Publish netmap buffers on a RX virtqueue.
 * Returns -1 if this virtqueue is not being opened in netmap mode.
 * If the virtqueue is being opened in netmap mode, return 0 on success and
 * a positive error code on failure.
 */
static int
vtnet_netmap_rxq_populate(struct vtnet_rxq *rxq)
{
	struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp);
	struct netmap_kring *kring;
	int error;

	if (!nm_native_on(na) || rxq->vtnrx_id >= na->num_rx_rings)
		return -1;

	kring = na->rx_rings[rxq->vtnrx_id];
	if (!(nm_kring_pending_on(kring) ||
			kring->nr_pending_mode == NKR_NETMAP_ON))
		return -1;

	/* Expose all the RX netmap buffers. Note that the number of
	 * netmap slots in the RX ring matches the maximum number of
	 * 2-element sglists that the RX virtqueue can accommodate. */
	error = vtnet_netmap_kring_refill(kring, 0, na->num_rx_desc);
	virtqueue_notify(rxq->vtnrx_vq);

	return error < 0 ? ENXIO : 0;
}
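
/*
 * Sketch of the expected caller (in the vtnet driver proper, shown only as
 * an illustration, not part of this file): the RX ring population path would
 * first offer the ring to netmap and fall back to the mbuf refill only when
 * -1 is returned, e.g.
 *
 *	error = vtnet_netmap_rxq_populate(rxq);
 *	if (error != -1)
 *		return (error);
 *	// otherwise populate the ring with mbufs as usual
 */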

/* Reconcile kernel and user view of the receive ring. */
static int
vtnet_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int ring_nr = kring->ring_id;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) ||
			(kring->nr_kflags & NKR_PENDINTR);
	int interrupts = !(kring->nr_kflags & NKR_NOINTR);

	/* device-specific */
	struct vtnet_softc *sc = ifp->if_softc;
	struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr];
	struct virtqueue *vq = rxq->vtnrx_vq;

	rmb();
	/*
	 * First part: import newly received packets.
	 * Only accept our own buffers (matching the token). We should only get
	 * matching buffers. We may need to stop early to avoid hwtail
	 * overrunning hwcur.
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		void *token;

		vtnet_rxq_disable_intr(rxq);

		nm_i = kring->nr_hwtail;
		while (nm_i != hwtail_lim) {
			int len;
			token = virtqueue_dequeue(vq, &len);
			if (token == NULL) {
				if (interrupts && vtnet_rxq_enable_intr(rxq)) {
					vtnet_rxq_disable_intr(rxq);
					continue;
				}
				break;
			}
			if (unlikely(token != (void *)rxq)) {
				nm_prerr("BUG: RX token mismatch");
			} else {
				/* Skip the virtio-net header. */
				len -= sc->vtnet_hdr_size;
				if (unlikely(len < 0)) {
					nm_prlim(1, "Truncated virtio-net header, "
							"missing %d bytes", -len);
					len = 0;
				}
				ring->slot[nm_i].len = len;
				ring->slot[nm_i].flags = 0;
				nm_i = nm_next(nm_i, lim);
			}
		}
		kring->nr_hwtail = nm_i;
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	nm_prdis("[B] h %d c %d hwcur %d hwtail %d", ring->head, ring->cur,
			kring->nr_hwcur, kring->nr_hwtail);

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur; /* netmap ring index */
	if (nm_i != head) {
		int nm_j = vtnet_netmap_kring_refill(kring, nm_i, head);
		if (nm_j < 0)
			return nm_j;
		kring->nr_hwcur = nm_j;
		virtqueue_notify(vq);
	}

	nm_prdis("[C] h %d c %d t %d hwcur %d hwtail %d", ring->head, ring->cur,
			ring->tail, kring->nr_hwcur, kring->nr_hwtail);

	return 0;
}


/* Enable/disable interrupts on all virtqueues. */
static void
vtnet_netmap_intr(struct netmap_adapter *na, int state)
{
	struct vtnet_softc *sc = na->ifp->if_softc;
	int i;

	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
		struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i];
		struct vtnet_txq *txq = &sc->vtnet_txqs[i];
		struct virtqueue *txvq = txq->vtntx_vq;

		if (state) {
			vtnet_rxq_enable_intr(rxq);
			virtqueue_enable_intr(txvq);
		} else {
			vtnet_rxq_disable_intr(rxq);
			virtqueue_disable_intr(txvq);
		}
	}
}

static int
vtnet_netmap_tx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * transmitted, therefore each call to virtqueue_enqueue() passes a
	 * 2-element sglist.
	 * TX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each TX sglist. Without them, we need two separate virtio
	 * descriptors for each TX sglist. We therefore compute the number of
	 * netmap TX slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div;
}
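
/*
 * Worked example for the computation above (illustrative numbers): a
 * virtqueue with 256 entries yields 256 netmap slots when indirect
 * descriptors are in use (each 2-element sglist consumes a single
 * descriptor chain entry), and 256 / 2 = 128 slots otherwise (header and
 * payload each take one descriptor). The RX computation below follows the
 * same rule.
 */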

static int
vtnet_netmap_rx_slots(struct vtnet_softc *sc)
{
	int div;

	/* We need to prepend a virtio-net header to each netmap buffer to be
	 * received, therefore each call to virtqueue_enqueue() passes a
	 * 2-element sglist.
	 * RX virtqueues use indirect descriptors if the feature was negotiated
	 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect
	 * descriptors, a single virtio descriptor is sufficient to reference
	 * each RX sglist. Without them, we need two separate virtio
	 * descriptors for each RX sglist. We therefore compute the number of
	 * netmap RX slots according to these assumptions.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1)
		div = 1;
	else
		div = 2;

	return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div;
}

static int
vtnet_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
{
	struct vtnet_softc *sc = na->ifp->if_softc;

	info->num_tx_rings = sc->vtnet_act_vq_pairs;
	info->num_rx_rings = sc->vtnet_act_vq_pairs;
	info->num_tx_descs = vtnet_netmap_tx_slots(sc);
	info->num_rx_descs = vtnet_netmap_rx_slots(sc);
	info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

	return 0;
}

static void
vtnet_netmap_attach(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->vtnet_ifp;
	na.na_flags = 0;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.rx_buf_maxsize = 0;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;

	netmap_attach(&na);

	nm_prinf("vtnet attached txq=%d, txd=%d rxq=%d, rxd=%d",
			na.num_tx_rings, na.num_tx_desc,
			na.num_rx_rings, na.num_rx_desc);
}
/* end of file */