Line listing from the FreeBSD ptnet netmap driver, sys/dev/netmap/if_ptnet.c (cross-referencer search results for "irq" and "mirror").

120 struct resource *irq; member
137 #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock)
138 #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock)
139 #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock)
149 /* Mirror of PTFEAT register. */
174 #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock)
175 #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)
262 pq->stats.kicks ++; in ptnet_kick()
264 bus_write_4(pq->sc->iomem, pq->kick, 0); in ptnet_kick()
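
The two ptnet_kick() lines above bump the queue's kick counter and write 0 to its per-queue doorbell ("kick") register in the device I/O BAR; the write itself is the notification. A minimal user-space sketch of the same idea, assuming the doorbell is already mapped as a volatile 32-bit word (struct and field names here are illustrative, not the driver's):

    #include <stdint.h>

    struct kick_stats { uint64_t kicks; };

    struct queue {
        struct kick_stats stats;
        volatile uint32_t *doorbell;   /* would point into the mapped I/O BAR */
    };

    /* Notify the host side that new work is available on this queue. */
    static void queue_kick(struct queue *q)
    {
        q->stats.kicks++;
        *q->doorbell = 0;   /* the store is the event; the driver also writes 0 */
    }
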
294 sc->dev = dev; in ptnet_attach()
300 sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, in ptnet_attach()
302 if (sc->iomem == NULL) { in ptnet_attach()
311 bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ in ptnet_attach()
312 ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ in ptnet_attach()
313 sc->ptfeatures = ptfeatures; in ptnet_attach()
315 num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); in ptnet_attach()
316 num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); in ptnet_attach()
317 sc->num_rings = num_tx_rings + num_rx_rings; in ptnet_attach()
318 sc->num_tx_rings = num_tx_rings; in ptnet_attach()
320 if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { in ptnet_attach()
322 sc->num_rings); in ptnet_attach()
328 sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, in ptnet_attach()
329 (size_t)0, -1UL, PAGE_SIZE, 0); in ptnet_attach()
330 if (sc->csb_gh == NULL) { in ptnet_attach()
335 sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); in ptnet_attach()
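
ptnet_attach() allocates one contiguous, page-aligned buffer of two pages for the CSB (Communication Status Block) and splits it in half: the first page holds the guest-to-host entries (csb_gh, later used as per-queue atok pointers) and the second the host-to-guest entries (csb_hg, the ktoa pointers), which is why the ring count is first checked against PAGE_SIZE. A small stand-alone sketch of that sizing check, with an assumed entry size (the driver uses sizeof(struct nm_csb_atok) from the netmap headers):

    #include <stdio.h>

    #define PAGE_SIZE_BYTES 4096u
    #define CSB_ENTRY_SIZE    64u   /* assumption for illustration only */

    int main(void)
    {
        unsigned num_rings = 4;     /* num_tx_rings + num_rx_rings */

        if (num_rings * CSB_ENTRY_SIZE > PAGE_SIZE_BYTES) {
            fprintf(stderr, "%u rings do not fit in one CSB page\n", num_rings);
            return 1;
        }
        printf("one page holds at most %u CSB entries\n",
               PAGE_SIZE_BYTES / CSB_ENTRY_SIZE);
        return 0;
    }
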
342 uint64_t paddr = vtophys(sc->csb_gh); in ptnet_attach()
346 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, in ptnet_attach()
348 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, in ptnet_attach()
350 paddr = vtophys(sc->csb_hg); in ptnet_attach()
351 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, in ptnet_attach()
353 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, in ptnet_attach()
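
The CSB physical addresses obtained with vtophys() are 64-bit values, but the device registers are 32 bits wide, so each address is programmed as a high/low pair (PTNET_IO_CSB_*_BAH, then PTNET_IO_CSB_*_BAL). The split is plain shifting and masking, as in this sketch:

    #include <stdint.h>

    /* High and low 32-bit halves of a 64-bit physical address; the driver
     * writes the high half to the *_BAH register and the low half to *_BAL. */
    static inline uint32_t addr_high(uint64_t paddr) { return (uint32_t)(paddr >> 32); }
    static inline uint32_t addr_low(uint64_t paddr)  { return (uint32_t)(paddr & 0xffffffffu); }
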
357 /* Allocate and initialize per-queue data structures. */ in ptnet_attach()
358 sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, in ptnet_attach()
360 if (sc->queues == NULL) { in ptnet_attach()
364 sc->rxqueues = sc->queues + num_tx_rings; in ptnet_attach()
366 for (i = 0; i < sc->num_rings; i++) { in ptnet_attach()
367 struct ptnet_queue *pq = sc->queues + i; in ptnet_attach()
369 pq->sc = sc; in ptnet_attach()
370 pq->kring_id = i; in ptnet_attach()
371 pq->kick = PTNET_IO_KICK_BASE + 4 * i; in ptnet_attach()
372 pq->atok = sc->csb_gh + i; in ptnet_attach()
373 pq->ktoa = sc->csb_hg + i; in ptnet_attach()
374 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", in ptnet_attach()
376 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); in ptnet_attach()
379 pq->kring_id -= num_tx_rings; in ptnet_attach()
382 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, in ptnet_attach()
383 M_DEVBUF, M_NOWAIT, &pq->lock); in ptnet_attach()
384 if (pq->bufring == NULL) { in ptnet_attach()
391 sc->min_tx_space = 64; /* Safe initial value. */ in ptnet_attach()
399 sc->ifp = ifp = if_alloc(IFT_ETHER); in ptnet_attach()
410 ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, in ptnet_attach()
412 ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); in ptnet_attach()
413 ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); in ptnet_attach()
415 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); in ptnet_attach()
416 sc->hwaddr[0] = (macreg >> 8) & 0xff; in ptnet_attach()
417 sc->hwaddr[1] = macreg & 0xff; in ptnet_attach()
418 macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); in ptnet_attach()
419 sc->hwaddr[2] = (macreg >> 24) & 0xff; in ptnet_attach()
420 sc->hwaddr[3] = (macreg >> 16) & 0xff; in ptnet_attach()
421 sc->hwaddr[4] = (macreg >> 8) & 0xff; in ptnet_attach()
422 sc->hwaddr[5] = macreg & 0xff; in ptnet_attach()
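
The MAC address is exported through two 32-bit registers: PTNET_IO_MAC_HI holds the two most significant bytes and PTNET_IO_MAC_LO the remaining four, and ptnet_attach() unpacks them most-significant byte first. A self-contained sketch of the same unpacking (the register values below are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mac_hi = 0x00000002;    /* example contents of PTNET_IO_MAC_HI */
        uint32_t mac_lo = 0xabcdef01;    /* example contents of PTNET_IO_MAC_LO */
        uint8_t hwaddr[6];

        hwaddr[0] = (mac_hi >> 8) & 0xff;
        hwaddr[1] = mac_hi & 0xff;
        hwaddr[2] = (mac_lo >> 24) & 0xff;
        hwaddr[3] = (mac_lo >> 16) & 0xff;
        hwaddr[4] = (mac_lo >> 8) & 0xff;
        hwaddr[5] = mac_lo & 0xff;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               hwaddr[0], hwaddr[1], hwaddr[2],
               hwaddr[3], hwaddr[4], hwaddr[5]);   /* prints 00:02:ab:cd:ef:01 */
        return 0;
    }
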
424 ether_ifattach(ifp, sc->hwaddr); in ptnet_attach()
429 if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { in ptnet_attach()
446 snprintf(sc->lock_name, sizeof(sc->lock_name), in ptnet_attach()
448 mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); in ptnet_attach()
449 callout_init_mtx(&sc->tick, &sc->lock, 0); in ptnet_attach()
452 nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); in ptnet_attach()
455 na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); in ptnet_attach()
456 na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); in ptnet_attach()
469 bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); in ptnet_attach()
474 sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); in ptnet_attach()
476 /* If virtio-net header was negotiated, set the virt_hdr_len field in in ptnet_attach()
490 /* Stop host sync-kloop if it was running. */
495 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); in ptnet_device_shutdown()
496 bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); in ptnet_device_shutdown()
497 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); in ptnet_device_shutdown()
498 bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); in ptnet_device_shutdown()
510 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) { in ptnet_detach()
511 ether_poll_deregister(sc->ifp); in ptnet_detach()
514 callout_drain(&sc->tick); in ptnet_detach()
516 if (sc->queues) { in ptnet_detach()
518 for (i = 0; i < sc->num_rings; i++) { in ptnet_detach()
519 struct ptnet_queue *pq = sc->queues + i; in ptnet_detach()
521 if (pq->taskq) { in ptnet_detach()
522 taskqueue_drain(pq->taskq, &pq->task); in ptnet_detach()
527 if (sc->ifp) { in ptnet_detach()
528 ether_ifdetach(sc->ifp); in ptnet_detach()
531 netmap_detach(sc->ifp); in ptnet_detach()
533 ifmedia_removeall(&sc->media); in ptnet_detach()
534 if_free(sc->ifp); in ptnet_detach()
535 sc->ifp = NULL; in ptnet_detach()
540 if (sc->csb_gh) { in ptnet_detach()
541 free(sc->csb_gh, M_DEVBUF); in ptnet_detach()
542 sc->csb_gh = NULL; in ptnet_detach()
543 sc->csb_hg = NULL; in ptnet_detach()
546 if (sc->queues) { in ptnet_detach()
547 for (i = 0; i < sc->num_rings; i++) { in ptnet_detach()
548 struct ptnet_queue *pq = sc->queues + i; in ptnet_detach()
550 if (mtx_initialized(&pq->lock)) { in ptnet_detach()
551 mtx_destroy(&pq->lock); in ptnet_detach()
553 if (pq->bufring != NULL) { in ptnet_detach()
554 buf_ring_free(pq->bufring, M_DEVBUF); in ptnet_detach()
557 free(sc->queues, M_DEVBUF); in ptnet_detach()
558 sc->queues = NULL; in ptnet_detach()
561 if (sc->iomem) { in ptnet_detach()
563 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); in ptnet_detach()
564 sc->iomem = NULL; in ptnet_detach()
567 mtx_destroy(&sc->lock); in ptnet_detach()
608 int nvecs = sc->num_rings; in ptnet_irqs_init()
609 device_t dev = sc->dev; in ptnet_irqs_init()
615 device_printf(dev, "Could not find MSI-X capability\n"); in ptnet_irqs_init()
619 sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, in ptnet_irqs_init()
621 if (sc->msix_mem == NULL) { in ptnet_irqs_init()
627 device_printf(dev, "Not enough MSI-X vectors\n"); in ptnet_irqs_init()
633 device_printf(dev, "Failed to allocate MSI-X vectors\n"); in ptnet_irqs_init()
638 struct ptnet_queue *pq = sc->queues + i; in ptnet_irqs_init()
641 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, in ptnet_irqs_init()
643 if (pq->irq == NULL) { in ptnet_irqs_init()
653 struct ptnet_queue *pq = sc->queues + i; in ptnet_irqs_init()
656 if (i >= sc->num_tx_rings) { in ptnet_irqs_init()
659 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, in ptnet_irqs_init()
661 pq, &pq->cookie); in ptnet_irqs_init()
668 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); in ptnet_irqs_init()
670 bus_bind_intr(sc->dev, pq->irq, cpu_cur); in ptnet_irqs_init()
675 device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); in ptnet_irqs_init()
679 struct ptnet_queue *pq = sc->queues + i; in ptnet_irqs_init()
681 if (i < sc->num_tx_rings) in ptnet_irqs_init()
682 TASK_INIT(&pq->task, 0, ptnet_tx_task, pq); in ptnet_irqs_init()
684 NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq); in ptnet_irqs_init()
686 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, in ptnet_irqs_init()
687 taskqueue_thread_enqueue, &pq->taskq); in ptnet_irqs_init()
688 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", in ptnet_irqs_init()
689 device_get_nameunit(sc->dev), cpu_cur); in ptnet_irqs_init()
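
ptnet_irqs_init() requests one MSI-X vector per ring, allocates each interrupt with resource id i + 1 (the matching bus_release_resource() call in ptnet_irqs_fini() below uses the same i + 1), and binds the vectors to CPUs in round-robin order; the per-queue taskqueue threads are named after the same CPU cursor. A minimal sketch of that assignment policy, without the newbus/interrupt APIs (the helper names here are made up):

    #include <stdio.h>

    /* Spread num_queues interrupt vectors over num_cpus CPUs round-robin,
     * analogous to the cpu_cur cursor the driver passes to bus_bind_intr(). */
    static void assign_vectors(int num_queues, int num_cpus)
    {
        int cpu_cur = 0;

        for (int i = 0; i < num_queues; i++) {
            int rid = i + 1;   /* interrupt resource ids start at 1 */
            printf("queue %d -> rid %d, CPU %d\n", i, rid, cpu_cur);
            cpu_cur = (cpu_cur + 1) % num_cpus;
        }
    }

    int main(void) { assign_vectors(4, 2); return 0; }
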
702 device_t dev = sc->dev; in ptnet_irqs_fini()
705 for (i = 0; i < sc->num_rings; i++) { in ptnet_irqs_fini()
706 struct ptnet_queue *pq = sc->queues + i; in ptnet_irqs_fini()
708 if (pq->taskq) { in ptnet_irqs_fini()
709 taskqueue_free(pq->taskq); in ptnet_irqs_fini()
710 pq->taskq = NULL; in ptnet_irqs_fini()
713 if (pq->cookie) { in ptnet_irqs_fini()
714 bus_teardown_intr(dev, pq->irq, pq->cookie); in ptnet_irqs_fini()
715 pq->cookie = NULL; in ptnet_irqs_fini()
718 if (pq->irq) { in ptnet_irqs_fini()
719 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); in ptnet_irqs_fini()
720 pq->irq = NULL; in ptnet_irqs_fini()
724 if (sc->msix_mem) { in ptnet_irqs_fini()
729 sc->msix_mem); in ptnet_irqs_fini()
730 sc->msix_mem = NULL; in ptnet_irqs_fini()
748 device_t dev = sc->dev; in ptnet_ioctl()
770 ifr->ifr_reqcap, if_getcapenable(ifp)); in ptnet_ioctl()
771 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); in ptnet_ioctl()
777 if (ifr->ifr_reqcap & IFCAP_POLLING) { in ptnet_ioctl()
784 for (i = 0; i < sc->num_rings; i++) { in ptnet_ioctl()
785 pq = sc->queues + i; in ptnet_ioctl()
789 pq->atok->appl_need_kick = 0; in ptnet_ioctl()
792 if (pq->taskq) { in ptnet_ioctl()
793 taskqueue_drain(pq->taskq, in ptnet_ioctl()
794 &pq->task); in ptnet_ioctl()
800 for (i = 0; i < sc->num_rings; i++) { in ptnet_ioctl()
801 pq = sc->queues + i; in ptnet_ioctl()
803 pq->atok->appl_need_kick = 1; in ptnet_ioctl()
809 if_setcapenable(ifp, ifr->ifr_reqcap); in ptnet_ioctl()
814 if (ifr->ifr_mtu < ETHERMIN || in ptnet_ioctl()
815 ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { in ptnet_ioctl()
819 if_setmtu(ifp, ifr->ifr_mtu); in ptnet_ioctl()
826 err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); in ptnet_ioctl()
840 if_t ifp = sc->ifp; in ptnet_init_locked()
841 struct netmap_adapter *na_dr = &sc->ptna->dr.up; in ptnet_init_locked()
842 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; in ptnet_init_locked()
850 device_printf(sc->dev, "%s\n", __func__); in ptnet_init_locked()
868 ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); in ptnet_init_locked()
870 device_printf(sc->dev, "netmap_mem_finalize() failed\n"); in ptnet_init_locked()
874 if (sc->ptna->backend_users == 0) { in ptnet_init_locked()
877 device_printf(sc->dev, "ptnet_nm_krings_create() " in ptnet_init_locked()
884 device_printf(sc->dev, "netmap_mem_rings_create() " in ptnet_init_locked()
889 ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); in ptnet_init_locked()
891 device_printf(sc->dev, "netmap_mem_get_lut() " in ptnet_init_locked()
905 sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; in ptnet_init_locked()
906 device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, in ptnet_init_locked()
907 sc->min_tx_space); in ptnet_init_locked()
909 callout_reset(&sc->tick, hz, ptnet_tick, sc); in ptnet_init_locked()
917 memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); in ptnet_init_locked()
923 netmap_mem_deref(na_dr->nm_mem, na_dr); in ptnet_init_locked()
932 if_t ifp = sc->ifp; in ptnet_stop()
933 struct netmap_adapter *na_dr = &sc->ptna->dr.up; in ptnet_stop()
934 struct netmap_adapter *na_nm = &sc->ptna->hwup.up; in ptnet_stop()
937 device_printf(sc->dev, "%s\n", __func__); in ptnet_stop()
943 /* Clear the driver-ready flag, and synchronize with all the queues, in ptnet_stop()
947 callout_stop(&sc->tick); in ptnet_stop()
948 for (i = 0; i < sc->num_rings; i++) { in ptnet_stop()
949 PTNET_Q_LOCK(sc->queues + i); in ptnet_stop()
950 PTNET_Q_UNLOCK(sc->queues + i); in ptnet_stop()
955 if (sc->ptna->backend_users == 0) { in ptnet_stop()
959 netmap_mem_deref(na_dr->nm_mem, na_dr); in ptnet_stop()
971 for (i = 0; i < sc->num_rings; i++) { in ptnet_qflush()
972 struct ptnet_queue *pq = sc->queues + i; in ptnet_qflush()
976 if (pq->bufring) { in ptnet_qflush()
977 while ((m = buf_ring_dequeue_sc(pq->bufring))) { in ptnet_qflush()
991 struct ifmedia *ifm = &sc->media; in ptnet_media_change()
993 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { in ptnet_media_change()
1009 for (i = 0; i < sc->num_rings; i++) { in ptnet_get_counter()
1010 struct ptnet_queue *pq = sc->queues + i; in ptnet_get_counter()
1011 int idx = (i < sc->num_tx_rings) ? 0 : 1; in ptnet_get_counter()
1013 stats[idx].packets += pq->stats.packets; in ptnet_get_counter()
1014 stats[idx].bytes += pq->stats.bytes; in ptnet_get_counter()
1015 stats[idx].errors += pq->stats.errors; in ptnet_get_counter()
1016 stats[idx].iqdrops += pq->stats.iqdrops; in ptnet_get_counter()
1017 stats[idx].mcasts += pq->stats.mcasts; in ptnet_get_counter()
1047 for (i = 0; i < sc->num_rings; i++) { in ptnet_tick()
1048 struct ptnet_queue *pq = sc->queues + i; in ptnet_tick()
1049 struct ptnet_queue_stats cur = pq->stats; in ptnet_tick()
1054 delta = now.tv_usec - sc->last_ts.tv_usec + in ptnet_tick()
1055 (now.tv_sec - sc->last_ts.tv_sec) * 1000000; in ptnet_tick()
1061 device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " in ptnet_tick()
1063 (cur.packets - pq->last_stats.packets), in ptnet_tick()
1064 (cur.kicks - pq->last_stats.kicks), in ptnet_tick()
1065 (cur.intrs - pq->last_stats.intrs)); in ptnet_tick()
1066 pq->last_stats = cur; in ptnet_tick()
1068 microtime(&sc->last_ts); in ptnet_tick()
1069 callout_schedule(&sc->tick, hz); in ptnet_tick()
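
ptnet_tick() reschedules itself once per second (callout_schedule(&sc->tick, hz)), computes the elapsed time since the previous tick in microseconds, and prints the per-queue deltas of packets, kicks and interrupts over that window. The time arithmetic reduces to the following stand-alone sketch:

    #include <stdio.h>
    #include <sys/time.h>

    /* Microseconds elapsed between two timestamps, as in ptnet_tick(). */
    static long elapsed_usec(const struct timeval *now, const struct timeval *last)
    {
        return (now->tv_usec - last->tv_usec) +
               (now->tv_sec - last->tv_sec) * 1000000L;
    }

    int main(void)
    {
        struct timeval last = { .tv_sec = 100, .tv_usec = 250000 };
        struct timeval now  = { .tv_sec = 101, .tv_usec = 150000 };
        unsigned long pkts_last = 1000, pkts_now = 6000;

        printf("%ld ms window, %lu packets\n",
               elapsed_usec(&now, &last) / 1000,
               pkts_now - pkts_last);   /* "900 ms window, 5000 packets" */
        return 0;
    }
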
1078 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; in ptnet_media_status()
1079 ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; in ptnet_media_status()
1089 bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); in ptnet_nm_ptctl()
1090 return bus_read_4(sc->iomem, PTNET_IO_PTCTL); in ptnet_nm_ptctl()
1096 struct ptnet_softc *sc = if_getsoftc(na->ifp); in ptnet_nm_config()
1098 info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); in ptnet_nm_config()
1099 info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); in ptnet_nm_config()
1100 info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); in ptnet_nm_config()
1101 info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); in ptnet_nm_config()
1102 info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); in ptnet_nm_config()
1104 device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", in ptnet_nm_config()
1105 info->num_tx_rings, info->num_rx_rings, in ptnet_nm_config()
1106 info->num_tx_descs, info->num_rx_descs, in ptnet_nm_config()
1107 info->rx_buf_maxsize); in ptnet_nm_config()
1119 for (i = 0; i < sc->num_rings; i++) { in ptnet_sync_from_csb()
1120 struct nm_csb_atok *atok = sc->queues[i].atok; in ptnet_sync_from_csb()
1121 struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; in ptnet_sync_from_csb()
1124 if (i < na->num_tx_rings) { in ptnet_sync_from_csb()
1125 kring = na->tx_rings[i]; in ptnet_sync_from_csb()
1127 kring = na->rx_rings[i - na->num_tx_rings]; in ptnet_sync_from_csb()
1129 kring->rhead = kring->ring->head = atok->head; in ptnet_sync_from_csb()
1130 kring->rcur = kring->ring->cur = atok->cur; in ptnet_sync_from_csb()
1131 kring->nr_hwcur = ktoa->hwcur; in ptnet_sync_from_csb()
1132 kring->nr_hwtail = kring->rtail = in ptnet_sync_from_csb()
1133 kring->ring->tail = ktoa->hwtail; in ptnet_sync_from_csb()
1136 ktoa->hwcur, atok->head, atok->cur, in ptnet_sync_from_csb()
1137 ktoa->hwtail); in ptnet_sync_from_csb()
1139 t, i, kring->nr_hwcur, kring->rhead, kring->rcur, in ptnet_sync_from_csb()
1140 kring->ring->head, kring->ring->cur, kring->nr_hwtail, in ptnet_sync_from_csb()
1141 kring->rtail, kring->ring->tail); in ptnet_sync_from_csb()
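
ptnet_sync_from_csb() mirrors the current CSB contents into the netmap kring state: the application-visible head/cur come from the guest-to-host (atok) entry, while hwcur and hwtail come from the host-to-guest (ktoa) entry. Stripped of the netmap types, the mirroring amounts to this sketch (the struct layouts are simplified stand-ins, not the real nm_csb_* definitions):

    #include <stdint.h>

    struct atok { uint32_t head, cur; };       /* guest -> host view */
    struct ktoa { uint32_t hwcur, hwtail; };   /* host -> guest view */

    struct kring {                             /* simplified netmap kring */
        uint32_t rhead, rcur, rtail;
        uint32_t nr_hwcur, nr_hwtail;
    };

    static void sync_kring_from_csb(struct kring *kr,
                                    const struct atok *a, const struct ktoa *k)
    {
        kr->rhead = a->head;
        kr->rcur = a->cur;
        kr->nr_hwcur = k->hwcur;
        kr->nr_hwtail = kr->rtail = k->hwtail;
    }
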
1150 bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); in ptnet_update_vnet_hdr()
1151 sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); in ptnet_update_vnet_hdr()
1152 sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; in ptnet_update_vnet_hdr()
1158 /* device-specific */ in ptnet_nm_register()
1159 if_t ifp = na->ifp; in ptnet_nm_register()
1161 int native = (na == &sc->ptna->hwup.up); in ptnet_nm_register()
1167 sc->ptna->backend_users--; in ptnet_nm_register()
1176 if (native && !onoff && na->active_fds == 0) { in ptnet_nm_register()
1177 nm_prinf("Exit netmap mode, re-enable interrupts"); in ptnet_nm_register()
1178 for (i = 0; i < sc->num_rings; i++) { in ptnet_nm_register()
1179 pq = sc->queues + i; in ptnet_nm_register()
1180 pq->atok->appl_need_kick = 1; in ptnet_nm_register()
1185 if (sc->ptna->backend_users == 0) { in ptnet_nm_register()
1187 for (i = 0; i < sc->num_rings; i++) { in ptnet_nm_register()
1188 pq = sc->queues + i; in ptnet_nm_register()
1189 pq->ktoa->kern_need_kick = 1; in ptnet_nm_register()
1190 pq->atok->appl_need_kick = in ptnet_nm_register()
1192 && i >= sc->num_tx_rings); in ptnet_nm_register()
1195 /* Set the virtio-net header length. */ in ptnet_nm_register()
1223 if (sc->ptna->backend_users == 0) { in ptnet_nm_register()
1229 sc->ptna->backend_users++; in ptnet_nm_register()
1238 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); in ptnet_nm_txsync()
1239 struct ptnet_queue *pq = sc->queues + kring->ring_id; in ptnet_nm_txsync()
1242 notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); in ptnet_nm_txsync()
1253 struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); in ptnet_nm_rxsync()
1254 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; in ptnet_nm_rxsync()
1257 notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); in ptnet_nm_rxsync()
1268 struct ptnet_softc *sc = if_getsoftc(na->ifp); in ptnet_nm_intr()
1271 for (i = 0; i < sc->num_rings; i++) { in ptnet_nm_intr()
1272 struct ptnet_queue *pq = sc->queues + i; in ptnet_nm_intr()
1273 pq->atok->appl_need_kick = onoff; in ptnet_nm_intr()
1281 struct ptnet_softc *sc = pq->sc; in ptnet_tx_intr()
1283 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); in ptnet_tx_intr()
1285 pq->stats.intrs ++; in ptnet_tx_intr()
1288 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { in ptnet_tx_intr()
1294 * at least when using MSI-X interrupts. The if_em driver, instead in ptnet_tx_intr()
1296 taskqueue_enqueue(pq->taskq, &pq->task); in ptnet_tx_intr()
1303 struct ptnet_softc *sc = pq->sc; in ptnet_rx_intr()
1306 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); in ptnet_rx_intr()
1308 pq->stats.intrs ++; in ptnet_rx_intr()
1311 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { in ptnet_rx_intr()
1315 /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, in ptnet_rx_intr()
1316 * receive-side processing is executed directly in the interrupt in ptnet_rx_intr()
1327 m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); in ptnet_vlan_tag_remove()
1328 m->m_flags |= M_VLANTAG; in ptnet_vlan_tag_remove()
1332 ETHER_HDR_LEN - ETHER_TYPE_LEN); in ptnet_vlan_tag_remove()
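
ptnet_vlan_tag_remove() records the 802.1Q tag value in the mbuf header, sets M_VLANTAG, and then strips the 4-byte encapsulation by sliding the destination and source addresses (ETHER_HDR_LEN - ETHER_TYPE_LEN = 12 bytes) forward over it before trimming the mbuf from the front. On a flat byte buffer the same operation looks like this sketch:

    #include <stdint.h>
    #include <string.h>

    #define ETHER_HDR_LEN        14   /* dst(6) + src(6) + type(2) */
    #define ETHER_TYPE_LEN        2
    #define ETHER_VLAN_ENCAP_LEN  4   /* 802.1Q TPID + TCI */

    /* Strip the 802.1Q tag in place; the untagged frame then starts
     * ETHER_VLAN_ENCAP_LEN bytes into the buffer (the driver does the
     * equivalent front trim on the mbuf). */
    static uint8_t *vlan_tag_remove(uint8_t *frame, uint16_t *tci_out)
    {
        *tci_out = (uint16_t)((frame[14] << 8) | frame[15]);   /* TCI follows the TPID */
        memmove(frame + ETHER_VLAN_ENCAP_LEN, frame,
                ETHER_HDR_LEN - ETHER_TYPE_LEN);               /* slide dst+src over the tag */
        return frame + ETHER_VLAN_ENCAP_LEN;
    }
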
1340 struct netmap_ring *ring = kring->ring; in ptnet_ring_update()
1341 struct nm_csb_atok *atok = pq->atok; in ptnet_ring_update()
1342 struct nm_csb_ktoa *ktoa = pq->ktoa; in ptnet_ring_update()
1347 ring->head = ring->cur = head; in ptnet_ring_update()
1350 kring->rcur = kring->rhead = head; in ptnet_ring_update()
1352 nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); in ptnet_ring_update()
1355 if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { in ptnet_ring_update()
1356 atok->sync_flags = sync_flags; in ptnet_ring_update()
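
ptnet_ring_update() publishes the new head/cur values to the shared atok entry and only rings the doorbell when the host side has set kern_need_kick; this is how guest-to-host notifications are suppressed while the host sync kloop is already busy polling the CSB. A user-space sketch of that publish-then-maybe-kick step, using C11 atomics where the driver issues explicit memory barriers (types and names are simplified stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>

    struct atok_sh { _Atomic uint32_t head, cur, sync_flags; };   /* guest -> host */
    struct ktoa_sh { _Atomic uint32_t kern_need_kick; };          /* host -> guest */

    static void ring_update(struct atok_sh *atok, struct ktoa_sh *ktoa,
                            uint32_t head, uint32_t cur, uint32_t sync_flags,
                            void (*kick)(void))
    {
        /* Publish the new application state for the host kloop to read. */
        atomic_store_explicit(&atok->head, head, memory_order_relaxed);
        atomic_store_explicit(&atok->cur, cur, memory_order_relaxed);

        /* Store-load barrier: the head/cur stores must be visible before we
         * decide, based on kern_need_kick, whether a doorbell is needed. */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load_explicit(&ktoa->kern_need_kick, memory_order_relaxed)) {
            atomic_store_explicit(&atok->sync_flags, sync_flags, memory_order_relaxed);
            kick();
        }
    }
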
1362 ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
1363 (_k)->rtail - (_h)) < (_min)
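
These two continuation lines are the body of the driver's TX no-space check (PTNET_TX_NOSPACE): they count the slots remaining between the current head and the kring tail, adding nkr_num_slots when head is not below the tail index (wraparound), and compare the result against the minimum needed. The same arithmetic as a plain function:

    #include <stdbool.h>

    /* Free TX slots between head and tail on a circular ring of num_slots
     * entries, mirroring the macro's wraparound handling. */
    static unsigned ring_space(unsigned head, unsigned tail, unsigned num_slots)
    {
        return (head < tail ? 0 : num_slots) + tail - head;
    }

    static bool tx_nospace(unsigned head, unsigned tail, unsigned num_slots,
                           unsigned min_needed)
    {
        return ring_space(head, tail, num_slots) < min_needed;
    }
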
1371 struct ptnet_softc *sc = pq->sc; in ptnet_drain_transmit_queue()
1372 bool have_vnet_hdr = sc->vnet_hdr_len; in ptnet_drain_transmit_queue()
1373 struct netmap_adapter *na = &sc->ptna->dr.up; in ptnet_drain_transmit_queue()
1374 if_t ifp = sc->ifp; in ptnet_drain_transmit_queue()
1394 taskqueue_enqueue(pq->taskq, &pq->task); in ptnet_drain_transmit_queue()
1406 atok = pq->atok; in ptnet_drain_transmit_queue()
1407 ktoa = pq->ktoa; in ptnet_drain_transmit_queue()
1408 kring = na->tx_rings[pq->kring_id]; in ptnet_drain_transmit_queue()
1409 ring = kring->ring; in ptnet_drain_transmit_queue()
1410 lim = kring->nkr_num_slots - 1; in ptnet_drain_transmit_queue()
1411 head = ring->head; in ptnet_drain_transmit_queue()
1412 minspace = sc->min_tx_space; in ptnet_drain_transmit_queue()
1426 atok->appl_need_kick = 1; in ptnet_drain_transmit_queue()
1429 * prevent the store to atok->appl_need_kick in ptnet_drain_transmit_queue()
1431 * ktoa->hwcur and ktoa->hwtail (store-load in ptnet_drain_transmit_queue()
1443 atok->appl_need_kick = 0; in ptnet_drain_transmit_queue()
1447 mhead = drbr_peek(ifp, pq->bufring); in ptnet_drain_transmit_queue()
1453 slot = ring->slot + head; in ptnet_drain_transmit_queue()
1457 /* If needed, prepare the virtio-net header at the beginning in ptnet_drain_transmit_queue()
1464 * two 8-bytes-wide writes. */ in ptnet_drain_transmit_queue()
1466 if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { in ptnet_drain_transmit_queue()
1474 pq->stats.errors ++; in ptnet_drain_transmit_queue()
1475 drbr_advance(ifp, pq->bufring); in ptnet_drain_transmit_queue()
1482 mhead->m_pkthdr.csum_flags, vh->flags, in ptnet_drain_transmit_queue()
1483 vh->csum_start, vh->csum_offset, vh->hdr_len, in ptnet_drain_transmit_queue()
1484 vh->gso_size, vh->gso_type); in ptnet_drain_transmit_queue()
1490 for (mf = mhead; mf; mf = mf->m_next) { in ptnet_drain_transmit_queue()
1491 uint8_t *mdata = mf->m_data; in ptnet_drain_transmit_queue()
1492 int mlen = mf->m_len; in ptnet_drain_transmit_queue()
1495 int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; in ptnet_drain_transmit_queue()
1503 mlen -= copy; in ptnet_drain_transmit_queue()
1511 slot->len = nmbuf_bytes; in ptnet_drain_transmit_queue()
1512 slot->flags = NS_MOREFRAG; in ptnet_drain_transmit_queue()
1515 KASSERT(head != ring->tail, in ptnet_drain_transmit_queue()
1517 slot = ring->slot + head; in ptnet_drain_transmit_queue()
1524 slot->len = nmbuf_bytes; in ptnet_drain_transmit_queue()
1525 slot->flags = 0; in ptnet_drain_transmit_queue()
1529 drbr_advance(ifp, pq->bufring); in ptnet_drain_transmit_queue()
1534 pq->stats.packets ++; in ptnet_drain_transmit_queue()
1535 pq->stats.bytes += mhead->m_pkthdr.len; in ptnet_drain_transmit_queue()
1536 if (mhead->m_flags & M_MCAST) { in ptnet_drain_transmit_queue()
1537 pq->stats.mcasts ++; in ptnet_drain_transmit_queue()
1555 drbr_inuse(ifp, pq->bufring))); in ptnet_drain_transmit_queue()
1556 taskqueue_enqueue(pq->taskq, &pq->task); in ptnet_drain_transmit_queue()
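
In ptnet_drain_transmit_queue(), each mbuf chain taken from the bufring is copied into consecutive netmap slots: bytes from the mbuf fragments fill a slot up to NETMAP_BUF_SIZE, the slot is marked NS_MOREFRAG while more of the packet follows, and the final slot of the packet gets flags = 0. Reduced to flat buffers, the copy loop looks roughly like this sketch (types, sizes and the control flow are illustrative, not the driver's exact code):

    #include <stdint.h>
    #include <string.h>

    #define NMBUF_SIZE  2048u    /* stand-in for NETMAP_BUF_SIZE(na) */
    #define NS_MOREFRAG 0x20     /* "packet continues in the next slot" */

    struct slot { uint32_t len; uint16_t flags; uint8_t buf[NMBUF_SIZE]; };

    /* Copy one packet (given as an array of fragments) into as many slots as
     * needed; returns the number of slots consumed. */
    static unsigned copy_packet_to_slots(struct slot *slots, unsigned nslots,
                                         const uint8_t *const *frag,
                                         const unsigned *frag_len, unsigned nfrags)
    {
        unsigned si = 0, used = 0;   /* current slot index, bytes already in it */

        for (unsigned f = 0; f < nfrags; f++) {
            const uint8_t *data = frag[f];
            unsigned left = frag_len[f];

            while (left > 0) {
                unsigned copy = NMBUF_SIZE - used;
                if (copy > left)
                    copy = left;
                memcpy(slots[si].buf + used, data, copy);
                used += copy;
                data += copy;
                left -= copy;

                if (used == NMBUF_SIZE && (left > 0 || f + 1 < nfrags)) {
                    /* Current buffer is full but the packet continues. */
                    slots[si].len = used;
                    slots[si].flags = NS_MOREFRAG;
                    si++;
                    used = 0;
                    if (si == nslots)
                        return si;   /* out of slots; the driver re-checks space instead */
                }
            }
        }
        /* Close the last slot of the packet. */
        slots[si].len = used;
        slots[si].flags = 0;
        return si + 1;
    }
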
1572 DBG(device_printf(sc->dev, "transmit %p\n", m)); in ptnet_transmit()
1575 if (m->m_flags & M_VLANTAG) { in ptnet_transmit()
1576 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); in ptnet_transmit()
1580 m->m_flags &= ~M_VLANTAG; in ptnet_transmit()
1583 /* Get the flow-id if available. */ in ptnet_transmit()
1585 m->m_pkthdr.flowid : curcpu; in ptnet_transmit()
1587 if (unlikely(queue_idx >= sc->num_tx_rings)) { in ptnet_transmit()
1588 queue_idx %= sc->num_tx_rings; in ptnet_transmit()
1591 pq = sc->queues + queue_idx; in ptnet_transmit()
1593 err = drbr_enqueue(ifp, pq->bufring, m); in ptnet_transmit()
1598 pq->stats.errors ++; in ptnet_transmit()
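
ptnet_transmit() picks the TX queue from the mbuf's flow id when one is available, otherwise from the current CPU, and folds the value into the number of TX rings. The mapping itself is just:

    /* Map a flow id (or the current CPU id) onto one of num_tx_rings
     * transmit queues, as ptnet_transmit() does. */
    static unsigned tx_queue_index(unsigned flowid, unsigned num_tx_rings)
    {
        return flowid % num_tx_rings;
    }
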
1616 struct netmap_ring *ring = kring->ring; in ptnet_rx_discard()
1617 struct netmap_slot *slot = ring->slot + head; in ptnet_rx_discard()
1620 head = nm_next(head, kring->nkr_num_slots - 1); in ptnet_rx_discard()
1621 if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { in ptnet_rx_discard()
1624 slot = ring->slot + head; in ptnet_rx_discard()
1633 uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; in ptnet_rx_slot()
1638 if (mtail->m_len == MCLBYTES) { in ptnet_rx_slot()
1646 mtail->m_next = mf; in ptnet_rx_slot()
1649 mtail->m_len = 0; in ptnet_rx_slot()
1652 copy = MCLBYTES - mtail->m_len; in ptnet_rx_slot()
1660 nmbuf_len -= copy; in ptnet_rx_slot()
1662 mtail->m_len += copy; in ptnet_rx_slot()
1671 struct ptnet_softc *sc = pq->sc; in ptnet_rx_eof()
1672 bool have_vnet_hdr = sc->vnet_hdr_len; in ptnet_rx_eof()
1673 struct nm_csb_atok *atok = pq->atok; in ptnet_rx_eof()
1674 struct nm_csb_ktoa *ktoa = pq->ktoa; in ptnet_rx_eof()
1675 struct netmap_adapter *na = &sc->ptna->dr.up; in ptnet_rx_eof()
1676 struct netmap_kring *kring = na->rx_rings[pq->kring_id]; in ptnet_rx_eof()
1677 struct netmap_ring *ring = kring->ring; in ptnet_rx_eof()
1678 unsigned int const lim = kring->nkr_num_slots - 1; in ptnet_rx_eof()
1680 if_t ifp = sc->ifp; in ptnet_rx_eof()
1690 kring->nr_kflags &= ~NKR_PENDINTR; in ptnet_rx_eof()
1692 head = ring->head; in ptnet_rx_eof()
1702 if (head == ring->tail) { in ptnet_rx_eof()
1708 if (head == ring->tail) { in ptnet_rx_eof()
1713 atok->appl_need_kick = 1; in ptnet_rx_eof()
1717 * to atok->appl_need_kick to be reordered with in ptnet_rx_eof()
1718 * the load from ktoa->hwcur and ktoa->hwtail in ptnet_rx_eof()
1719 * (store-load barrier). */ in ptnet_rx_eof()
1722 if (likely(head == ring->tail)) { in ptnet_rx_eof()
1725 atok->appl_need_kick = 0; in ptnet_rx_eof()
1730 * virtio-net header. */ in ptnet_rx_eof()
1731 slot = ring->slot + head; in ptnet_rx_eof()
1733 nmbuf_len = slot->len; in ptnet_rx_eof()
1741 nm_prlim(1, "Fragmented vnet-hdr: dropping"); in ptnet_rx_eof()
1743 pq->stats.iqdrops ++; in ptnet_rx_eof()
1749 "gso_type %x", __func__, vh->flags, in ptnet_rx_eof()
1750 vh->csum_start, vh->csum_offset, vh->hdr_len, in ptnet_rx_eof()
1751 vh->gso_size, vh->gso_type); in ptnet_rx_eof()
1753 nmbuf_len -= PTNET_HDR_SIZE; in ptnet_rx_eof()
1762 device_printf(sc->dev, "%s: failed to allocate mbuf " in ptnet_rx_eof()
1764 pq->stats.errors ++; in ptnet_rx_eof()
1769 mhead->m_pkthdr.len = nmbuf_len; in ptnet_rx_eof()
1770 mtail->m_len = 0; in ptnet_rx_eof()
1774 DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " in ptnet_rx_eof()
1776 head, ring->tail, slot->len, in ptnet_rx_eof()
1777 slot->flags)); in ptnet_rx_eof()
1786 device_printf(sc->dev, "%s: failed to allocate" in ptnet_rx_eof()
1787 " mbuf frag, reset head %u --> %u\n", in ptnet_rx_eof()
1791 pq->stats.errors ++; in ptnet_rx_eof()
1793 taskqueue_enqueue(pq->taskq, in ptnet_rx_eof()
1794 &pq->task); in ptnet_rx_eof()
1803 if (!(slot->flags & NS_MOREFRAG)) { in ptnet_rx_eof()
1807 if (unlikely(head == ring->tail)) { in ptnet_rx_eof()
1810 * the outer cycle (to do the double-check). */ in ptnet_rx_eof()
1813 pq->stats.iqdrops ++; in ptnet_rx_eof()
1817 slot = ring->slot + head; in ptnet_rx_eof()
1819 nmbuf_len = slot->len; in ptnet_rx_eof()
1820 mhead->m_pkthdr.len += nmbuf_len; in ptnet_rx_eof()
1823 mhead->m_pkthdr.rcvif = ifp; in ptnet_rx_eof()
1824 mhead->m_pkthdr.csum_flags = 0; in ptnet_rx_eof()
1827 mhead->m_pkthdr.flowid = pq->kring_id; in ptnet_rx_eof()
1834 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { in ptnet_rx_eof()
1840 if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) in ptnet_rx_eof()
1841 vh->csum_start -= ETHER_VLAN_ENCAP_LEN; in ptnet_rx_eof()
1848 pq->stats.iqdrops ++; in ptnet_rx_eof()
1857 * the new ring->cur and ring->head (RX buffer refill). */ in ptnet_rx_eof()
1863 pq->stats.packets ++; in ptnet_rx_eof()
1864 pq->stats.bytes += mhead->m_pkthdr.len; in ptnet_rx_eof()
1869 /* The ring->head index (and related indices) are in ptnet_rx_eof()
1872 * must reload ring->head and restart processing the in ptnet_rx_eof()
1874 head = ring->head; in ptnet_rx_eof()
1890 /* If we ran out of budget or the double-check found new in ptnet_rx_eof()
1893 head, ring->tail)); in ptnet_rx_eof()
1894 taskqueue_enqueue(pq->taskq, &pq->task); in ptnet_rx_eof()
1907 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); in ptnet_rx_task()
1916 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); in ptnet_tx_task()
1932 KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); in ptnet_poll()
1933 queue_budget = MAX(budget / sc->num_rings, 1); in ptnet_poll()
1934 nm_prlim(1, "Per-queue budget is %d", queue_budget); in ptnet_poll()
1939 for (i = 0; i < sc->num_rings; i++) { in ptnet_poll()
1940 struct ptnet_queue *pq = sc->queues + i; in ptnet_poll()
1949 if (i < sc->num_tx_rings) { in ptnet_poll()
1965 /* This may happen when initial budget < sc->num_rings, in ptnet_poll()
1971 budget -= rcnt; in ptnet_poll()
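
ptnet_poll() divides the polling budget evenly across all rings, never handing a queue less than one slot (MAX(budget / num_rings, 1)), and then subtracts what each queue actually consumed (budget -= rcnt above) from the remaining budget. The per-queue share is simply:

    /* Per-queue share of a polling budget, at least 1, as in ptnet_poll(). */
    static int queue_budget(int budget, int num_rings)
    {
        int share = budget / num_rings;
        return share > 0 ? share : 1;
    }
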