Lines Matching +full:sync +full:- +full:token

2  * Copyright (C) 2014-2018 Vincenzo Maffione, Luigi Rizzo.
39 if_t ifp = na->ifp; in vtnet_netmap_reg()
60 struct netmap_adapter *na = kring->na; in vtnet_netmap_txsync()
61 if_t ifp = na->ifp; in vtnet_netmap_txsync()
62 struct netmap_ring *ring = kring->ring; in vtnet_netmap_txsync()
63 u_int ring_nr = kring->ring_id; in vtnet_netmap_txsync()
65 u_int const lim = kring->nkr_num_slots - 1; in vtnet_netmap_txsync()
66 u_int const head = kring->rhead; in vtnet_netmap_txsync()
68 /* device-specific */ in vtnet_netmap_txsync()
70 struct vtnet_txq *txq = &sc->vtnet_txqs[ring_nr]; in vtnet_netmap_txsync()
71 struct virtqueue *vq = txq->vtntx_vq; in vtnet_netmap_txsync()
72 int interrupts = !(kring->nr_kflags & NKR_NOINTR); in vtnet_netmap_txsync()
79 nm_i = kring->nr_hwcur; in vtnet_netmap_txsync()
81 struct sglist *sg = txq->vtntx_sg; in vtnet_netmap_txsync()
85 struct netmap_slot *slot = &ring->slot[nm_i]; in vtnet_netmap_txsync()
87 u_int len = slot->len; in vtnet_netmap_txsync()
94 slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); in vtnet_netmap_txsync()
99 err = sglist_append(sg, &txq->vtntx_shrhdr, sc->vtnet_hdr_size); in vtnet_netmap_txsync()
104 /*readable=*/sg->sg_nseg, in vtnet_netmap_txsync()
109 kring->name, err); in vtnet_netmap_txsync()
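
The enqueue path above chains exactly two segments per packet: the shared virtio-net header first, then the netmap buffer. Below is a minimal user-space model of that layout; the model_* struct and function are hypothetical stand-ins for the kernel's sglist API (sglist_append() and friends), shown only to illustrate the segment ordering, not the real <sys/sglist.h> interface.

#include <stdio.h>
#include <string.h>

#define MODEL_MAX_SEGS 2

struct model_seg { void *addr; size_t len; };
struct model_sglist { struct model_seg seg[MODEL_MAX_SEGS]; int nseg; };

static int
model_append(struct model_sglist *sg, void *addr, size_t len)
{
	if (sg->nseg >= MODEL_MAX_SEGS)
		return -1;		/* no room: would map to an enqueue error */
	sg->seg[sg->nseg].addr = addr;
	sg->seg[sg->nseg].len = len;
	sg->nseg++;
	return 0;
}

int
main(void)
{
	unsigned char shared_hdr[12];	/* stands in for sc->vtnet_hdr_size bytes */
	unsigned char nm_buf[2048];	/* stands in for a netmap buffer */
	struct model_sglist sg = { .nseg = 0 };

	memset(shared_hdr, 0, sizeof(shared_hdr));

	/* Header first, then payload: the host expects the virtio-net
	 * header at the front of the descriptor chain. */
	model_append(&sg, shared_hdr, sizeof(shared_hdr));
	model_append(&sg, nm_buf, 1500 /* slot->len */);

	printf("readable segments: %d (hdr %zu + payload %zu bytes)\n",
	    sg.nseg, sg.seg[0].len, sg.seg[1].len);
	return 0;
}
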
117 kring->nr_hwcur = nm_i; /* note we might break early */ in vtnet_netmap_txsync()
121 * by the token we passed to virtqueue_enqueue. in vtnet_netmap_txsync()
125 		void *token = virtqueue_dequeue(vq, NULL); in vtnet_netmap_txsync()
126 if (token == NULL) in vtnet_netmap_txsync()
128 if (unlikely(token != (void *)txq)) in vtnet_netmap_txsync()
129 nm_prerr("BUG: TX token mismatch"); in vtnet_netmap_txsync()
134 kring->nr_hwtail += n; in vtnet_netmap_txsync()
135 if (kring->nr_hwtail > lim) in vtnet_netmap_txsync()
136 kring->nr_hwtail -= lim + 1; in vtnet_netmap_txsync()
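
The tail update above wraps with a single conditional subtraction rather than a modulo. That is sufficient because a netmap ring never has more than num_slots - 1 slots in flight, so nr_hwtail can overshoot lim by at most lim. A self-contained check of the arithmetic, with an illustrative ring size:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	const unsigned num_slots = 8;		/* hypothetical ring size */
	const unsigned lim = num_slots - 1;
	unsigned hwtail = 6;			/* current nr_hwtail */
	unsigned n = 5;				/* slots just reclaimed */

	hwtail += n;
	if (hwtail > lim)
		hwtail -= lim + 1;		/* wraps exactly once */

	assert(hwtail == 3);			/* (6 + 5) mod 8 */
	printf("nr_hwtail after reclaiming %u slots: %u\n", n, hwtail);
	return 0;
}
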
147 * from the next available one (rx->vtnrx_nm_refill).
150 * since the netmap ring and the virtqueue would go out of sync.
155 struct netmap_adapter *na = kring->na; in vtnet_netmap_kring_refill()
156 if_t ifp = na->ifp; in vtnet_netmap_kring_refill()
157 struct netmap_ring *ring = kring->ring; in vtnet_netmap_kring_refill()
158 u_int ring_nr = kring->ring_id; in vtnet_netmap_kring_refill()
159 u_int const lim = kring->nkr_num_slots - 1; in vtnet_netmap_kring_refill()
162 /* device-specific */ in vtnet_netmap_kring_refill()
164 struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; in vtnet_netmap_kring_refill()
165 struct virtqueue *vq = rxq->vtnrx_vq; in vtnet_netmap_kring_refill()
171 for (nm_i = rxq->vtnrx_nm_refill; num > 0; in vtnet_netmap_kring_refill()
172 nm_i = nm_next(nm_i, lim), num--) { in vtnet_netmap_kring_refill()
173 struct netmap_slot *slot = &ring->slot[nm_i]; in vtnet_netmap_kring_refill()
184 slot->flags &= ~NS_BUF_CHANGED; in vtnet_netmap_kring_refill()
186 err = sglist_append(&sg, &rxq->vtnrx_shrhdr, sc->vtnet_hdr_size); in vtnet_netmap_kring_refill()
188 NETMAP_BUF_SIZE(na) - offset); in vtnet_netmap_kring_refill()
196 kring->name, err); in vtnet_netmap_kring_refill()
200 rxq->vtnrx_nm_refill = nm_i; in vtnet_netmap_kring_refill()
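
The cursor stored back into vtnrx_nm_refill is what lets successive refill calls pick up exactly where the previous one stopped. A sketch of that persistent-cursor walk, with nm_next() re-implemented locally so the example runs standalone:

#include <stdio.h>

static unsigned
model_nm_next(unsigned i, unsigned lim)
{
	return (i == lim) ? 0 : i + 1;
}

int
main(void)
{
	const unsigned lim = 7;	/* num_slots - 1 for an 8-slot ring */
	unsigned refill = 5;	/* rxq->vtnrx_nm_refill from the last call */
	unsigned num = 4;	/* buffers to publish this time */

	for (; num > 0; refill = model_nm_next(refill, lim), num--)
		printf("publish slot %u\n", refill);	/* 5, 6, 7, 0 */

	/* Store the cursor back, as the driver does with vtnrx_nm_refill. */
	printf("next refill starts at slot %u\n", refill);	/* 1 */
	return 0;
}
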
207 * Returns -1 if this virtqueue is not being opened in netmap mode.
214 struct netmap_adapter *na = NA(rxq->vtnrx_sc->vtnet_ifp); in vtnet_netmap_rxq_populate()
220 slot = netmap_reset(na, NR_RX, rxq->vtnrx_id, 0); in vtnet_netmap_rxq_populate()
222 return -1; in vtnet_netmap_rxq_populate()
223 kring = na->rx_rings[rxq->vtnrx_id]; in vtnet_netmap_rxq_populate()
228  * maximum number of 2-element sglists that the RX virtqueue can in vtnet_netmap_rxq_populate()

229 * accommodate. We need to start from kring->nr_hwtail, which is 0 in vtnet_netmap_rxq_populate()
231  * virtio re-init (caused by a netmap register or, e.g., ifconfig) in vtnet_netmap_rxq_populate()
234 rxq->vtnrx_nm_refill = kring->nr_hwtail; in vtnet_netmap_rxq_populate()
235 num = na->num_rx_desc - 1 - nm_kr_rxspace(kring); in vtnet_netmap_rxq_populate()
237 virtqueue_notify(rxq->vtnrx_vq); in vtnet_netmap_rxq_populate()
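
Why num_rx_desc - 1 and not num_rx_desc: in a netmap ring, hwtail == hwcur means "empty". If all N buffers were published and later completed, hwtail would wrap all the way around onto hwcur, making a full ring indistinguishable from an empty one; holding one slot back keeps the two states distinct. A worked check under illustrative values, where model_rxspace is a hypothetical stand-in for nm_kr_rxspace():

#include <stdio.h>

int
main(void)
{
	unsigned num_rx_desc = 256;	/* hypothetical ring size */
	unsigned model_rxspace = 0;	/* 0 right after netmap_reset() */
	unsigned num = num_rx_desc - 1 - model_rxspace;

	printf("publish %u of %u buffers at populate time\n",
	    num, num_rx_desc);		/* 255 of 256 */
	return 0;
}
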
246 struct netmap_adapter *na = kring->na; in vtnet_netmap_rxsync()
247 if_t ifp = na->ifp; in vtnet_netmap_rxsync()
248 struct netmap_ring *ring = kring->ring; in vtnet_netmap_rxsync()
249 u_int ring_nr = kring->ring_id; in vtnet_netmap_rxsync()
251 u_int const lim = kring->nkr_num_slots - 1; in vtnet_netmap_rxsync()
252 u_int const head = kring->rhead; in vtnet_netmap_rxsync()
254 (kring->nr_kflags & NKR_PENDINTR); in vtnet_netmap_rxsync()
255 int interrupts = !(kring->nr_kflags & NKR_NOINTR); in vtnet_netmap_rxsync()
257 /* device-specific */ in vtnet_netmap_rxsync()
259 struct vtnet_rxq *rxq = &sc->vtnet_rxqs[ring_nr]; in vtnet_netmap_rxsync()
260 struct virtqueue *vq = rxq->vtnrx_vq; in vtnet_netmap_rxsync()
264 * Only accept our own buffers (matching the token). We should only get in vtnet_netmap_rxsync()
266 * we publish only N-1 receive buffers (and not N). in vtnet_netmap_rxsync()
268 * disabled, pending packets in the VQ and hwtail == (hwcur - 1), in vtnet_netmap_rxsync()
272 uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim); in vtnet_netmap_rxsync()
273 	void *token; in vtnet_netmap_rxsync()
277 nm_i = kring->nr_hwtail; in vtnet_netmap_rxsync()
280 token = virtqueue_dequeue(vq, &len); in vtnet_netmap_rxsync()
281 if (token == NULL) { in vtnet_netmap_rxsync()
283 * Enable the interrupts again and double-check in vtnet_netmap_rxsync()
287 * most N-1 slots. in vtnet_netmap_rxsync()
295 if (unlikely(token != (void *)rxq)) { in vtnet_netmap_rxsync()
296 nm_prerr("BUG: RX token mismatch"); in vtnet_netmap_rxsync()
303 /* Skip the virtio-net header. */ in vtnet_netmap_rxsync()
304 len -= sc->vtnet_hdr_size; in vtnet_netmap_rxsync()
306 			nm_prlim(1, "Truncated virtio-net header, " in vtnet_netmap_rxsync()
307 "missing %d bytes", -len); in vtnet_netmap_rxsync()
310 ring->slot[nm_i].len = len; in vtnet_netmap_rxsync()
311 ring->slot[nm_i].flags = 0; in vtnet_netmap_rxsync()
315 kring->nr_hwtail = nm_i; in vtnet_netmap_rxsync()
316 kring->nr_kflags &= ~NKR_PENDINTR; in vtnet_netmap_rxsync()
322 nm_i = kring->nr_hwcur; /* netmap ring index */ in vtnet_netmap_rxsync()
327 released = head - nm_i; in vtnet_netmap_rxsync()
329 released += kring->nkr_num_slots; in vtnet_netmap_rxsync()
336 kring->nr_hwcur = head; in vtnet_netmap_rxsync()
340 nm_prdis("h %d c %d t %d hwcur %d hwtail %d", kring->rhead, in vtnet_netmap_rxsync()
341 kring->rcur, kring->rtail, kring->nr_hwcur, kring->nr_hwtail); in vtnet_netmap_rxsync()
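
The receive loop's stop condition is the other half of the N - 1 rule above: dequeue until hwtail would collide with nm_prev(hwcur), so at most num_slots - 1 buffers are ever outstanding and the interrupt-off corner case stays unambiguous. A self-contained sketch, with nm_prev()/nm_next() re-implemented locally:

#include <stdio.h>

static unsigned model_nm_prev(unsigned i, unsigned lim) { return (i == 0) ? lim : i - 1; }
static unsigned model_nm_next(unsigned i, unsigned lim) { return (i == lim) ? 0 : i + 1; }

int
main(void)
{
	const unsigned lim = 7;		/* 8-slot ring */
	unsigned hwcur = 2;
	unsigned hwtail = 2;		/* empty: hwtail == hwcur */
	unsigned hwtail_lim = model_nm_prev(hwcur, lim);	/* 1 */
	unsigned received = 0;

	/* Even if the virtqueue always had packets pending, the loop
	 * stops after 7 slots, one short of a full ring. */
	while (hwtail != hwtail_lim) {
		hwtail = model_nm_next(hwtail, lim);
		received++;
	}
	printf("received %u packets, hwtail %u (hwtail_lim %u)\n",
	    received, hwtail, hwtail_lim);
	return 0;
}
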
351 struct vtnet_softc *sc = if_getsoftc(na->ifp); in vtnet_netmap_intr()
354 for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { in vtnet_netmap_intr()
355 struct vtnet_rxq *rxq = &sc->vtnet_rxqs[i]; in vtnet_netmap_intr()
356 struct vtnet_txq *txq = &sc->vtnet_txqs[i]; in vtnet_netmap_intr()
357 struct virtqueue *txvq = txq->vtntx_vq; in vtnet_netmap_intr()
374 /* We need to prepend a virtio-net header to each netmap buffer to be in vtnet_netmap_tx_slots()
378 * with the host, and if sc->vtnet_tx_nsegs > 1. With indirect in vtnet_netmap_tx_slots()
384 if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_tx_nsegs > 1) in vtnet_netmap_tx_slots()
389 return virtqueue_size(sc->vtnet_txqs[0].vtntx_vq) / div; in vtnet_netmap_tx_slots()
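
A worked example of the divisor logic above, under illustrative values: a two-segment TX sglist (header + buffer) costs one virtio descriptor when indirect descriptors are in use and two otherwise, so the usable netmap slot count is the virtqueue size divided by 1 or 2. The real check also requires sc->vtnet_tx_nsegs > 1 for the indirect case; the size of 256 is just a plausible value.

#include <stdio.h>

int
main(void)
{
	unsigned vq_size = 256;	/* hypothetical virtqueue_size() result */
	int indirect = 0;	/* was VTNET_FLAG_INDIRECT negotiated? */
	int div = indirect ? 1 : 2;

	printf("netmap TX slots: %u\n", vq_size / div);	/* 128 here, 256 if indirect */
	return 0;
}
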
397 /* We need to prepend a virtio-net header to each netmap buffer to be in vtnet_netmap_rx_slots()
401 * with the host, and if sc->vtnet_rx_nsegs > 1. With indirect in vtnet_netmap_rx_slots()
407 if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) && sc->vtnet_rx_nsegs > 1) in vtnet_netmap_rx_slots()
412 return virtqueue_size(sc->vtnet_rxqs[0].vtnrx_vq) / div; in vtnet_netmap_rx_slots()
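
The receive-side computation mirrors the transmit one: each published RX buffer is likewise a two-element sglist (shared virtio-net header plus netmap buffer), so without usable indirect descriptors only half of the RX virtqueue entries can back netmap slots. The worked example above applies unchanged with vtnet_rx_nsegs in place of vtnet_tx_nsegs.
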
418 struct vtnet_softc *sc = if_getsoftc(na->ifp); in vtnet_netmap_config()
420 info->num_tx_rings = sc->vtnet_act_vq_pairs; in vtnet_netmap_config()
421 info->num_rx_rings = sc->vtnet_act_vq_pairs; in vtnet_netmap_config()
422 info->num_tx_descs = vtnet_netmap_tx_slots(sc); in vtnet_netmap_config()
423 info->num_rx_descs = vtnet_netmap_rx_slots(sc); in vtnet_netmap_config()
424 info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); in vtnet_netmap_config()
436 na.ifp = sc->vtnet_ifp; in vtnet_netmap_attach()
440 na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs; in vtnet_netmap_attach()
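
Only two assignments of the attach routine are matched above. A hedged sketch of how it plausibly completes, following the common netmap driver-attach idiom (zero an adapter template, fill in ring geometry and the callbacks defined earlier in this file, then hand it to netmap_attach()); this is kernel-context code relying on the surrounding driver's definitions, and the exact field set in the real file may differ:

static void
vtnet_netmap_attach_sketch(struct vtnet_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->vtnet_ifp;
	na.num_tx_desc = vtnet_netmap_tx_slots(sc);
	na.num_rx_desc = vtnet_netmap_rx_slots(sc);
	na.num_tx_rings = na.num_rx_rings = sc->vtnet_max_vq_pairs;
	na.nm_register = vtnet_netmap_reg;
	na.nm_txsync = vtnet_netmap_txsync;
	na.nm_rxsync = vtnet_netmap_rxsync;
	na.nm_intr = vtnet_netmap_intr;
	na.nm_config = vtnet_netmap_config;
	netmap_attach(&na);
}
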