Lines Matching defs:pq
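
Throughout this listing, pq points at a per-queue context (struct ptnet_queue) of the ptnet driver. As orientation, here is a sketch of that structure reconstructed from the fields referenced below; only the field names come from the listed lines, while the types, sizes and ordering are assumptions.

struct ptnet_queue_stats {
	uint64_t	packets;	/* line 1015 */
	uint64_t	bytes;		/* line 1016 */
	uint64_t	errors;		/* line 1017 */
	uint64_t	iqdrops;	/* line 1018 */
	uint64_t	mcasts;		/* line 1019 */
	uint64_t	kicks;		/* line 264 */
	uint64_t	intrs;		/* line 1287 */
};

struct ptnet_queue {
	struct ptnet_softc		*sc;		/* back pointer (line 371) */
	struct resource			*irq;		/* MSI-X interrupt (line 643) */
	void				*cookie;	/* interrupt cookie (line 663) */
	int				kring_id;	/* netmap kring index (line 372) */
	struct nm_csb_atok		*atok;		/* guest-to-host CSB entry (line 374) */
	struct nm_csb_ktoa		*ktoa;		/* host-to-guest CSB entry (line 375) */
	unsigned int			kick;		/* doorbell register offset (line 373) */
	struct mtx			lock;		/* queue lock (line 378) */
	char				lock_name[16];	/* lock name buffer (line 376); size assumed */
	struct buf_ring			*bufring;	/* TX queues only (line 384) */
	struct taskqueue		*taskq;		/* deferred work (line 688) */
	struct task			task;		/* tx/rx task (lines 684-686) */
	struct ptnet_queue_stats	stats;		/* lines 1015-1019 */
	struct ptnet_queue_stats	last_stats;	/* for rate reports (line 1068) */
};
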
191 static int ptnet_drain_transmit_queue(struct ptnet_queue *pq,
220 static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
261 static inline void ptnet_kick(struct ptnet_queue *pq)
264 pq->stats.kicks ++;
266 bus_write_4(pq->sc->iomem, pq->kick, 0);
369 struct ptnet_queue *pq = sc->queues + i;
371 pq->sc = sc;
372 pq->kring_id = i;
373 pq->kick = PTNET_IO_KICK_BASE + 4 * i;
374 pq->atok = sc->csb_gh + i;
375 pq->ktoa = sc->csb_hg + i;
376 snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
378 mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
381 pq->kring_id -= num_tx_rings;
384 pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
385 M_DEVBUF, M_NOWAIT, &pq->lock);
386 if (pq->bufring == NULL) {
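
Lines 369-386 belong to the per-queue initialization loop in the attach path. A hedged reconstruction follows; the loop bounds, the device_get_nameunit() argument to snprintf(), the TX/RX branch and the error label are assumptions inferred from lines 381-386.

	for (i = 0; i < num_tx_rings + num_rx_rings; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		pq->sc = sc;
		pq->kring_id = i;
		pq->kick = PTNET_IO_KICK_BASE + 4 * i;	/* per-queue doorbell register */
		pq->atok = sc->csb_gh + i;		/* guest-to-host CSB entry */
		pq->ktoa = sc->csb_hg + i;		/* host-to-guest CSB entry */
		snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
			 device_get_nameunit(dev), i);
		mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
		if (i >= num_tx_rings) {
			/* RX queue: renumber the kring within the RX set. */
			pq->kring_id -= num_tx_rings;
		} else {
			/* TX queue: allocate the buf_ring used by if_transmit. */
			pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
					M_DEVBUF, M_NOWAIT, &pq->lock);
			if (pq->bufring == NULL) {
				err = ENOMEM;
				goto err_path;	/* hypothetical error label */
			}
		}
	}
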
521 struct ptnet_queue *pq = sc->queues + i;
523 if (pq->taskq) {
524 taskqueue_drain(pq->taskq, &pq->task);
550 struct ptnet_queue *pq = sc->queues + i;
552 if (mtx_initialized(&pq->lock)) {
553 mtx_destroy(&pq->lock);
555 if (pq->bufring != NULL) {
556 buf_ring_free(pq->bufring, M_DEVBUF);
640 struct ptnet_queue *pq = sc->queues + i;
643 pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
645 if (pq->irq == NULL) {
655 struct ptnet_queue *pq = sc->queues + i;
661 err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
663 pq, &pq->cookie);
670 bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
672 bus_bind_intr(sc->dev, pq->irq, cpu_cur);
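
Lines 640-672 come from the MSI-X allocation and setup loops. The listing elides the handler argument of bus_setup_intr(); pairing TX queues with ptnet_tx_intr and RX queues with ptnet_rx_intr (both appear later, at lines 1282 and 1304) is an assumption, as are the rid arithmetic and the CPU round-robin helpers. A sketch:

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		int rid = i + 1;	/* matches bus_release_resource(..., i + 1, ...) at line 721 */

		pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_ACTIVE);
		if (pq->irq == NULL) {
			device_printf(dev, "Failed to allocate irq %d\n", rid);
			return (ENOSPC);
		}
	}

	cpu_cur = CPU_FIRST();
	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;
		driver_intr_t *handler = (i < sc->num_tx_rings) ?
					 ptnet_tx_intr : ptnet_rx_intr;

		err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
				     NULL /* no filter */, handler,
				     pq, &pq->cookie);
		if (err) {
			return (err);
		}
		bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
		bus_bind_intr(sc->dev, pq->irq, cpu_cur);
		cpu_cur = CPU_NEXT(cpu_cur);
	}
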
681 struct ptnet_queue *pq = sc->queues + i;
684 TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
686 NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
688 pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
689 taskqueue_thread_enqueue, &pq->taskq);
690 taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
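
Lines 681-690 create one fast taskqueue per queue and attach its deferred-work handler. The TASK_INIT/NET_TASK_INIT split is visible at lines 684-686; the TX-versus-RX predicate and the thread-name arguments beyond the format string are assumptions:

	for (i = 0; i < nvecs; i++) {
		struct ptnet_queue *pq = sc->queues + i;

		if (i < sc->num_tx_rings) {
			TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
		} else {
			NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);
		}
		pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
				taskqueue_thread_enqueue, &pq->taskq);
		taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
				device_get_nameunit(sc->dev), i);
	}
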
708 struct ptnet_queue *pq = sc->queues + i;
710 if (pq->taskq) {
711 taskqueue_free(pq->taskq);
712 pq->taskq = NULL;
715 if (pq->cookie) {
716 bus_teardown_intr(dev, pq->irq, pq->cookie);
717 pq->cookie = NULL;
720 if (pq->irq) {
721 bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
722 pq->irq = NULL;
776 struct ptnet_queue *pq;
787 pq = sc->queues + i;
790 PTNET_Q_LOCK(pq);
791 pq->atok->appl_need_kick = 0;
792 PTNET_Q_UNLOCK(pq);
794 if (pq->taskq) {
795 taskqueue_drain(pq->taskq,
796 &pq->task);
803 pq = sc->queues + i;
804 PTNET_Q_LOCK(pq);
805 pq->atok->appl_need_kick = 1;
806 PTNET_Q_UNLOCK(pq);
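
In the CSB, atok->appl_need_kick tells the host whether the guest wants interrupts on a queue, while ktoa->kern_need_kick (line 1191) tells the guest whether the host wants doorbell kicks. Lines 787-806 first clear the flag and drain any pending task on every queue, then set it again. A reconstruction of that per-queue pattern, with the surrounding context omitted and the loop bound assumed:

	/* Disable host-to-guest interrupts and quiesce deferred work. */
	for (i = 0; i < sc->num_rings; i++) {
		pq = sc->queues + i;
		PTNET_Q_LOCK(pq);
		pq->atok->appl_need_kick = 0;
		PTNET_Q_UNLOCK(pq);
		/* Wait for any task already running on this queue. */
		if (pq->taskq) {
			taskqueue_drain(pq->taskq, &pq->task);
		}
	}

	/* ... */

	/* Re-enable host-to-guest interrupts. */
	for (i = 0; i < sc->num_rings; i++) {
		pq = sc->queues + i;
		PTNET_Q_LOCK(pq);
		pq->atok->appl_need_kick = 1;
		PTNET_Q_UNLOCK(pq);
	}
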
974 struct ptnet_queue *pq = sc->queues + i;
977 PTNET_Q_LOCK(pq);
978 if (pq->bufring) {
979 while ((m = buf_ring_dequeue_sc(pq->bufring))) {
983 PTNET_Q_UNLOCK(pq);
1012 struct ptnet_queue *pq = sc->queues + i;
1015 stats[idx].packets += pq->stats.packets;
1016 stats[idx].bytes += pq->stats.bytes;
1017 stats[idx].errors += pq->stats.errors;
1018 stats[idx].iqdrops += pq->stats.iqdrops;
1019 stats[idx].mcasts += pq->stats.mcasts;
1050 struct ptnet_queue *pq = sc->queues + i;
1051 struct ptnet_queue_stats cur = pq->stats;
1065 (cur.packets - pq->last_stats.packets),
1066 (cur.kicks - pq->last_stats.kicks),
1067 (cur.intrs - pq->last_stats.intrs));
1068 pq->last_stats = cur;
1164 struct ptnet_queue *pq;
1181 pq = sc->queues + i;
1182 pq->atok->appl_need_kick = 1;
1190 pq = sc->queues + i;
1191 pq->ktoa->kern_need_kick = 1;
1192 pq->atok->appl_need_kick =
1241 struct ptnet_queue *pq = sc->queues + kring->ring_id;
1244 notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
1246 ptnet_kick(pq);
1256 struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
1259 notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
1261 ptnet_kick(pq);
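
Lines 1241-1261 are the netmap txsync/rxsync callbacks of the passed-through adapter: run the guest-side sync against the CSB, then write the doorbell only if the sync reports that a notification is needed. A sketch of the TX side; the softc lookup from the kring is an assumption, and the ifnet accessor style varies across FreeBSD versions:

static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
	struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
	struct ptnet_queue *pq = sc->queues + kring->ring_id;
	bool notify;

	notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
	if (notify) {
		ptnet_kick(pq);	/* write the per-queue doorbell register */
	}

	return (0);
}
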
1274 struct ptnet_queue *pq = sc->queues + i;
1275 pq->atok->appl_need_kick = onoff;
1282 struct ptnet_queue *pq = opaque;
1283 struct ptnet_softc *sc = pq->sc;
1285 DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
1287 pq->stats.intrs ++;
1290 if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
1298 taskqueue_enqueue(pq->taskq, &pq->task);
1304 struct ptnet_queue *pq = opaque;
1305 struct ptnet_softc *sc = pq->sc;
1308 DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
1310 pq->stats.intrs ++;
1313 if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
1320 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
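
Lines 1304-1320 come from the RX interrupt handler: bump the interrupt counter, let netmap take over when the ring is in native netmap mode, and otherwise process received packets directly. A reconstruction; the early-return structure is an assumption:

static void
ptnet_rx_intr(void *opaque)
{
	struct ptnet_queue *pq = opaque;
	struct ptnet_softc *sc = pq->sc;
	unsigned int unused;

	DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
	pq->stats.intrs++;

	/* If the ring is in netmap mode, netmap handles the interrupt. */
	if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
		return;
	}

	/* Emulated datapath: process received packets right away. */
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
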
1339 ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
1343 struct nm_csb_atok *atok = pq->atok;
1344 struct nm_csb_ktoa *ktoa = pq->ktoa;
1359 ptnet_kick(pq);
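
Lines 1339-1359 show ptnet_ring_update(), which publishes the new head/cur to the CSB and kicks the host only when kern_need_kick is set. A hedged sketch; the nm_sync_kloop_appl_write() helper and the kring bookkeeping are assumptions beyond what the listed lines show:

static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
		  unsigned int head, unsigned int sync_flags)
{
	struct netmap_ring *ring = kring->ring;
	struct nm_csb_atok *atok = pq->atok;
	struct nm_csb_ktoa *ktoa = pq->ktoa;

	/* Publish the new head/cur to the netmap ring and the CSB. */
	ring->head = ring->cur = head;
	kring->rcur = kring->rhead = head;
	nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

	/* Kick the host only if it asked for a notification. */
	if (ktoa->kern_need_kick) {
		atok->sync_flags = sync_flags;
		ptnet_kick(pq);
	}
}
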
1370 ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
1373 struct ptnet_softc *sc = pq->sc;
1392 if (!PTNET_Q_TRYLOCK(pq)) {
1396 taskqueue_enqueue(pq->taskq, &pq->task);
1403 PTNET_Q_UNLOCK(pq);
1408 atok = pq->atok;
1409 ktoa = pq->ktoa;
1410 kring = na->tx_rings[pq->kring_id];
1449 mhead = drbr_peek(ifp, pq->bufring);
1476 pq->stats.errors ++;
1477 drbr_advance(ifp, pq->bufring);
1531 drbr_advance(ifp, pq->bufring);
1536 pq->stats.packets ++;
1537 pq->stats.bytes += mhead->m_pkthdr.len;
1539 pq->stats.mcasts ++;
1546 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
1552 ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
1557 drbr_inuse(ifp, pq->bufring)));
1558 taskqueue_enqueue(pq->taskq, &pq->task);
1561 PTNET_Q_UNLOCK(pq);
1570 struct ptnet_queue *pq;
1593 pq = sc->queues + queue_idx;
1595 err = drbr_enqueue(ifp, pq->bufring, m);
1600 pq->stats.errors ++;
1610 err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
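
Lines 1570-1610 are the if_transmit entry point: the mbuf is enqueued on the per-queue buf_ring and the queue is then drained with the TX budget. A simplified sketch; the flowid-based queue selection, the ifnet accessors and the softc field names are assumptions:

static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
	struct ptnet_softc *sc = if_getsoftc(ifp);
	struct ptnet_queue *pq;
	unsigned int queue_idx;
	int err;

	/* Queue selection by flowid is an assumption of this sketch. */
	queue_idx = m->m_pkthdr.flowid % sc->num_tx_rings;
	pq = sc->queues + queue_idx;

	err = drbr_enqueue(ifp, pq->bufring, m);
	if (err) {
		/* The buf_ring is full. */
		pq->stats.errors++;
		return (err);
	}

	/* Push what was enqueued, plus anything already pending. */
	return (ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true));
}
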
1671 ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
1673 struct ptnet_softc *sc = pq->sc;
1675 struct nm_csb_atok *atok = pq->atok;
1676 struct nm_csb_ktoa *ktoa = pq->ktoa;
1678 struct netmap_kring *kring = na->rx_rings[pq->kring_id];
1686 PTNET_Q_LOCK(pq);
1745 pq->stats.iqdrops ++;
1766 pq->stats.errors ++;
1793 pq->stats.errors ++;
1795 taskqueue_enqueue(pq->taskq,
1796 &pq->task);
1815 pq->stats.iqdrops ++;
1829 mhead->m_pkthdr.flowid = pq->kring_id;
1850 pq->stats.iqdrops ++;
1860 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
1865 pq->stats.packets ++;
1866 pq->stats.bytes += mhead->m_pkthdr.len;
1868 PTNET_Q_UNLOCK(pq);
1870 PTNET_Q_LOCK(pq);
1872 * updated under pq lock by ptnet_ring_update().
1887 ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
1896 taskqueue_enqueue(pq->taskq, &pq->task);
1899 PTNET_Q_UNLOCK(pq);
1907 struct ptnet_queue *pq = context;
1909 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
1910 ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
1916 struct ptnet_queue *pq = context;
1918 DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
1919 ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
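
Lines 1907-1919 are the deferred handlers wired up by TASK_INIT/NET_TASK_INIT above; each simply re-enters the corresponding processing loop with the default budget. Reconstructed, with the signatures assumed from the taskqueue(9) convention:

static void
ptnet_rx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
	struct ptnet_queue *pq = context;

	DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
	ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
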
1942 struct ptnet_queue *pq = sc->queues + i;
1952 rcnt += ptnet_drain_transmit_queue(pq,
1955 rcnt += ptnet_rx_eof(pq, queue_budget,