Lines Matching refs:rxq

155 sfxge_rx_qflush_done(struct sfxge_rxq *rxq)  in sfxge_rx_qflush_done()  argument
158 rxq->flush_state = SFXGE_FLUSH_DONE; in sfxge_rx_qflush_done()
162 sfxge_rx_qflush_failed(struct sfxge_rxq *rxq) in sfxge_rx_qflush_failed() argument
165 rxq->flush_state = SFXGE_FLUSH_FAILED; in sfxge_rx_qflush_failed()
183 struct sfxge_rxq *rxq = arg; in sfxge_rx_post_refill() local
189 sc = rxq->sc; in sfxge_rx_post_refill()
190 index = rxq->index; in sfxge_rx_post_refill()
192 magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq); in sfxge_rx_post_refill()
197 KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, in sfxge_rx_post_refill()
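
sfxge_rx_post_refill() does not touch the ring directly; it posts a software event whose "magic" word encodes both the event type (SFXGE_SW_EV_RX_QREFILL) and the queue, so the refill runs in the event-queue context. A minimal userspace sketch of that encoding follows; the constants and field widths are invented for illustration and do not match sfxge.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical layout modeling how a driver might pack a software event
 * code and an RX queue index into one 16-bit "magic" word; the real
 * sfxge layout lives in sfxge.h and may differ.
 */
#define SW_EV_MAGIC_RESERVED	0x8000	/* marks the event as software-generated */
#define SW_EV_LABEL_WIDTH	6	/* low bits carry the RX queue index */
#define SW_EV_LABEL_MASK	((1u << SW_EV_LABEL_WIDTH) - 1)

enum sw_ev { SW_EV_RX_QREFILL = 1 };

static uint16_t
sw_ev_mk_magic(enum sw_ev ev, unsigned int qindex)
{
	assert((qindex & SW_EV_LABEL_MASK) == qindex);
	return SW_EV_MAGIC_RESERVED | (ev << SW_EV_LABEL_WIDTH) | qindex;
}

int
main(void)
{
	uint16_t magic = sw_ev_mk_magic(SW_EV_RX_QREFILL, 3);

	/* The event handler decodes the same fields to find the queue. */
	printf("magic=%#x ev=%u q=%u\n", magic,
	    (magic & ~SW_EV_MAGIC_RESERVED) >> SW_EV_LABEL_WIDTH,
	    magic & SW_EV_LABEL_MASK);
	return (0);
}
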
203 sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying) in sfxge_rx_schedule_refill() argument
209 rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz); in sfxge_rx_schedule_refill()
211 rxq->refill_delay = hz / 10; in sfxge_rx_schedule_refill()
213 callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay, in sfxge_rx_schedule_refill()
214 sfxge_rx_post_refill, rxq); in sfxge_rx_schedule_refill()
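
sfxge_rx_schedule_refill() arms a callout with an exponentially backed-off delay: the first attempt fires after hz/10 ticks, and each failing retry doubles the delay up to a 10-second cap. The userspace model below reproduces just that arithmetic, assuming a hypothetical HZ of 1000.

#include <stdbool.h>
#include <stdio.h>

#define HZ 1000				/* stand-in for the kernel tick rate */

static unsigned int
min_u(unsigned int a, unsigned int b)
{
	return (a < b ? a : b);
}

int
main(void)
{
	unsigned int refill_delay = 0;
	bool retrying = false;

	for (int attempt = 0; attempt < 8; attempt++) {
		if (retrying)
			/* Each failed retry doubles the delay, capped at 10 s. */
			refill_delay = min_u(refill_delay * 2, 10 * HZ);
		else
			refill_delay = HZ / 10;	/* first try: ~100 ms */
		printf("attempt %d: next refill in %u ticks\n",
		    attempt, refill_delay);
		retrying = true;	/* pretend every refill keeps failing */
	}
	return (0);
}
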
220 sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying) in sfxge_rx_qfill() argument
231 sc = rxq->sc; in sfxge_rx_qfill()
232 index = rxq->index; in sfxge_rx_qfill()
236 prefetch_read_many(rxq->common); in sfxge_rx_qfill()
240 if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) in sfxge_rx_qfill()
243 rxfill = rxq->added - rxq->completed; in sfxge_rx_qfill()
244 KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries), in sfxge_rx_qfill()
246 ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target); in sfxge_rx_qfill()
247 KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries), in sfxge_rx_qfill()
261 id = (rxq->added + batch) & rxq->ptr_mask; in sfxge_rx_qfill()
262 rx_desc = &rxq->queue[id]; in sfxge_rx_qfill()
277 sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg); in sfxge_rx_qfill()
281 efx_rx_qpost(rxq->common, addr, mblksize, batch, in sfxge_rx_qfill()
282 rxq->completed, rxq->added); in sfxge_rx_qfill()
283 rxq->added += batch; in sfxge_rx_qfill()
289 sfxge_rx_schedule_refill(rxq, retrying); in sfxge_rx_qfill()
292 efx_rx_qpost(rxq->common, addr, mblksize, batch, in sfxge_rx_qfill()
293 rxq->completed, rxq->added); in sfxge_rx_qfill()
294 rxq->added += batch; in sfxge_rx_qfill()
298 bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map, in sfxge_rx_qfill()
301 efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed); in sfxge_rx_qfill()
307 if (rxq->pushed == rxq->completed) { in sfxge_rx_qfill()
308 sfxge_rx_schedule_refill(rxq, retrying); in sfxge_rx_qfill()
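
sfxge_rx_qfill() relies on free-running added/completed counters: their difference is the ring fill level, and a descriptor slot is found by masking the counter with ptr_mask (entries - 1, so entries must be a power of two). A minimal sketch of that accounting, with made-up sizes:

#include <assert.h>
#include <stdio.h>

/*
 * "added" and "completed" only ever increase; their difference is the
 * fill level, and the slot index is the counter masked by (entries - 1).
 * Types and names here are illustrative, not the driver's.
 */
#define ENTRIES 8u			/* must be a power of two */
#define PTR_MASK (ENTRIES - 1)

int
main(void)
{
	unsigned int added = 0, completed = 0;
	unsigned int target = ENTRIES;

	/* Post descriptors until the ring holds "target" buffers. */
	while (added - completed < target) {
		unsigned int id = added & PTR_MASK;	/* slot for this buffer */
		printf("post buffer into slot %u\n", id);
		added++;
	}
	assert(added - completed == ENTRIES);

	/* Hardware consumes three buffers; the fill level drops. */
	completed += 3;
	printf("fill level now %u of %u\n", added - completed, ENTRIES);
	return (0);
}
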
313 sfxge_rx_qrefill(struct sfxge_rxq *rxq) in sfxge_rx_qrefill() argument
316 if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) in sfxge_rx_qrefill()
320 sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE); in sfxge_rx_qrefill()
333 sfxge_rx_deliver(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_desc) in sfxge_rx_deliver() argument
335 struct sfxge_softc *sc = rxq->sc; in sfxge_rx_deliver()
348 efx_pseudo_hdr_hash_get(rxq->common, in sfxge_rx_deliver()
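
sfxge_rx_deliver() pulls the RSS hash out of the packet's pseudo-header prefix via efx_pseudo_hdr_hash_get() so the stack can use it as a flow ID. The sketch below only models the idea of reading a hash the NIC stored at a fixed offset in the prefix; the offset and layout are hypothetical, not the EFX prefix format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASH_OFFSET 4u	/* hypothetical location of the hash in the prefix */

static uint32_t
pseudo_hdr_hash_get(const uint8_t *prefix)
{
	uint32_t hash;

	memcpy(&hash, prefix + HASH_OFFSET, sizeof(hash));	/* unaligned-safe */
	return (hash);
}

int
main(void)
{
	uint8_t prefix[14] = { 0 };
	uint32_t rss = 0xdeadbeef;

	memcpy(prefix + HASH_OFFSET, &rss, sizeof(rss));
	printf("flowid = %#x\n", (unsigned)pseudo_hdr_hash_get(prefix));
	return (0);
}
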
420 static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) in sfxge_lro_drop() argument
427 sfxge_rx_deliver(rxq, &c->next_buf); in sfxge_lro_drop()
431 bucket = c->conn_hash & rxq->lro.conns_mask; in sfxge_lro_drop()
432 KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong")); in sfxge_lro_drop()
433 --rxq->lro.conns_n[bucket]; in sfxge_lro_drop()
434 TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); in sfxge_lro_drop()
435 TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link); in sfxge_lro_drop()
441 static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now) in sfxge_lro_purge_idle() argument
446 KASSERT(LIST_EMPTY(&rxq->lro.active_conns), in sfxge_lro_purge_idle()
449 rxq->lro.last_purge_ticks = now; in sfxge_lro_purge_idle()
450 for (i = 0; i <= rxq->lro.conns_mask; ++i) { in sfxge_lro_purge_idle()
451 if (TAILQ_EMPTY(&rxq->lro.conns[i])) in sfxge_lro_purge_idle()
454 c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq); in sfxge_lro_purge_idle()
456 ++rxq->lro.n_drop_idle; in sfxge_lro_purge_idle()
457 sfxge_lro_drop(rxq, c); in sfxge_lro_purge_idle()
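
sfxge_lro_drop() and sfxge_lro_purge_idle() together maintain the LRO connection hash table: a dropped connection is unlinked from its bucket (with the per-bucket count kept in sync) and recycled onto a free list, and the idle purge walks each bucket from the tail, where the least recently used entries sit. A self-contained sketch with illustrative types:

#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>

struct conn {
	TAILQ_ENTRY(conn) link;
	unsigned int conn_hash;
};
TAILQ_HEAD(conn_tailq, conn);

#define NBUCKETS 4u
static struct conn_tailq conns[NBUCKETS];
static unsigned int conns_n[NBUCKETS];
static struct conn_tailq free_conns = TAILQ_HEAD_INITIALIZER(free_conns);
static const unsigned int conns_mask = NBUCKETS - 1;

/* Unlink a connection from its bucket and recycle it, as lro_drop does. */
static void
lro_drop(struct conn *c)
{
	unsigned int bucket = c->conn_hash & conns_mask;

	assert(conns_n[bucket] > 0);	/* bucket fill level must be sane */
	--conns_n[bucket];
	TAILQ_REMOVE(&conns[bucket], c, link);
	TAILQ_INSERT_HEAD(&free_conns, c, link);
}

/* Drop the least recently used entry of each bucket (its tail). */
static void
lro_purge_idle(void)
{
	for (unsigned int i = 0; i <= conns_mask; ++i) {
		if (TAILQ_EMPTY(&conns[i]))
			continue;
		lro_drop(TAILQ_LAST(&conns[i], conn_tailq));
	}
}

int
main(void)
{
	struct conn c = { .conn_hash = 0x1234 };
	unsigned int bucket = c.conn_hash & conns_mask;

	for (unsigned int i = 0; i < NBUCKETS; i++)
		TAILQ_INIT(&conns[i]);

	TAILQ_INSERT_HEAD(&conns[bucket], &c, link);
	conns_n[bucket]++;
	lro_purge_idle();
	printf("bucket %u now holds %u connections\n", bucket, conns_n[bucket]);
	return (0);
}
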
524 sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) in sfxge_lro_try_merge() argument
544 data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) - in sfxge_lro_try_merge()
567 sfxge_lro_deliver(&rxq->lro, c); in sfxge_lro_try_merge()
570 ++rxq->lro.n_misorder; in sfxge_lro_try_merge()
577 ++rxq->lro.n_drop_idle; in sfxge_lro_try_merge()
579 sfxge_lro_deliver(&rxq->lro, c); in sfxge_lro_try_merge()
580 sfxge_lro_drop(rxq, c); in sfxge_lro_try_merge()
587 ++rxq->lro.n_slow_start; in sfxge_lro_try_merge()
594 sfxge_lro_deliver(&rxq->lro, c); in sfxge_lro_try_merge()
596 ++rxq->lro.n_drop_closed; in sfxge_lro_try_merge()
597 sfxge_lro_drop(rxq, c); in sfxge_lro_try_merge()
603 rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size; in sfxge_lro_try_merge()
610 sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th); in sfxge_lro_try_merge()
615 sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th); in sfxge_lro_try_merge()
622 sfxge_rx_deliver(rxq, rx_buf); in sfxge_lro_try_merge()
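
Before a buffer can be merged or become the start of a new LRO chain, sfxge_lro_try_merge() advances the mbuf's data pointer past the hardware-prepended RX prefix (m_data += rx_prefix_size). A toy illustration of that pointer adjustment, with a made-up prefix length:

#include <stdio.h>

#define RX_PREFIX_SIZE 14u	/* hypothetical prefix length */

int
main(void)
{
	char buf[64] = "PREFIXPREFIXXXpayload starts here";
	char *data = buf;

	data += RX_PREFIX_SIZE;		/* like m_data += rx_prefix_size */
	printf("payload: %s\n", data);
	return (0);
}
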
669 sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) in sfxge_lro() argument
671 struct sfxge_softc *sc = rxq->sc; in sfxge_lro()
683 conn_hash = efx_pseudo_hdr_hash_get(rxq->common, in sfxge_lro()
724 bucket = conn_hash & rxq->lro.conns_mask; in sfxge_lro()
726 TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) { in sfxge_lro()
748 TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); in sfxge_lro()
749 TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link); in sfxge_lro()
752 if (!sfxge_lro_try_merge(rxq, c)) in sfxge_lro()
755 LIST_INSERT_HEAD(&rxq->lro.active_conns, c, in sfxge_lro()
767 sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th); in sfxge_lro()
769 sfxge_rx_deliver(rxq, rx_buf); in sfxge_lro()
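
sfxge_lro() hashes the packet's flow, scans the matching bucket, and, on a hit that is not already first, moves the connection to the head of its bucket so that active flows stay cheap to find. A self-contained sketch of that move-to-front lookup (names are illustrative):

#include <sys/queue.h>
#include <stdio.h>

struct conn {
	TAILQ_ENTRY(conn) link;
	unsigned int conn_hash;
};
TAILQ_HEAD(conn_tailq, conn);

static struct conn *
lookup(struct conn_tailq *bucket, unsigned int hash)
{
	struct conn *c;

	TAILQ_FOREACH(c, bucket, link) {
		if (c->conn_hash != hash)
			continue;
		/* Move-to-front: keep hot flows at the head of the bucket. */
		if (c != TAILQ_FIRST(bucket)) {
			TAILQ_REMOVE(bucket, c, link);
			TAILQ_INSERT_HEAD(bucket, c, link);
		}
		return (c);
	}
	return (NULL);
}

int
main(void)
{
	struct conn_tailq bucket = TAILQ_HEAD_INITIALIZER(bucket);
	struct conn a = { .conn_hash = 1 }, b = { .conn_hash = 2 };

	TAILQ_INSERT_TAIL(&bucket, &a, link);
	TAILQ_INSERT_TAIL(&bucket, &b, link);
	printf("found %p, head is now %p\n",
	    (void *)lookup(&bucket, 2), (void *)TAILQ_FIRST(&bucket));
	return (0);
}
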
772 static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) in sfxge_lro_end_of_burst() argument
774 struct sfxge_lro_state *st = &rxq->lro; in sfxge_lro_end_of_burst()
782 if (sfxge_lro_try_merge(rxq, c)) { in sfxge_lro_end_of_burst()
792 sfxge_lro_purge_idle(rxq, t); in sfxge_lro_end_of_burst()
798 sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) in sfxge_lro() argument
803 sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) in sfxge_lro_end_of_burst() argument
810 sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop) in sfxge_rx_qcomplete() argument
812 struct sfxge_softc *sc = rxq->sc; in sfxge_rx_qcomplete()
822 index = rxq->index; in sfxge_rx_qcomplete()
827 completed = rxq->completed; in sfxge_rx_qcomplete()
828 while (completed != rxq->pending) { in sfxge_rx_qcomplete()
832 id = completed++ & rxq->ptr_mask; in sfxge_rx_qcomplete()
833 rx_desc = &rxq->queue[id]; in sfxge_rx_qcomplete()
836 if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) in sfxge_rx_qcomplete()
847 rc = efx_pseudo_hdr_pkt_length_get(rxq->common, in sfxge_rx_qcomplete()
878 rxq->loopback++; in sfxge_rx_qcomplete()
894 sfxge_lro(rxq, prev); in sfxge_rx_qcomplete()
896 sfxge_rx_deliver(rxq, prev); in sfxge_rx_qcomplete()
906 rxq->completed = completed; in sfxge_rx_qcomplete()
908 level = rxq->added - rxq->completed; in sfxge_rx_qcomplete()
915 sfxge_lro(rxq, prev); in sfxge_rx_qcomplete()
917 sfxge_rx_deliver(rxq, prev); in sfxge_rx_qcomplete()
925 sfxge_lro_end_of_burst(rxq); in sfxge_rx_qcomplete()
928 if (level < rxq->refill_threshold) in sfxge_rx_qcomplete()
929 sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE); in sfxge_rx_qcomplete()
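
sfxge_rx_qcomplete() consumes descriptors from completed up to pending, delivering (or LRO-merging) each one, and finally refills the ring if the remaining fill level has dropped below refill_threshold. A userspace model of just the accounting, with invented sizes and threshold:

#include <stdio.h>

#define ENTRIES 512u
#define PTR_MASK (ENTRIES - 1)
#define REFILL_THRESHOLD (ENTRIES / 4)	/* hypothetical; the driver computes its own */

int
main(void)
{
	unsigned int added = 512, completed = 0, pending = 400;

	while (completed != pending) {
		unsigned int id = completed++ & PTR_MASK;
		(void)id;	/* the driver would deliver rxq->queue[id] here */
	}

	unsigned int level = added - completed;
	if (level < REFILL_THRESHOLD)
		printf("level %u below threshold %u: refill\n",
		    level, REFILL_THRESHOLD);
	else
		printf("level %u ok\n", level);
	return (0);
}
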
935 struct sfxge_rxq *rxq; in sfxge_rx_qstop() local
942 rxq = sc->rxq[index]; in sfxge_rx_qstop()
947 KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, in sfxge_rx_qstop()
950 rxq->init_state = SFXGE_RXQ_INITIALIZED; in sfxge_rx_qstop()
952 callout_stop(&rxq->refill_callout); in sfxge_rx_qstop()
954 while (rxq->flush_state != SFXGE_FLUSH_DONE && retry != 0) { in sfxge_rx_qstop()
955 rxq->flush_state = SFXGE_FLUSH_PENDING; in sfxge_rx_qstop()
960 if (efx_rx_qflush(rxq->common) != 0) { in sfxge_rx_qstop()
962 rxq->flush_state = SFXGE_FLUSH_FAILED; in sfxge_rx_qstop()
971 if (rxq->flush_state != SFXGE_FLUSH_PENDING) in sfxge_rx_qstop()
978 if (rxq->flush_state == SFXGE_FLUSH_PENDING) { in sfxge_rx_qstop()
982 rxq->flush_state = SFXGE_FLUSH_DONE; in sfxge_rx_qstop()
986 if (rxq->flush_state == SFXGE_FLUSH_FAILED) { in sfxge_rx_qstop()
989 rxq->flush_state = SFXGE_FLUSH_DONE; in sfxge_rx_qstop()
992 rxq->pending = rxq->added; in sfxge_rx_qstop()
993 sfxge_rx_qcomplete(rxq, B_TRUE); in sfxge_rx_qstop()
995 KASSERT(rxq->completed == rxq->pending, in sfxge_rx_qstop()
998 rxq->added = 0; in sfxge_rx_qstop()
999 rxq->pushed = 0; in sfxge_rx_qstop()
1000 rxq->pending = 0; in sfxge_rx_qstop()
1001 rxq->completed = 0; in sfxge_rx_qstop()
1002 rxq->loopback = 0; in sfxge_rx_qstop()
1005 efx_rx_qdestroy(rxq->common); in sfxge_rx_qstop()
1007 efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, in sfxge_rx_qstop()
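
sfxge_rx_qstop() is a small state machine: it requests a flush with efx_rx_qflush(), waits for the event path to move flush_state from PENDING to DONE or FAILED, retries a bounded number of times, and treats a timeout or failure as done so teardown can proceed. A userspace model of the retry loop, with a stubbed flush:

#include <stdbool.h>
#include <stdio.h>

enum flush_state { FLUSH_REQUIRED, FLUSH_PENDING, FLUSH_DONE, FLUSH_FAILED };

static enum flush_state flush_state = FLUSH_REQUIRED;

/* Stand-in for efx_rx_qflush() plus the event that completes it. */
static bool
try_flush(int attempt)
{
	return (attempt >= 2);	/* pretend the first two attempts time out */
}

int
main(void)
{
	int retry = 3, attempt = 0;

	while (flush_state != FLUSH_DONE && retry != 0) {
		flush_state = FLUSH_PENDING;
		if (try_flush(attempt++))
			flush_state = FLUSH_DONE;
		retry--;
	}
	if (flush_state == FLUSH_PENDING) {
		/* Timed out: give up and treat the queue as flushed. */
		flush_state = FLUSH_DONE;
	}
	printf("final state: %d after %d attempts\n", flush_state, attempt);
	return (0);
}
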
1016 struct sfxge_rxq *rxq; in sfxge_rx_qstart() local
1023 rxq = sc->rxq[index]; in sfxge_rx_qstart()
1024 esmp = &rxq->mem; in sfxge_rx_qstart()
1027 KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, in sfxge_rx_qstart()
1033 if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp, in sfxge_rx_qstart()
1039 esmp, sc->rxq_entries, rxq->buf_base_id, EFX_RXQ_FLAG_NONE, in sfxge_rx_qstart()
1040 evq->common, &rxq->common)) != 0) in sfxge_rx_qstart()
1046 efx_rx_qenable(rxq->common); in sfxge_rx_qstart()
1048 rxq->init_state = SFXGE_RXQ_STARTED; in sfxge_rx_qstart()
1049 rxq->flush_state = SFXGE_FLUSH_REQUIRED; in sfxge_rx_qstart()
1052 sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE); in sfxge_rx_qstart()
1059 efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, in sfxge_rx_qstart()
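
sfxge_rx_qstart() flips init_state from INITIALIZED to STARTED only after the buffer table and common RXQ are set up, and every datapath entry point (qfill, qcomplete, post_refill) bails out early unless the queue is STARTED. A sketch of that gating, with illustrative state names:

#include <stdio.h>

enum rxq_state { RXQ_UNINITIALIZED, RXQ_INITIALIZED, RXQ_STARTED };

static enum rxq_state init_state = RXQ_UNINITIALIZED;

static void
qfill(void)
{
	/* Mirrors the __predict_false(init_state != STARTED) early return. */
	if (init_state != RXQ_STARTED) {
		printf("qfill: queue not started, nothing to do\n");
		return;
	}
	printf("qfill: posting buffers\n");
}

int
main(void)
{
	qfill();			/* ignored: not started yet */
	init_state = RXQ_INITIALIZED;	/* as after qinit */
	init_state = RXQ_STARTED;	/* as after qstart */
	qfill();			/* now posts buffers */
	init_state = RXQ_INITIALIZED;	/* as after qstop */
	return (0);
}
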
1160 rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common, in sfxge_rx_start()
1180 static void sfxge_lro_init(struct sfxge_rxq *rxq) in sfxge_lro_init() argument
1182 struct sfxge_lro_state *st = &rxq->lro; in sfxge_lro_init()
1188 st->sc = rxq->sc; in sfxge_lro_init()
1201 static void sfxge_lro_fini(struct sfxge_rxq *rxq) in sfxge_lro_fini() argument
1203 struct sfxge_lro_state *st = &rxq->lro; in sfxge_lro_fini()
1216 sfxge_lro_drop(rxq, c); in sfxge_lro_fini()
1235 sfxge_lro_init(struct sfxge_rxq *rxq) in sfxge_lro_init() argument
1240 sfxge_lro_fini(struct sfxge_rxq *rxq) in sfxge_lro_fini() argument
1249 struct sfxge_rxq *rxq; in sfxge_rx_qfini() local
1251 rxq = sc->rxq[index]; in sfxge_rx_qfini()
1253 KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, in sfxge_rx_qfini()
1257 free(rxq->queue, M_SFXGE); in sfxge_rx_qfini()
1258 sfxge_lro_fini(rxq); in sfxge_rx_qfini()
1261 sfxge_dma_free(&rxq->mem); in sfxge_rx_qfini()
1263 sc->rxq[index] = NULL; in sfxge_rx_qfini()
1265 free(rxq, M_SFXGE); in sfxge_rx_qfini()
1271 struct sfxge_rxq *rxq; in sfxge_rx_qinit() local
1277 rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK); in sfxge_rx_qinit()
1278 rxq->sc = sc; in sfxge_rx_qinit()
1279 rxq->index = index; in sfxge_rx_qinit()
1280 rxq->entries = sc->rxq_entries; in sfxge_rx_qinit()
1281 rxq->ptr_mask = rxq->entries - 1; in sfxge_rx_qinit()
1282 rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries); in sfxge_rx_qinit()
1284 sc->rxq[index] = rxq; in sfxge_rx_qinit()
1285 esmp = &rxq->mem; in sfxge_rx_qinit()
1293 &rxq->buf_base_id); in sfxge_rx_qinit()
1296 rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries, in sfxge_rx_qinit()
1298 sfxge_lro_init(rxq); in sfxge_rx_qinit()
1300 callout_init(&rxq->refill_callout, 1); in sfxge_rx_qinit()
1302 rxq->init_state = SFXGE_RXQ_INITIALIZED; in sfxge_rx_qinit()
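
sfxge_rx_qinit() sizes the ring from sc->rxq_entries and derives ptr_mask as entries - 1, which only yields valid slot indices if the entry count is a power of two; it also precomputes the refill threshold. The sketch below checks and uses those invariants; RX_REFILL_THRESHOLD here is a stand-in, not the driver's macro:

#include <assert.h>
#include <stdio.h>

#define RX_REFILL_THRESHOLD(entries) ((entries) * 9 / 10)	/* hypothetical */

int
main(void)
{
	unsigned int entries = 1024;

	assert((entries & (entries - 1)) == 0);	/* must be a power of two */
	unsigned int ptr_mask = entries - 1;
	unsigned int refill_threshold = RX_REFILL_THRESHOLD(entries);

	printf("entries=%u ptr_mask=%#x refill_threshold=%u\n",
	    entries, ptr_mask, refill_threshold);
	printf("counter 1030 maps to slot %u\n", 1030u & ptr_mask);
	return (0);
}
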
1335 sum += *(unsigned int *)((caddr_t)sc->rxq[index] + in sfxge_rx_stat_handler()
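
The truncated sfxge_rx_stat_handler() line hints at how per-queue statistics are exported: each sysctl sums one counter across all queues by reading it at a fixed byte offset into every rxq structure. A minimal model of that offset-based aggregation, with a hypothetical two-field rxq:

#include <stddef.h>
#include <stdio.h>

struct rxq {
	unsigned int loopback;
	unsigned int n_drop_idle;
};

int
main(void)
{
	struct rxq q0 = { .loopback = 2 }, q1 = { .loopback = 5 };
	struct rxq *rxqs[] = { &q0, &q1 };
	size_t off = offsetof(struct rxq, loopback);	/* which stat to sum */
	unsigned int sum = 0;

	for (size_t i = 0; i < sizeof(rxqs) / sizeof(rxqs[0]); i++)
		sum += *(unsigned int *)((char *)rxqs[i] + off);
	printf("loopback total: %u\n", sum);
	return (0);
}
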