Lines Matching +full:dma +full:-safe +full:-map

119 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
129 bit_mask = (1UL << bits) - 1; in nicvf_poll_reg()
138 timeout--; in nicvf_poll_reg()
140 device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg); in nicvf_poll_reg()
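The nicvf_poll_reg() matches show the driver's bounded register poll: build a bit mask, spin until the field reads back the expected value, and report failure once the retry budget runs out. A minimal sketch of that loop, assuming a caller-supplied reg_read() hook (not the driver's name) and omitting the DELAY() the real code performs between attempts:

#include <stdint.h>

/*
 * Poll a register field until it equals `val` or the retry budget is
 * exhausted. `reg_read` stands in for the driver's MMIO read.
 */
static int
poll_reg(uint64_t (*reg_read)(uint64_t), uint64_t reg, int bit_pos,
    int bits, uint64_t val)
{
	uint64_t bit_mask = ((1ULL << bits) - 1) << bit_pos;
	int timeout = 10;

	while (timeout > 0) {
		if ((reg_read(reg) & bit_mask) == (val << bit_pos))
			return (0);
		/* a real driver would DELAY() here */
		timeout--;
	}
	return (-1);	/* poll on reg failed */
}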
152 *paddr = segs->ds_addr; in nicvf_dmamap_q_cb()
162 /* Create DMA tag first */ in nicvf_alloc_q_desc_mem()
164 bus_get_dma_tag(nic->dev), /* parent tag */ in nicvf_alloc_q_desc_mem()
175 &dmem->dmat); /* dmat */ in nicvf_alloc_q_desc_mem()
178 device_printf(nic->dev, in nicvf_alloc_q_desc_mem()
183 /* Allocate segment of contiguous DMA safe memory */ in nicvf_alloc_q_desc_mem()
185 dmem->dmat, /* DMA tag */ in nicvf_alloc_q_desc_mem()
186 &dmem->base, /* virtual address */ in nicvf_alloc_q_desc_mem()
188 &dmem->dmap); /* DMA map */ in nicvf_alloc_q_desc_mem()
190 device_printf(nic->dev, "Failed to allocate DMA safe memory for" in nicvf_alloc_q_desc_mem()
196 dmem->dmat, in nicvf_alloc_q_desc_mem()
197 dmem->dmap, in nicvf_alloc_q_desc_mem()
198 dmem->base, in nicvf_alloc_q_desc_mem()
200 nicvf_dmamap_q_cb, /* map to DMA address cb. */ in nicvf_alloc_q_desc_mem()
201 &dmem->phys_base, /* physical address */ in nicvf_alloc_q_desc_mem()
204 device_printf(nic->dev, in nicvf_alloc_q_desc_mem()
205 "Cannot load DMA map of descriptors ring\n"); in nicvf_alloc_q_desc_mem()
209 dmem->q_len = q_len; in nicvf_alloc_q_desc_mem()
210 dmem->size = (desc_size * q_len); in nicvf_alloc_q_desc_mem()
215 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); in nicvf_alloc_q_desc_mem()
216 dmem->phys_base = 0; in nicvf_alloc_q_desc_mem()
218 err_dmat = bus_dma_tag_destroy(dmem->dmat); in nicvf_alloc_q_desc_mem()
219 dmem->base = NULL; in nicvf_alloc_q_desc_mem()
221 ("%s: Trying to destroy BUSY DMA tag", __func__)); in nicvf_alloc_q_desc_mem()
232 if ((dmem == NULL) || (dmem->base == NULL)) in nicvf_free_q_desc_mem()
235 /* Unload a map */ in nicvf_free_q_desc_mem()
236 bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD); in nicvf_free_q_desc_mem()
237 bus_dmamap_unload(dmem->dmat, dmem->dmap); in nicvf_free_q_desc_mem()
238 /* Free DMA memory */ in nicvf_free_q_desc_mem()
239 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); in nicvf_free_q_desc_mem()
240 /* Destroy DMA tag */ in nicvf_free_q_desc_mem()
241 err = bus_dma_tag_destroy(dmem->dmat); in nicvf_free_q_desc_mem()
244 ("%s: Trying to destroy BUSY DMA tag", __func__)); in nicvf_free_q_desc_mem()
246 dmem->phys_base = 0; in nicvf_free_q_desc_mem()
247 dmem->base = NULL; in nicvf_free_q_desc_mem()
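The nicvf_free_q_desc_mem() matches run the same steps in reverse. A short sketch, assuming the ring was set up as in the previous sketch:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* Tear down in reverse order of allocation: sync for the CPU, unload
 * the map, free the memory, then destroy the tag (the driver KASSERTs
 * that the tag is not busy at this point). */
static void
free_ring(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
{
	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(tag, map);
	bus_dmamem_free(tag, vaddr, map);
	(void)bus_dma_tag_destroy(tag);
}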
252 * HW returns memory address where packet is DMA'ed but not a pointer
274 mbuf->m_len = mbuf->m_pkthdr.len = buf_len; in nicvf_alloc_rcv_buffer()
276 err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs, in nicvf_alloc_rcv_buffer()
279 device_printf(nic->dev, in nicvf_alloc_rcv_buffer()
280 "Failed to map mbuf into DMA visible memory, err: %d\n", in nicvf_alloc_rcv_buffer()
283 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap); in nicvf_alloc_rcv_buffer()
287 panic("Unexpected number of DMA segments for RB: %d", nsegs); in nicvf_alloc_rcv_buffer()
292 rinfo = (struct rbuf_info *)mbuf->m_data; in nicvf_alloc_rcv_buffer()
295 rinfo->dmat = rbdr->rbdr_buff_dmat; in nicvf_alloc_rcv_buffer()
296 rinfo->dmap = dmap; in nicvf_alloc_rcv_buffer()
297 rinfo->mbuf = mbuf; in nicvf_alloc_rcv_buffer()
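GET_RBUF_INFO() (line 119 above) and the rinfo assignments in nicvf_alloc_rcv_buffer() are two halves of one trick: the bookkeeping (tag, map, owning mbuf) lives in the first NICVF_RCV_BUF_ALIGN_BYTES of the buffer, and the NIC is handed the address just past it, so the metadata can be recovered from the hardware's buffer pointer alone. A minimal sketch of the idea, with illustrative names and an assumed alignment value:

#include <stdint.h>

#define RCV_BUF_ALIGN_BYTES 128	/* assumption standing in for
				 * NICVF_RCV_BUF_ALIGN_BYTES */

struct rbuf_info_sketch {
	void	*dmat;		/* DMA tag used for this buffer */
	void	*dmap;		/* its DMA map */
	void	*mbuf;		/* owning mbuf */
};

/* Stash bookkeeping at the head of the data area and return the
 * address the NIC is allowed to DMA into. */
static inline uint8_t *
rbuf_publish(uint8_t *data, struct rbuf_info_sketch info)
{
	*(struct rbuf_info_sketch *)data = info;
	return (data + RCV_BUF_ALIGN_BYTES);
}

/* Inverse operation; this is what GET_RBUF_INFO() does. */
static inline struct rbuf_info_sketch *
rbuf_recover(uint8_t *buf_addr)
{
	return ((struct rbuf_info_sketch *)(buf_addr - RCV_BUF_ALIGN_BYTES));
}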
315 mbuf = rinfo->mbuf; in nicvf_rb_ptr_to_mbuf()
318 device_get_nameunit(nic->dev)); in nicvf_rb_ptr_to_mbuf()
324 rinfo->mbuf = NULL; in nicvf_rb_ptr_to_mbuf()
326 bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD); in nicvf_rb_ptr_to_mbuf()
327 bus_dmamap_unload(rinfo->dmat, rinfo->dmap); in nicvf_rb_ptr_to_mbuf()
344 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, in nicvf_init_rbdr()
347 device_printf(nic->dev, in nicvf_init_rbdr()
352 rbdr->desc = rbdr->dmem.base; in nicvf_init_rbdr()
357 rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES; in nicvf_init_rbdr()
358 rbdr->enable = TRUE; in nicvf_init_rbdr()
359 rbdr->thresh = RBDR_THRESH; in nicvf_init_rbdr()
360 rbdr->nic = nic; in nicvf_init_rbdr()
361 rbdr->idx = qidx; in nicvf_init_rbdr()
364 * Create DMA tag for Rx buffers. in nicvf_init_rbdr()
365 * Each map created using this tag is intended to store Rx payload for in nicvf_init_rbdr()
371 device_printf(nic->dev, in nicvf_init_rbdr()
376 bus_get_dma_tag(nic->dev), /* parent tag */ in nicvf_init_rbdr()
387 &rbdr->rbdr_buff_dmat); /* dmat */ in nicvf_init_rbdr()
390 device_printf(nic->dev, in nicvf_init_rbdr()
395 rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) * in nicvf_init_rbdr()
399 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap); in nicvf_init_rbdr()
401 device_printf(nic->dev, in nicvf_init_rbdr()
402 "Failed to create DMA map for RB\n"); in nicvf_init_rbdr()
405 rbdr->rbdr_buff_dmaps[idx] = dmap; in nicvf_init_rbdr()
413 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); in nicvf_init_rbdr()
417 TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr); in nicvf_init_rbdr()
418 TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr); in nicvf_init_rbdr()
419 rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK, in nicvf_init_rbdr()
420 taskqueue_thread_enqueue, &rbdr->rbdr_taskq); in nicvf_init_rbdr()
421 taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq", in nicvf_init_rbdr()
422 device_get_nameunit(nic->dev)); in nicvf_init_rbdr()
439 qs = nic->qs; in nicvf_free_rbdr()
444 rbdr->enable = FALSE; in nicvf_free_rbdr()
445 if (rbdr->rbdr_taskq != NULL) { in nicvf_free_rbdr()
447 while (taskqueue_cancel(rbdr->rbdr_taskq, in nicvf_free_rbdr()
448 &rbdr->rbdr_task_nowait, NULL) != 0) { in nicvf_free_rbdr()
450 taskqueue_drain(rbdr->rbdr_taskq, in nicvf_free_rbdr()
451 &rbdr->rbdr_task_nowait); in nicvf_free_rbdr()
453 taskqueue_free(rbdr->rbdr_taskq); in nicvf_free_rbdr()
454 rbdr->rbdr_taskq = NULL; in nicvf_free_rbdr()
457 &rbdr->rbdr_task, NULL) != 0) { in nicvf_free_rbdr()
459 taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task); in nicvf_free_rbdr()
467 * - it is safe to operate using head and tail indexes in nicvf_free_rbdr()
470 * - there is no need to unload DMA map and free MBUF for other in nicvf_free_rbdr()
473 if (rbdr->rbdr_buff_dmat != NULL) { in nicvf_free_rbdr()
474 head = rbdr->head; in nicvf_free_rbdr()
475 tail = rbdr->tail; in nicvf_free_rbdr()
478 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; in nicvf_free_rbdr()
480 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); in nicvf_free_rbdr()
481 mbuf = rinfo->mbuf; in nicvf_free_rbdr()
485 head &= (rbdr->dmem.q_len - 1); in nicvf_free_rbdr()
489 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; in nicvf_free_rbdr()
491 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); in nicvf_free_rbdr()
492 mbuf = rinfo->mbuf; in nicvf_free_rbdr()
496 /* Destroy DMA maps */ in nicvf_free_rbdr()
497 for (idx = 0; idx < qs->rbdr_len; idx++) { in nicvf_free_rbdr()
498 if (rbdr->rbdr_buff_dmaps[idx] == NULL) in nicvf_free_rbdr()
500 err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat, in nicvf_free_rbdr()
501 rbdr->rbdr_buff_dmaps[idx]); in nicvf_free_rbdr()
503 ("%s: Could not destroy DMA map for RB, desc: %d", in nicvf_free_rbdr()
505 rbdr->rbdr_buff_dmaps[idx] = NULL; in nicvf_free_rbdr()
509 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat); in nicvf_free_rbdr()
511 ("%s: Trying to destroy BUSY DMA tag", __func__)); in nicvf_free_rbdr()
513 rbdr->head = 0; in nicvf_free_rbdr()
514 rbdr->tail = 0; in nicvf_free_rbdr()
518 nicvf_free_q_desc_mem(nic, &rbdr->dmem); in nicvf_free_rbdr()
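nicvf_free_rbdr() also demonstrates the taskqueue(9) shutdown idiom that recurs throughout this file (rbdr_taskq, cmp_taskq, snd_taskq, qs_err_taskq): taskqueue_cancel() returns non-zero while the task is still running, in which case the code drains and retries until the cancel sticks, and only then frees the queue. The idiom in isolation:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

static void
stop_taskqueue(struct taskqueue **tqp, struct task *t)
{
	if (*tqp == NULL)
		return;
	/* Keep cancelling until no instance is in flight. */
	while (taskqueue_cancel(*tqp, t, NULL) != 0)
		taskqueue_drain(*tqp, t);
	taskqueue_free(*tqp);
	*tqp = NULL;
}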
540 nic = rbdr->nic; in nicvf_refill_rbdr()
541 qs = nic->qs; in nicvf_refill_rbdr()
542 rbdr_idx = rbdr->idx; in nicvf_refill_rbdr()
545 if (!rbdr->enable) in nicvf_refill_rbdr()
552 if (qcount >= (qs->rbdr_len - 1)) { in nicvf_refill_rbdr()
556 refill_rb_cnt = qs->rbdr_len - qcount - 1; in nicvf_refill_rbdr()
562 tail &= (rbdr->dmem.q_len - 1); in nicvf_refill_rbdr()
564 dmap = rbdr->rbdr_buff_dmaps[tail]; in nicvf_refill_rbdr()
571 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); in nicvf_refill_rbdr()
572 refill_rb_cnt--; in nicvf_refill_rbdr()
589 * Re-enable RBDR interrupts only in nicvf_refill_rbdr()
631 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task); in nicvf_rbdr_task_nowait()
644 rq_idx = cqe_rx->rq_idx; in nicvf_rcv_pkt_handler()
645 rq = &nic->qs->rq[rq_idx]; in nicvf_rcv_pkt_handler()
649 if (err && !cqe_rx->rb_cnt) in nicvf_rcv_pkt_handler()
654 dprintf(nic->dev, "Packet not received\n"); in nicvf_rcv_pkt_handler()
664 if (rq->lro_enabled && in nicvf_rcv_pkt_handler()
665 ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) && in nicvf_rcv_pkt_handler()
666 (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == in nicvf_rcv_pkt_handler()
673 if ((rq->lro.lro_cnt != 0) && in nicvf_rcv_pkt_handler()
674 (tcp_lro_rx(&rq->lro, mbuf, 0) == 0)) in nicvf_rcv_pkt_handler()
681 err = buf_ring_enqueue(cq->rx_br, mbuf); in nicvf_rcv_pkt_handler()
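The nicvf_rcv_pkt_handler() matches gate LRO on the per-queue flag, the L3/L4 type, and validated checksums before handing the mbuf to tcp_lro_rx(). A reduced sketch of the hand-off, assuming the eligibility checks already passed; the driver actually enqueues non-LRO packets onto cq->rx_br, while the sketch uses the plain input path for brevity:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/tcp_lro.h>

/* Try LRO first; fall back to the ordinary input path when LRO is not
 * configured or declines the packet. */
static void
rx_input(struct lro_ctrl *lro, if_t ifp, struct mbuf *m)
{
	if (lro->lro_cnt != 0 && tcp_lro_rx(lro, m, 0) == 0)
		return;		/* consumed or queued by LRO */
	if_input(ifp, m);
}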
703 sq = &nic->qs->sq[cqe_tx->sq_idx]; in nicvf_snd_pkt_handler()
705 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); in nicvf_snd_pkt_handler()
706 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) in nicvf_snd_pkt_handler()
709 dprintf(nic->dev, in nicvf_snd_pkt_handler()
711 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, in nicvf_snd_pkt_handler()
712 cqe_tx->sqe_ptr, hdr->subdesc_cnt); in nicvf_snd_pkt_handler()
714 dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap; in nicvf_snd_pkt_handler()
715 bus_dmamap_unload(sq->snd_buff_dmat, dmap); in nicvf_snd_pkt_handler()
717 mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf; in nicvf_snd_pkt_handler()
720 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL; in nicvf_snd_pkt_handler()
721 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); in nicvf_snd_pkt_handler()
737 struct queue_set *qs = nic->qs; in nicvf_cq_intr_handler()
738 struct cmp_queue *cq = &qs->cq[cq_idx]; in nicvf_cq_intr_handler()
739 struct snd_queue *sq = &qs->sq[cq_idx]; in nicvf_cq_intr_handler()
759 dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n", in nicvf_cq_intr_handler()
765 cqe_head &= (cq->dmem.q_len - 1); in nicvf_cq_intr_handler()
769 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx, in nicvf_cq_intr_handler()
770 cq_desc->cqe_type); in nicvf_cq_intr_handler()
771 switch (cq_desc->cqe_type) { in nicvf_cq_intr_handler()
801 dprintf(nic->dev, in nicvf_cq_intr_handler()
809 ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) { in nicvf_cq_intr_handler()
811 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); in nicvf_cq_intr_handler()
812 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); in nicvf_cq_intr_handler()
819 rq = &nic->qs->rq[rq_idx]; in nicvf_cq_intr_handler()
820 lro = &rq->lro; in nicvf_cq_intr_handler()
825 ifp = nic->ifp; in nicvf_cq_intr_handler()
827 while (!buf_ring_empty(cq->rx_br)) { in nicvf_cq_intr_handler()
828 mbuf = buf_ring_dequeue_mc(cq->rx_br); in nicvf_cq_intr_handler()
851 qs = nic->qs; in nicvf_qs_err_task()
854 if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); in nicvf_qs_err_task()
857 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { in nicvf_qs_err_task()
867 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx); in nicvf_qs_err_task()
868 nicvf_sq_enable(nic, &qs->sq[qidx], qidx); in nicvf_qs_err_task()
872 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); in nicvf_qs_err_task()
873 /* Re-enable Qset error interrupt */ in nicvf_qs_err_task()
885 nic = cq->nic; in nicvf_cmp_task()
888 cmp_err = nicvf_cq_intr_handler(nic, cq->idx); in nicvf_cmp_task()
894 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task); in nicvf_cmp_task()
897 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); in nicvf_cmp_task()
899 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx); in nicvf_cmp_task()
911 snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock", in nicvf_init_cmp_queue()
912 device_get_nameunit(nic->dev), qidx); in nicvf_init_cmp_queue()
913 mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF); in nicvf_init_cmp_queue()
915 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, in nicvf_init_cmp_queue()
919 device_printf(nic->dev, in nicvf_init_cmp_queue()
920 "Could not allocate DMA memory for CQ\n"); in nicvf_init_cmp_queue()
924 cq->desc = cq->dmem.base; in nicvf_init_cmp_queue()
925 cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH; in nicvf_init_cmp_queue()
926 cq->nic = nic; in nicvf_init_cmp_queue()
927 cq->idx = qidx; in nicvf_init_cmp_queue()
928 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; in nicvf_init_cmp_queue()
930 cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK, in nicvf_init_cmp_queue()
931 &cq->mtx); in nicvf_init_cmp_queue()
934 NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq); in nicvf_init_cmp_queue()
935 cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK, in nicvf_init_cmp_queue()
936 taskqueue_thread_enqueue, &cq->cmp_taskq); in nicvf_init_cmp_queue()
937 taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)", in nicvf_init_cmp_queue()
938 device_get_nameunit(nic->dev), qidx); in nicvf_init_cmp_queue()
952 * Ensure that it is safe to disable it or panic. in nicvf_free_cmp_queue()
954 if (cq->enable) in nicvf_free_cmp_queue()
955 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx); in nicvf_free_cmp_queue()
957 if (cq->cmp_taskq != NULL) { in nicvf_free_cmp_queue()
959 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0) in nicvf_free_cmp_queue()
960 taskqueue_drain(cq->cmp_taskq, &cq->cmp_task); in nicvf_free_cmp_queue()
962 taskqueue_free(cq->cmp_taskq); in nicvf_free_cmp_queue()
963 cq->cmp_taskq = NULL; in nicvf_free_cmp_queue()
968 * completion task. It is safe to do so since the corresponding CQ in nicvf_free_cmp_queue()
971 nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx); in nicvf_free_cmp_queue()
972 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); in nicvf_free_cmp_queue()
975 nicvf_free_q_desc_mem(nic, &cq->dmem); in nicvf_free_cmp_queue()
976 drbr_free(cq->rx_br, M_DEVBUF); in nicvf_free_cmp_queue()
978 mtx_destroy(&cq->mtx); in nicvf_free_cmp_queue()
979 memset(cq->mtx_name, 0, sizeof(cq->mtx_name)); in nicvf_free_cmp_queue()
992 nic = sq->nic; in nicvf_xmit_locked()
993 ifp = nic->ifp; in nicvf_xmit_locked()
996 while ((next = drbr_peek(ifp, sq->br)) != NULL) { in nicvf_xmit_locked()
1003 drbr_advance(ifp, sq->br); in nicvf_xmit_locked()
1005 drbr_putback(ifp, sq->br, next); in nicvf_xmit_locked()
1009 drbr_advance(ifp, sq->br); in nicvf_xmit_locked()
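nicvf_xmit_locked() is a textbook drbr(9) transmit loop: peek at the next mbuf without consuming it, advance only once the hardware has accepted it, and on failure either put it back for retry or advance if the mbuf was already freed. A sketch with hw_enqueue() standing in for nicvf_tx_mbuf_locked(); like the real function, it may free or replace the mbuf through the pointer:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf_ring.h>
#include <net/if.h>
#include <net/if_var.h>

static int
xmit_ring(if_t ifp, struct buf_ring *br,
    int (*hw_enqueue)(struct mbuf **))
{
	struct mbuf *m;
	int err;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		err = hw_enqueue(&m);
		if (err != 0) {
			if (m == NULL)
				drbr_advance(ifp, br);	/* already consumed */
			else
				drbr_putback(ifp, br, m); /* retry later */
			return (err);
		}
		drbr_advance(ifp, br);	/* accepted by hardware */
	}
	return (0);
}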
1022 nic = sq->nic; in nicvf_snd_task()
1023 ifp = nic->ifp; in nicvf_snd_task()
1030 IFF_DRV_RUNNING) || !nic->link_up) in nicvf_snd_task()
1038 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); in nicvf_snd_task()
1050 snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock", in nicvf_init_snd_queue()
1051 device_get_nameunit(nic->dev), qidx); in nicvf_init_snd_queue()
1052 mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF); in nicvf_init_snd_queue()
1056 sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF, in nicvf_init_snd_queue()
1057 M_NOWAIT, &sq->mtx); in nicvf_init_snd_queue()
1058 if (sq->br == NULL) { in nicvf_init_snd_queue()
1059 device_printf(nic->dev, in nicvf_init_snd_queue()
1065 /* Allocate DMA memory for Tx descriptors */ in nicvf_init_snd_queue()
1066 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, in nicvf_init_snd_queue()
1069 device_printf(nic->dev, in nicvf_init_snd_queue()
1070 "Could not allocate DMA memory for SQ\n"); in nicvf_init_snd_queue()
1074 sq->desc = sq->dmem.base; in nicvf_init_snd_queue()
1075 sq->head = sq->tail = 0; in nicvf_init_snd_queue()
1076 atomic_store_rel_int(&sq->free_cnt, q_len - 1); in nicvf_init_snd_queue()
1077 sq->thresh = SND_QUEUE_THRESH; in nicvf_init_snd_queue()
1078 sq->idx = qidx; in nicvf_init_snd_queue()
1079 sq->nic = nic; in nicvf_init_snd_queue()
1082 * Allocate DMA maps for Tx buffers in nicvf_init_snd_queue()
1085 /* Create DMA tag first */ in nicvf_init_snd_queue()
1087 bus_get_dma_tag(nic->dev), /* parent tag */ in nicvf_init_snd_queue()
1098 &sq->snd_buff_dmat); /* dmat */ in nicvf_init_snd_queue()
1101 device_printf(nic->dev, in nicvf_init_snd_queue()
1107 sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF, in nicvf_init_snd_queue()
1109 if (sq->snd_buff == NULL) { in nicvf_init_snd_queue()
1110 device_printf(nic->dev, in nicvf_init_snd_queue()
1118 err = bus_dmamap_create(sq->snd_buff_dmat, 0, in nicvf_init_snd_queue()
1119 &sq->snd_buff[i].dmap); in nicvf_init_snd_queue()
1121 device_printf(nic->dev, in nicvf_init_snd_queue()
1122 "Failed to create DMA maps for Tx buffers\n"); in nicvf_init_snd_queue()
1129 TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq); in nicvf_init_snd_queue()
1130 sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK, in nicvf_init_snd_queue()
1131 taskqueue_thread_enqueue, &sq->snd_taskq); in nicvf_init_snd_queue()
1132 taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)", in nicvf_init_snd_queue()
1133 device_get_nameunit(nic->dev), qidx); in nicvf_init_snd_queue()
1144 struct queue_set *qs = nic->qs; in nicvf_free_snd_queue()
1151 if (sq->snd_taskq != NULL) { in nicvf_free_snd_queue()
1153 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0) in nicvf_free_snd_queue()
1154 taskqueue_drain(sq->snd_taskq, &sq->snd_task); in nicvf_free_snd_queue()
1156 taskqueue_free(sq->snd_taskq); in nicvf_free_snd_queue()
1157 sq->snd_taskq = NULL; in nicvf_free_snd_queue()
1161 if (sq->snd_buff_dmat != NULL) { in nicvf_free_snd_queue()
1162 if (sq->snd_buff != NULL) { in nicvf_free_snd_queue()
1163 for (i = 0; i < qs->sq_len; i++) { in nicvf_free_snd_queue()
1164 m_freem(sq->snd_buff[i].mbuf); in nicvf_free_snd_queue()
1165 sq->snd_buff[i].mbuf = NULL; in nicvf_free_snd_queue()
1167 bus_dmamap_unload(sq->snd_buff_dmat, in nicvf_free_snd_queue()
1168 sq->snd_buff[i].dmap); in nicvf_free_snd_queue()
1169 err = bus_dmamap_destroy(sq->snd_buff_dmat, in nicvf_free_snd_queue()
1170 sq->snd_buff[i].dmap); in nicvf_free_snd_queue()
1177 ("%s: Could not destroy DMA map for SQ", in nicvf_free_snd_queue()
1182 free(sq->snd_buff, M_NICVF); in nicvf_free_snd_queue()
1184 err = bus_dma_tag_destroy(sq->snd_buff_dmat); in nicvf_free_snd_queue()
1186 ("%s: Trying to destroy BUSY DMA tag", __func__)); in nicvf_free_snd_queue()
1190 if (sq->br != NULL) in nicvf_free_snd_queue()
1191 drbr_free(sq->br, M_DEVBUF); in nicvf_free_snd_queue()
1193 if (sq->dmem.base != NULL) in nicvf_free_snd_queue()
1194 nicvf_free_q_desc_mem(nic, &sq->dmem); in nicvf_free_snd_queue()
1198 mtx_destroy(&sq->mtx); in nicvf_free_snd_queue()
1199 memset(sq->mtx_name, 0, sizeof(sq->mtx_name)); in nicvf_free_snd_queue()
1244 rbdr->head = in nicvf_reclaim_rbdr()
1246 rbdr->tail = in nicvf_reclaim_rbdr()
1270 timeout--; in nicvf_reclaim_rbdr()
1272 device_printf(nic->dev, in nicvf_reclaim_rbdr()
1298 ifp = nic->ifp; in nicvf_rcv_queue_config()
1300 rq = &qs->rq[qidx]; in nicvf_rcv_queue_config()
1301 rq->enable = enable; in nicvf_rcv_queue_config()
1303 lro = &rq->lro; in nicvf_rcv_queue_config()
1308 if (!rq->enable) { in nicvf_rcv_queue_config()
1312 rq->lro_enabled = FALSE; in nicvf_rcv_queue_config()
1317 rq->lro_enabled = FALSE; in nicvf_rcv_queue_config()
1320 device_printf(nic->dev, in nicvf_rcv_queue_config()
1323 rq->lro_enabled = TRUE; in nicvf_rcv_queue_config()
1324 lro->ifp = nic->ifp; in nicvf_rcv_queue_config()
1328 rq->cq_qs = qs->vnic_id; in nicvf_rcv_queue_config()
1329 rq->cq_idx = qidx; in nicvf_rcv_queue_config()
1330 rq->start_rbdr_qs = qs->vnic_id; in nicvf_rcv_queue_config()
1331 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; in nicvf_rcv_queue_config()
1332 rq->cont_rbdr_qs = qs->vnic_id; in nicvf_rcv_queue_config()
1333 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; in nicvf_rcv_queue_config()
1335 rq->caching = 1; in nicvf_rcv_queue_config()
1339 mbx.rq.qs_num = qs->vnic_id; in nicvf_rcv_queue_config()
1341 mbx.rq.cfg = ((uint64_t)rq->caching << 26) | (rq->cq_qs << 19) | in nicvf_rcv_queue_config()
1342 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | in nicvf_rcv_queue_config()
1343 (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | in nicvf_rcv_queue_config()
1344 (rq->start_qs_rbdr_idx); in nicvf_rcv_queue_config()
1348 mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0); in nicvf_rcv_queue_config()
1376 cq = &qs->cq[qidx]; in nicvf_cmp_queue_config()
1377 cq->enable = enable; in nicvf_cmp_queue_config()
1379 if (!cq->enable) { in nicvf_cmp_queue_config()
1389 (uint64_t)(cq->dmem.phys_base)); in nicvf_cmp_queue_config()
1400 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); in nicvf_cmp_queue_config()
1402 nic->cq_coalesce_usecs); in nicvf_cmp_queue_config()
1414 sq = &qs->sq[qidx]; in nicvf_snd_queue_config()
1415 sq->enable = enable; in nicvf_snd_queue_config()
1417 if (!sq->enable) { in nicvf_snd_queue_config()
1425 sq->cq_qs = qs->vnic_id; in nicvf_snd_queue_config()
1426 sq->cq_idx = qidx; in nicvf_snd_queue_config()
1430 mbx.sq.qs_num = qs->vnic_id; in nicvf_snd_queue_config()
1432 mbx.sq.sqs_mode = nic->sqs_mode; in nicvf_snd_queue_config()
1433 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; in nicvf_snd_queue_config()
1438 (uint64_t)(sq->dmem.phys_base)); in nicvf_snd_queue_config()
1449 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); in nicvf_snd_queue_config()
1460 rbdr = &qs->rbdr[qidx]; in nicvf_rbdr_config()
1467 (uint64_t)(rbdr->dmem.phys_base)); in nicvf_rbdr_config()
1476 rbdr_cfg.lines = rbdr->dma_size / 128; in nicvf_rbdr_config()
1482 qs->rbdr_len - 1); in nicvf_rbdr_config()
1486 rbdr->thresh - 1); in nicvf_rbdr_config()
1497 qs = nic->qs; in nicvf_qset_config()
1499 device_printf(nic->dev, in nicvf_qset_config()
1504 qs->enable = enable; in nicvf_qset_config()
1505 qs->vnic_id = nic->vf_id; in nicvf_qset_config()
1509 mbx.qs.num = qs->vnic_id; in nicvf_qset_config()
1513 if (qs->enable) { in nicvf_qset_config()
1514 qs_cfg->ena = 1; in nicvf_qset_config()
1515 qs_cfg->vnic = qs->vnic_id; in nicvf_qset_config()
1526 qs = nic->qs; in nicvf_free_resources()
1531 if (qs->qs_err_taskq != NULL) { in nicvf_free_resources()
1533 while (taskqueue_cancel(qs->qs_err_taskq, in nicvf_free_resources()
1534 &qs->qs_err_task, NULL) != 0) { in nicvf_free_resources()
1535 taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task); in nicvf_free_resources()
1537 taskqueue_free(qs->qs_err_taskq); in nicvf_free_resources()
1538 qs->qs_err_taskq = NULL; in nicvf_free_resources()
1541 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) in nicvf_free_resources()
1542 nicvf_free_rbdr(nic, &qs->rbdr[qidx]); in nicvf_free_resources()
1545 for (qidx = 0; qidx < qs->cq_cnt; qidx++) in nicvf_free_resources()
1546 nicvf_free_cmp_queue(nic, &qs->cq[qidx]); in nicvf_free_resources()
1549 for (qidx = 0; qidx < qs->sq_cnt; qidx++) in nicvf_free_resources()
1550 nicvf_free_snd_queue(nic, &qs->sq[qidx]); in nicvf_free_resources()
1556 struct queue_set *qs = nic->qs; in nicvf_alloc_resources()
1560 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { in nicvf_alloc_resources()
1561 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, in nicvf_alloc_resources()
1567 for (qidx = 0; qidx < qs->sq_cnt; qidx++) { in nicvf_alloc_resources()
1568 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) in nicvf_alloc_resources()
1573 for (qidx = 0; qidx < qs->cq_cnt; qidx++) { in nicvf_alloc_resources()
1574 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx)) in nicvf_alloc_resources()
1579 NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic); in nicvf_alloc_resources()
1580 qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK, in nicvf_alloc_resources()
1581 taskqueue_thread_enqueue, &qs->qs_err_taskq); in nicvf_alloc_resources()
1582 taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq", in nicvf_alloc_resources()
1583 device_get_nameunit(nic->dev)); in nicvf_alloc_resources()
1597 nic->qs = qs; in nicvf_set_qset_resources()
1600 qs->rbdr_cnt = RBDR_CNT; in nicvf_set_qset_resources()
1601 qs->rq_cnt = RCV_QUEUE_CNT; in nicvf_set_qset_resources()
1603 qs->sq_cnt = SND_QUEUE_CNT; in nicvf_set_qset_resources()
1604 qs->cq_cnt = CMP_QUEUE_CNT; in nicvf_set_qset_resources()
1607 qs->rbdr_len = RCV_BUF_COUNT; in nicvf_set_qset_resources()
1608 qs->sq_len = SND_QUEUE_LEN; in nicvf_set_qset_resources()
1609 qs->cq_len = CMP_QUEUE_LEN; in nicvf_set_qset_resources()
1611 nic->rx_queues = qs->rq_cnt; in nicvf_set_qset_resources()
1612 nic->tx_queues = qs->sq_cnt; in nicvf_set_qset_resources()
1624 qs = nic->qs; in nicvf_config_data_transfer()
1632 for (qidx = 0; qidx < qs->sq_cnt; qidx++) in nicvf_config_data_transfer()
1634 for (qidx = 0; qidx < qs->cq_cnt; qidx++) in nicvf_config_data_transfer()
1636 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) in nicvf_config_data_transfer()
1638 for (qidx = 0; qidx < qs->rq_cnt; qidx++) in nicvf_config_data_transfer()
1641 for (qidx = 0; qidx < qs->rq_cnt; qidx++) in nicvf_config_data_transfer()
1643 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) in nicvf_config_data_transfer()
1645 for (qidx = 0; qidx < qs->sq_cnt; qidx++) in nicvf_config_data_transfer()
1647 for (qidx = 0; qidx < qs->cq_cnt; qidx++) in nicvf_config_data_transfer()
1665 qentry = sq->tail; in nicvf_get_sq_desc()
1666 atomic_subtract_int(&sq->free_cnt, desc_cnt); in nicvf_get_sq_desc()
1667 sq->tail += desc_cnt; in nicvf_get_sq_desc()
1668 sq->tail &= (sq->dmem.q_len - 1); in nicvf_get_sq_desc()
1678 atomic_add_int(&sq->free_cnt, desc_cnt); in nicvf_put_sq_desc()
1679 sq->head += desc_cnt; in nicvf_put_sq_desc()
1680 sq->head &= (sq->dmem.q_len - 1); in nicvf_put_sq_desc()
1687 qentry &= (sq->dmem.q_len - 1); in nicvf_get_nxt_sqentry()
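nicvf_get_sq_desc(), nicvf_put_sq_desc(), and nicvf_get_nxt_sqentry() all rely on q_len being a power of two, so indices wrap with a mask instead of a modulo. A compact sketch of the producer/consumer arithmetic (the driver updates free_cnt with atomic_add_int/atomic_subtract_int):

#include <stdint.h>

struct ring_sketch {
	uint32_t head;		/* consumer (completion) index */
	uint32_t tail;		/* producer index */
	uint32_t q_len;		/* must be a power of two */
	int32_t	 free_cnt;	/* descriptors still available */
};

/* Reserve `cnt` descriptors starting at the current tail. */
static uint32_t
ring_get(struct ring_sketch *r, uint32_t cnt)
{
	uint32_t entry = r->tail;

	r->free_cnt -= cnt;
	r->tail = (r->tail + cnt) & (r->q_len - 1);
	return (entry);
}

/* Return `cnt` completed descriptors at the head. */
static void
ring_put(struct ring_sketch *r, uint32_t cnt)
{
	r->free_cnt += cnt;
	r->head = (r->head + cnt) & (r->q_len - 1);
}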
1722 while (sq->head != head) { in nicvf_sq_free_used_descs()
1723 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); in nicvf_sq_free_used_descs()
1724 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { in nicvf_sq_free_used_descs()
1728 snd_buff = &sq->snd_buff[sq->head]; in nicvf_sq_free_used_descs()
1729 if (snd_buff->mbuf != NULL) { in nicvf_sq_free_used_descs()
1730 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); in nicvf_sq_free_used_descs()
1731 m_freem(snd_buff->mbuf); in nicvf_sq_free_used_descs()
1732 sq->snd_buff[sq->head].mbuf = NULL; in nicvf_sq_free_used_descs()
1734 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); in nicvf_sq_free_used_descs()
1762 nic = sq->nic; in nicvf_sq_add_hdr_subdesc()
1765 sq->snd_buff[qentry].mbuf = mbuf; in nicvf_sq_add_hdr_subdesc()
1768 hdr->subdesc_type = SQ_DESC_TYPE_HEADER; in nicvf_sq_add_hdr_subdesc()
1770 hdr->post_cqe = 1; in nicvf_sq_add_hdr_subdesc()
1772 hdr->subdesc_cnt = subdesc_cnt; in nicvf_sq_add_hdr_subdesc()
1773 hdr->tot_len = len; in nicvf_sq_add_hdr_subdesc()
1776 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { in nicvf_sq_add_hdr_subdesc()
1778 etype = ntohs(eh->evl_proto); in nicvf_sq_add_hdr_subdesc()
1781 etype = ntohs(eh->evl_encap_proto); in nicvf_sq_add_hdr_subdesc()
1784 poff = proto = -1; in nicvf_sq_add_hdr_subdesc()
1788 if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) { in nicvf_sq_add_hdr_subdesc()
1790 sq->snd_buff[qentry].mbuf = NULL; in nicvf_sq_add_hdr_subdesc()
1802 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) { in nicvf_sq_add_hdr_subdesc()
1804 sq->snd_buff[qentry].mbuf = mbuf; in nicvf_sq_add_hdr_subdesc()
1808 if (mbuf->m_pkthdr.csum_flags & CSUM_IP) in nicvf_sq_add_hdr_subdesc()
1809 hdr->csum_l3 = 1; /* Enable IP csum calculation */ in nicvf_sq_add_hdr_subdesc()
1811 ip = (struct ip *)(mbuf->m_data + ehdrlen); in nicvf_sq_add_hdr_subdesc()
1812 iphlen = ip->ip_hl << 2; in nicvf_sq_add_hdr_subdesc()
1814 proto = ip->ip_p; in nicvf_sq_add_hdr_subdesc()
1820 if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) { in nicvf_sq_add_hdr_subdesc()
1823 if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0) in nicvf_sq_add_hdr_subdesc()
1826 if (mbuf->m_len < (poff + sizeof(struct tcphdr))) { in nicvf_sq_add_hdr_subdesc()
1828 sq->snd_buff[qentry].mbuf = mbuf; in nicvf_sq_add_hdr_subdesc()
1832 hdr->csum_l4 = SEND_L4_CSUM_TCP; in nicvf_sq_add_hdr_subdesc()
1835 if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0) in nicvf_sq_add_hdr_subdesc()
1838 if (mbuf->m_len < (poff + sizeof(struct udphdr))) { in nicvf_sq_add_hdr_subdesc()
1840 sq->snd_buff[qentry].mbuf = mbuf; in nicvf_sq_add_hdr_subdesc()
1844 hdr->csum_l4 = SEND_L4_CSUM_UDP; in nicvf_sq_add_hdr_subdesc()
1847 if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0) in nicvf_sq_add_hdr_subdesc()
1850 if (mbuf->m_len < (poff + sizeof(struct sctphdr))) { in nicvf_sq_add_hdr_subdesc()
1852 sq->snd_buff[qentry].mbuf = mbuf; in nicvf_sq_add_hdr_subdesc()
1856 hdr->csum_l4 = SEND_L4_CSUM_SCTP; in nicvf_sq_add_hdr_subdesc()
1861 hdr->l3_offset = ehdrlen; in nicvf_sq_add_hdr_subdesc()
1862 hdr->l4_offset = poff; in nicvf_sq_add_hdr_subdesc()
1865 if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) { in nicvf_sq_add_hdr_subdesc()
1866 th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff)); in nicvf_sq_add_hdr_subdesc()
1868 hdr->tso = 1; in nicvf_sq_add_hdr_subdesc()
1869 hdr->tso_start = poff + (th->th_off * 4); in nicvf_sq_add_hdr_subdesc()
1870 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz; in nicvf_sq_add_hdr_subdesc()
1871 hdr->inner_l3_offset = ehdrlen - 2; in nicvf_sq_add_hdr_subdesc()
1872 nic->drv_stats.tx_tso++; in nicvf_sq_add_hdr_subdesc()
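A detail worth noting in nicvf_sq_add_hdr_subdesc(): before reading an L4 header at offset poff, every branch verifies the header is contiguous in the leading mbuf (the m_len checks above) and pulls it up otherwise. A sketch of that guard for the TCP case:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <netinet/tcp.h>

/* Ensure the TCP header at offset `poff` is contiguous. On failure,
 * m_pullup() frees the chain and returns NULL, which the caller must
 * treat as "packet dropped". */
static struct mbuf *
ensure_tcp_header(struct mbuf *m, int poff)
{
	if (m->m_len < poff + (int)sizeof(struct tcphdr))
		m = m_pullup(m, poff + sizeof(struct tcphdr));
	return (m);
}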
1888 qentry &= (sq->dmem.q_len - 1); in nicvf_sq_add_gather_subdesc()
1892 gather->subdesc_type = SQ_DESC_TYPE_GATHER; in nicvf_sq_add_gather_subdesc()
1893 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; in nicvf_sq_add_gather_subdesc()
1894 gather->size = size; in nicvf_sq_add_gather_subdesc()
1895 gather->addr = data; in nicvf_sq_add_gather_subdesc()
1911 if (sq->free_cnt == 0) in nicvf_tx_mbuf_locked()
1914 snd_buff = &sq->snd_buff[sq->tail]; in nicvf_tx_mbuf_locked()
1916 err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap, in nicvf_tx_mbuf_locked()
1926 subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1; in nicvf_tx_mbuf_locked()
1927 if (subdesc_cnt > sq->free_cnt) { in nicvf_tx_mbuf_locked()
1929 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); in nicvf_tx_mbuf_locked()
1936 err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp, in nicvf_tx_mbuf_locked()
1937 (*mbufp)->m_pkthdr.len); in nicvf_tx_mbuf_locked()
1940 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); in nicvf_tx_mbuf_locked()
1956 bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE); in nicvf_tx_mbuf_locked()
1958 dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n", in nicvf_tx_mbuf_locked()
1959 __func__, sq->idx, subdesc_cnt); in nicvf_tx_mbuf_locked()
1961 nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR, in nicvf_tx_mbuf_locked()
1962 sq->idx, subdesc_cnt); in nicvf_tx_mbuf_locked()
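nicvf_tx_mbuf_locked() ends with the required publish order: sync the descriptor ring toward the device (BUS_DMASYNC_PREWRITE) before writing the doorbell, so the NIC never fetches stale descriptors. Sketched with a caller-supplied doorbell hook standing in for the NIC_QSET_SQ_0_7_DOOR register write:

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

static void
publish_sq_descs(bus_dma_tag_t dmat, bus_dmamap_t dmap,
    void (*ring_doorbell)(int qidx, int cnt), int qidx, int cnt)
{
	/* Flush CPU writes to the ring toward the device first... */
	bus_dmamap_sync(dmat, dmap, BUS_DMASYNC_PREWRITE);
	/* ...then tell the hardware how many subdescriptors were added. */
	ring_doorbell(qidx, cnt);
}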
1970 return ((i & ~3) + 3 - (i & 3)); in frag_num()
1991 dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n", in nicvf_get_rcv_mbuf()
1992 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); in nicvf_get_rcv_mbuf()
1994 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { in nicvf_get_rcv_mbuf()
1999 (*rb_ptrs - cqe_rx->align_pad)); in nicvf_get_rcv_mbuf()
2000 mbuf->m_len = payload_len; in nicvf_get_rcv_mbuf()
2001 mbuf->m_data += cqe_rx->align_pad; in nicvf_get_rcv_mbuf()
2002 if_setrcvif(mbuf, nic->ifp); in nicvf_get_rcv_mbuf()
2006 m_append(mbuf, payload_len, mbuf_frag->m_data); in nicvf_get_rcv_mbuf()
2015 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx; in nicvf_get_rcv_mbuf()
2017 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) { in nicvf_get_rcv_mbuf()
2021 if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) { in nicvf_get_rcv_mbuf()
2022 mbuf->m_pkthdr.csum_flags = in nicvf_get_rcv_mbuf()
2026 switch (cqe_rx->l4_type) { in nicvf_get_rcv_mbuf()
2029 mbuf->m_pkthdr.csum_flags |= in nicvf_get_rcv_mbuf()
2031 mbuf->m_pkthdr.csum_data = 0xffff; in nicvf_get_rcv_mbuf()
2034 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; in nicvf_get_rcv_mbuf()
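The nicvf_get_rcv_mbuf() matches show how hardware checksum verdicts become mbuf flags so the stack skips software verification: CSUM_IP_CHECKED|CSUM_IP_VALID for the IPv4 header, plus CSUM_DATA_VALID|CSUM_PSEUDO_HDR with csum_data = 0xffff for TCP/UDP. A sketch of that mapping, assuming the IFCAP_RXCSUM checks already passed:

#include <sys/param.h>
#include <sys/mbuf.h>

static void
mark_rx_csum_ok(struct mbuf *m, bool l4_ok)
{
	/* IPv4 header checksum verified by hardware. */
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
	if (l4_ok) {
		/* L4 checksum verified, including the pseudo-header;
		 * 0xffff is the conventional "already verified" value. */
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xffff;
	}
}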
2076 device_printf(nic->dev, in nicvf_enable_intr()
2113 device_printf(nic->dev, in nicvf_disable_intr()
2150 device_printf(nic->dev, in nicvf_clear_intr()
2190 device_printf(nic->dev, in nicvf_is_intr_enabled()
2207 rq = &nic->qs->rq[rq_idx]; in nicvf_update_rq_stats()
2208 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); in nicvf_update_rq_stats()
2209 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); in nicvf_update_rq_stats()
2221 sq = &nic->qs->sq[sq_idx]; in nicvf_update_sq_stats()
2222 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); in nicvf_update_sq_stats()
2223 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); in nicvf_update_sq_stats()
2231 struct nicvf_hw_stats *stats = &nic->hw_stats; in nicvf_check_cqe_rx_errs()
2232 struct nicvf_drv_stats *drv_stats = &nic->drv_stats; in nicvf_check_cqe_rx_errs()
2234 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { in nicvf_check_cqe_rx_errs()
2235 drv_stats->rx_frames_ok++; in nicvf_check_cqe_rx_errs()
2239 switch (cqe_rx->err_opcode) { in nicvf_check_cqe_rx_errs()
2241 stats->rx_bgx_truncated_pkts++; in nicvf_check_cqe_rx_errs()
2244 stats->rx_jabber_errs++; in nicvf_check_cqe_rx_errs()
2247 stats->rx_fcs_errs++; in nicvf_check_cqe_rx_errs()
2250 stats->rx_bgx_errs++; in nicvf_check_cqe_rx_errs()
2253 stats->rx_prel2_errs++; in nicvf_check_cqe_rx_errs()
2256 stats->rx_l2_hdr_malformed++; in nicvf_check_cqe_rx_errs()
2259 stats->rx_oversize++; in nicvf_check_cqe_rx_errs()
2262 stats->rx_undersize++; in nicvf_check_cqe_rx_errs()
2265 stats->rx_l2_len_mismatch++; in nicvf_check_cqe_rx_errs()
2268 stats->rx_l2_pclp++; in nicvf_check_cqe_rx_errs()
2271 stats->rx_ip_ver_errs++; in nicvf_check_cqe_rx_errs()
2274 stats->rx_ip_csum_errs++; in nicvf_check_cqe_rx_errs()
2277 stats->rx_ip_hdr_malformed++; in nicvf_check_cqe_rx_errs()
2280 stats->rx_ip_payload_malformed++; in nicvf_check_cqe_rx_errs()
2283 stats->rx_ip_ttl_errs++; in nicvf_check_cqe_rx_errs()
2286 stats->rx_l3_pclp++; in nicvf_check_cqe_rx_errs()
2289 stats->rx_l4_malformed++; in nicvf_check_cqe_rx_errs()
2292 stats->rx_l4_csum_errs++; in nicvf_check_cqe_rx_errs()
2295 stats->rx_udp_len_errs++; in nicvf_check_cqe_rx_errs()
2298 stats->rx_l4_port_errs++; in nicvf_check_cqe_rx_errs()
2301 stats->rx_tcp_flag_errs++; in nicvf_check_cqe_rx_errs()
2304 stats->rx_tcp_offset_errs++; in nicvf_check_cqe_rx_errs()
2307 stats->rx_l4_pclp++; in nicvf_check_cqe_rx_errs()
2310 stats->rx_truncated_pkts++; in nicvf_check_cqe_rx_errs()
2322 struct cmp_queue_stats *stats = &cq->stats; in nicvf_check_cqe_tx_errs()
2324 switch (cqe_tx->send_status) { in nicvf_check_cqe_tx_errs()
2326 stats->tx.good++; in nicvf_check_cqe_tx_errs()
2329 stats->tx.desc_fault++; in nicvf_check_cqe_tx_errs()
2332 stats->tx.hdr_cons_err++; in nicvf_check_cqe_tx_errs()
2335 stats->tx.subdesc_err++; in nicvf_check_cqe_tx_errs()
2338 stats->tx.imm_size_oflow++; in nicvf_check_cqe_tx_errs()
2341 stats->tx.data_seq_err++; in nicvf_check_cqe_tx_errs()
2344 stats->tx.mem_seq_err++; in nicvf_check_cqe_tx_errs()
2347 stats->tx.lock_viol++; in nicvf_check_cqe_tx_errs()
2350 stats->tx.data_fault++; in nicvf_check_cqe_tx_errs()
2353 stats->tx.tstmp_conflict++; in nicvf_check_cqe_tx_errs()
2356 stats->tx.tstmp_timeout++; in nicvf_check_cqe_tx_errs()
2359 stats->tx.mem_fault++; in nicvf_check_cqe_tx_errs()
2362 stats->tx.csum_overlap++; in nicvf_check_cqe_tx_errs()
2365 stats->tx.csum_overflow++; in nicvf_check_cqe_tx_errs()