Lines Matching +full:supports +full:- +full:cqe

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
108 ifmr->ifm_status = IFM_AVALID; in mana_ifmedia_status()
109 ifmr->ifm_active = IFM_ETHER; in mana_ifmedia_status()
111 if (!apc->port_is_up) { in mana_ifmedia_status()
113 mana_dbg(NULL, "Port %u link is down\n", apc->port_idx); in mana_ifmedia_status()
117 ifmr->ifm_status |= IFM_ACTIVE; in mana_ifmedia_status()
118 ifmr->ifm_active |= IFM_100G_DR | IFM_FDX; in mana_ifmedia_status()
127 struct mana_port_stats *stats = &apc->port_stats; in mana_get_counter()
131 return (counter_u64_fetch(stats->rx_packets)); in mana_get_counter()
133 return (counter_u64_fetch(stats->tx_packets)); in mana_get_counter()
135 return (counter_u64_fetch(stats->rx_bytes)); in mana_get_counter()
137 return (counter_u64_fetch(stats->tx_bytes)); in mana_get_counter()
139 return (counter_u64_fetch(stats->rx_drops)); in mana_get_counter()
141 return (counter_u64_fetch(stats->tx_drops)); in mana_get_counter()
159 if (apc->port_is_up) in mana_restart()
181 new_mtu = ifr->ifr_mtu; in mana_ioctl()
188 new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18); in mana_ioctl()
192 if (apc->port_is_up) in mana_ioctl()
195 apc->frame_size = new_mtu + 18; in mana_ioctl()
207 if (!apc->port_is_up) in mana_ioctl()
214 if (apc->port_is_up) in mana_ioctl()
228 mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^ in mana_ioctl()
241 "Also disabled tso4 due to -txcsum.\n"); in mana_ioctl()
255 "Also disabled tso6 due to -txcsum6.\n"); in mana_ioctl()
300 rc = ifmedia_ioctl(ifp, ifr, &apc->media, command); in mana_ioctl()
305 ifrk->ifrk_func = RSS_FUNC_TOEPLITZ; in mana_ioctl()
306 ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE; in mana_ioctl()
307 memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_ioctl()
312 ifrh->ifrh_func = RSS_FUNC_TOEPLITZ; in mana_ioctl()
313 ifrh->ifrh_types = in mana_ioctl()
358 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_tx_map_mbuf()
363 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map, in mana_tx_map_mbuf()
368 counter_u64_add(tx_stats->collapse, 1); in mana_tx_map_mbuf()
371 counter_u64_add(tx_stats->collapse_err, 1); in mana_tx_map_mbuf()
380 err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, in mana_tx_map_mbuf()
381 tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT); in mana_tx_map_mbuf()
385 tp->wqe_req.sgl[i].address = segs[i].ds_addr; in mana_tx_map_mbuf()
386 tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey; in mana_tx_map_mbuf()
387 tp->wqe_req.sgl[i].size = segs[i].ds_len; in mana_tx_map_mbuf()
389 tp->wqe_req.num_sge = nsegs; in mana_tx_map_mbuf()
391 tx_info->mbuf = *m_head; in mana_tx_map_mbuf()
393 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map, in mana_tx_map_mbuf()
404 bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map, in mana_tx_unmap_mbuf()
406 bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map); in mana_tx_unmap_mbuf()
407 if (tx_info->mbuf) { in mana_tx_unmap_mbuf()
408 m_freem(tx_info->mbuf); in mana_tx_unmap_mbuf()
409 tx_info->mbuf = NULL; in mana_tx_unmap_mbuf()
423 mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize); in mana_load_rx_mbuf()
431 mlen = rxq->datasize; in mana_load_rx_mbuf()
434 mbuf->m_pkthdr.len = mbuf->m_len = mlen; in mana_load_rx_mbuf()
436 if (rx_oob->mbuf) { in mana_load_rx_mbuf()
437 mbuf = rx_oob->mbuf; in mana_load_rx_mbuf()
438 mlen = rx_oob->mbuf->m_pkthdr.len; in mana_load_rx_mbuf()
444 err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map, in mana_load_rx_mbuf()
450 counter_u64_add(rxq->stats.dma_mapping_err, 1); in mana_load_rx_mbuf()
454 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map, in mana_load_rx_mbuf()
457 rx_oob->mbuf = mbuf; in mana_load_rx_mbuf()
458 rx_oob->num_sge = 1; in mana_load_rx_mbuf()
459 rx_oob->sgl[0].address = segs[0].ds_addr; in mana_load_rx_mbuf()
460 rx_oob->sgl[0].size = mlen; in mana_load_rx_mbuf()
461 rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey; in mana_load_rx_mbuf()
474 bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map, in mana_unload_rx_mbuf()
476 bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map); in mana_unload_rx_mbuf()
478 if (free_mbuf && rx_oob->mbuf) { in mana_unload_rx_mbuf()
479 m_freem(rx_oob->mbuf); in mana_unload_rx_mbuf()
480 rx_oob->mbuf = NULL; in mana_unload_rx_mbuf()
486 #define MANA_L3_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
487 #define MANA_L4_PROTO(_mbuf) ((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
496 if_t ndev = txq->ndev; in mana_xmit()
499 unsigned int tx_queue_size = apc->tx_queue_size; in mana_xmit()
500 struct mana_port_stats *port_stats = &apc->port_stats; in mana_xmit()
501 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_xmit()
511 gdma_sq = txq->gdma_sq; in mana_xmit()
512 cq = &apc->tx_qp[txq->idx].tx_cq; in mana_xmit()
513 tx_stats = &txq->stats; in mana_xmit()
517 next_to_use = txq->next_to_use; in mana_xmit()
519 while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) { in mana_xmit()
520 if (!apc->port_is_up || in mana_xmit()
522 drbr_putback(ndev, txq->txq_br, mbuf); in mana_xmit()
528 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0); in mana_xmit()
529 counter_u64_add(tx_stats->stop, 1); in mana_xmit()
530 uint64_t stops = counter_u64_fetch(tx_stats->stop); in mana_xmit()
531 uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup); in mana_xmit()
534 stops > wakeups && txq->alt_txq_idx == txq->idx) { in mana_xmit()
535 txq->alt_txq_idx = in mana_xmit()
536 (txq->idx + (stops / wakeups)) in mana_xmit()
537 % apc->num_queues; in mana_xmit()
538 counter_u64_add(tx_stats->alt_chg, 1); in mana_xmit()
541 drbr_putback(ndev, txq->txq_br, mbuf); in mana_xmit()
543 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task); in mana_xmit()
547 tx_info = &txq->tx_buf_info[next_to_use]; in mana_xmit()
557 counter_u64_add(tx_stats->dma_mapping_err, 1); in mana_xmit()
562 drbr_advance(ndev, txq->txq_br); in mana_xmit()
566 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id; in mana_xmit()
567 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame; in mana_xmit()
569 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) { in mana_xmit()
570 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset; in mana_xmit()
573 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset; in mana_xmit()
588 if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { in mana_xmit()
598 pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen; in mana_xmit()
600 pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz; in mana_xmit()
602 } else if (mbuf->m_pkthdr.csum_flags & in mana_xmit()
614 mbuf->m_pkthdr.l3hlen; in mana_xmit()
618 } else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) { in mana_xmit()
628 len = mbuf->m_pkthdr.len; in mana_xmit()
631 (struct gdma_posted_wqe_info *)&tx_info->wqe_inf); in mana_xmit()
638 drbr_advance(ndev, txq->txq_br); in mana_xmit()
644 (void)atomic_inc_return(&txq->pending_sends); in mana_xmit()
646 drbr_advance(ndev, txq->txq_br); in mana_xmit()
648 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq); in mana_xmit()
654 txq->tso_pkts++; in mana_xmit()
655 txq->tso_bytes += len; in mana_xmit()
660 counter_u64_add_protected(tx_stats->packets, packets); in mana_xmit()
661 counter_u64_add_protected(port_stats->tx_packets, packets); in mana_xmit()
662 counter_u64_add_protected(tx_stats->bytes, bytes); in mana_xmit()
663 counter_u64_add_protected(port_stats->tx_bytes, bytes); in mana_xmit()
666 txq->next_to_use = next_to_use; in mana_xmit()
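
The stop/wakeup bookkeeping in mana_xmit() above re-steers traffic to an alternate transmit queue when a queue has stalled more often than it has woken up. A minimal standalone sketch of that selection follows; the function name and the divide-by-zero guard are assumptions, not the driver's code.

#include <stdint.h>

/* Sketch only: mirrors the (idx + stops / wakeups) % num_queues step above. */
static unsigned int
pick_alt_txq(unsigned int idx, uint64_t stops, uint64_t wakeups,
    unsigned int num_queues)
{
	/* Re-steer only when stalls outnumber wakeups; the guard avoids /0. */
	if (wakeups == 0 || stops <= wakeups)
		return (idx);
	return ((idx + (unsigned int)(stops / wakeups)) % num_queues);
}
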
673 if_t ndev = txq->ndev; in mana_xmit_taskfunc()
676 while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up && in mana_xmit_taskfunc()
678 mtx_lock(&txq->txq_mtx); in mana_xmit_taskfunc()
680 mtx_unlock(&txq->txq_mtx); in mana_xmit_taskfunc()
686 if (unlikely((m)->m_len < (len))) { \
704 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) { in mana_tso_fixup()
705 etype = ntohs(eh->evl_proto); in mana_tso_fixup()
708 etype = ntohs(eh->evl_encap_proto); in mana_tso_fixup()
718 iphlen = ip->ip_hl << 2; in mana_tso_fixup()
719 mbuf->m_pkthdr.l3hlen = ehlen + iphlen; in mana_tso_fixup()
724 ip->ip_len = 0; in mana_tso_fixup()
725 ip->ip_sum = 0; in mana_tso_fixup()
726 th->th_sum = in_pseudo(ip->ip_src.s_addr, in mana_tso_fixup()
727 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); in mana_tso_fixup()
733 if (ip6->ip6_nxt != IPPROTO_TCP) { in mana_tso_fixup()
739 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6); in mana_tso_fixup()
743 ip6->ip6_plen = 0; in mana_tso_fixup()
744 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); in mana_tso_fixup()
769 if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) { in mana_mbuf_csum_check()
770 etype = ntohs(eh->evl_proto); in mana_mbuf_csum_check()
773 etype = ntohs(eh->evl_encap_proto); in mana_mbuf_csum_check()
785 iphlen = ip->ip_hl << 2; in mana_mbuf_csum_check()
786 mbuf->m_pkthdr.l3hlen = ehlen + iphlen; in mana_mbuf_csum_check()
788 MANA_L4_PROTO(mbuf) = ip->ip_p; in mana_mbuf_csum_check()
793 mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6); in mana_mbuf_csum_check()
795 MANA_L4_PROTO(mbuf) = ip6->ip6_nxt; in mana_mbuf_csum_check()
814 if (unlikely((!apc->port_is_up) || in mana_start_xmit()
818 if (m->m_pkthdr.csum_flags & CSUM_TSO) { in mana_start_xmit()
822 counter_u64_add_protected(apc->port_stats.tx_drops, 1); in mana_start_xmit()
830 counter_u64_add_protected(apc->port_stats.tx_drops, 1); in mana_start_xmit()
837 uint32_t hash = m->m_pkthdr.flowid; in mana_start_xmit()
838 txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] % in mana_start_xmit()
839 apc->num_queues; in mana_start_xmit()
841 txq_id = m->m_pkthdr.flowid % apc->num_queues; in mana_start_xmit()
844 if (apc->enable_tx_altq) in mana_start_xmit()
845 txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx; in mana_start_xmit()
847 txq = &apc->tx_qp[txq_id].txq; in mana_start_xmit()
849 is_drbr_empty = drbr_empty(ifp, txq->txq_br); in mana_start_xmit()
850 err = drbr_enqueue(ifp, txq->txq_br, m); in mana_start_xmit()
854 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task); in mana_start_xmit()
858 if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) { in mana_start_xmit()
860 mtx_unlock(&txq->txq_mtx); in mana_start_xmit()
862 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task); in mana_start_xmit()
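
mana_start_xmit() above maps the mbuf flow hash to a transmit queue through the RSS indirection table. A self-contained sketch of that lookup, with a placeholder table size standing in for MANA_INDIRECT_TABLE_MASK:

#include <stdint.h>

#define INDIR_TABLE_SIZE	64	/* placeholder; assumed power of two */
#define INDIR_TABLE_MASK	(INDIR_TABLE_SIZE - 1)

/* Sketch only: low hash bits index the table, the table entry picks a queue. */
static unsigned int
hash_to_txq(uint32_t flow_hash,
    const unsigned int indir_table[INDIR_TABLE_SIZE], unsigned int num_queues)
{
	return (indir_table[flow_hash & INDIR_TABLE_MASK] % num_queues);
}
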
871 bus_dma_tag_destroy(apc->tx_buf_tag); in mana_cleanup_port_context()
872 bus_dma_tag_destroy(apc->rx_buf_tag); in mana_cleanup_port_context()
873 apc->rx_buf_tag = NULL; in mana_cleanup_port_context()
875 free(apc->rxqs, M_DEVBUF); in mana_cleanup_port_context()
876 apc->rxqs = NULL; in mana_cleanup_port_context()
878 mana_free_counters((counter_u64_t *)&apc->port_stats, in mana_cleanup_port_context()
885 device_t dev = apc->ac->gdma_dev->gdma_context->dev; in mana_init_port_context()
902 &apc->tx_buf_tag); in mana_init_port_context()
919 &apc->rx_buf_tag); in mana_init_port_context()
925 apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *), in mana_init_port_context()
935 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
938 device_t dev = gc->dev; in mana_send_request()
942 req->dev_id = gc->mana.dev_id; in mana_send_request()
943 req->activity_id = atomic_inc_return(&activity_id); in mana_send_request()
949 if (err || resp->status) { in mana_send_request()
951 err, resp->status); in mana_send_request()
955 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 || in mana_send_request()
956 req->activity_id != resp->activity_id) { in mana_send_request()
959 req->dev_id.as_uint32, resp->dev_id.as_uint32, in mana_send_request()
960 req->activity_id, resp->activity_id); in mana_send_request()
972 if (resp_hdr->response.msg_type != expected_code) in mana_verify_resp_hdr()
975 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1) in mana_verify_resp_hdr()
978 if (resp_hdr->response.msg_size < min_size) in mana_verify_resp_hdr()
989 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
992 device_t dev = gc->dev; in mana_query_device_cfg()
1038 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
1055 apc->port_handle = resp.vport; in mana_query_vport_cfg()
1056 memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN); in mana_query_vport_cfg()
1064 apc->vport_use_count--; in mana_uncfg_vport()
1065 if (apc->vport_use_count < 0) { in mana_uncfg_vport()
1068 apc->vport_use_count); in mana_uncfg_vport()
1083 * For Ethernet usage, the hardware supports only one active user on a in mana_cfg_vport()
1098 if (apc->vport_use_count > 0) { in mana_cfg_vport()
1101 apc->vport_use_count++; in mana_cfg_vport()
1105 req.vport = apc->port_handle; in mana_cfg_vport()
1109 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
1112 if_printf(apc->ndev, "Failed to configure vPort: %d\n", err); in mana_cfg_vport()
1119 if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n", in mana_cfg_vport()
1127 apc->tx_shortform_allowed = resp.short_form_allowed; in mana_cfg_vport()
1128 apc->tx_vp_offset = resp.tx_vport_offset; in mana_cfg_vport()
1130 if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n", in mana_cfg_vport()
1131 apc->port_handle, protection_dom_id, doorbell_pg_id); in mana_cfg_vport()
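
The comment in mana_cfg_vport() above notes that for Ethernet usage the hardware supports only one active user per vPort, which is what the vport_use_count checks in mana_cfg_vport() and mana_uncfg_vport() enforce. A sketch of that acquire/release pattern in isolation (hypothetical names, locking omitted):

#include <stdbool.h>

struct vport_guard {
	int use_count;
};

/* Sketch only: reject a second user, as the use-count test above does. */
static bool
vport_acquire(struct vport_guard *vp)
{
	if (vp->use_count > 0)
		return (false);
	vp->use_count++;
	return (true);
}

static void
vport_release(struct vport_guard *vp)
{
	if (--vp->use_count < 0)	/* mismatched release */
		vp->use_count = 0;
}
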
1149 if_t ndev = apc->ndev; in mana_cfg_vport_steering()
1157 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size, in mana_cfg_vport_steering()
1160 req->vport = apc->port_handle; in mana_cfg_vport_steering()
1161 req->num_indir_entries = num_entries; in mana_cfg_vport_steering()
1162 req->indir_tab_offset = sizeof(*req); in mana_cfg_vport_steering()
1163 req->rx_enable = rx; in mana_cfg_vport_steering()
1164 req->rss_enable = apc->rss_state; in mana_cfg_vport_steering()
1165 req->update_default_rxobj = update_default_rxobj; in mana_cfg_vport_steering()
1166 req->update_hashkey = update_key; in mana_cfg_vport_steering()
1167 req->update_indir_tab = update_tab; in mana_cfg_vport_steering()
1168 req->default_rxobj = apc->default_rxobj; in mana_cfg_vport_steering()
1171 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); in mana_cfg_vport_steering()
1175 memcpy(req_indir_tab, apc->rxobj_table, in mana_cfg_vport_steering()
1176 req->num_indir_entries * sizeof(mana_handle_t)); in mana_cfg_vport_steering()
1179 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
1200 apc->port_handle, num_entries); in mana_cfg_vport_steering()
1216 if_t ndev = apc->ndev; in mana_create_wq_obj()
1223 req.wq_gdma_region = wq_spec->gdma_region; in mana_create_wq_obj()
1224 req.cq_gdma_region = cq_spec->gdma_region; in mana_create_wq_obj()
1225 req.wq_size = wq_spec->queue_size; in mana_create_wq_obj()
1226 req.cq_size = cq_spec->queue_size; in mana_create_wq_obj()
1227 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id; in mana_create_wq_obj()
1228 req.cq_parent_qid = cq_spec->attached_eq; in mana_create_wq_obj()
1230 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
1254 wq_spec->queue_index = resp.wq_id; in mana_create_wq_obj()
1255 cq_spec->queue_index = resp.cq_id; in mana_create_wq_obj()
1268 if_t ndev = apc->ndev; in mana_destroy_wq_obj()
1276 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
1293 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
1297 if (!ac->eqs) in mana_destroy_eq()
1300 for (i = 0; i < gc->max_num_queues; i++) { in mana_destroy_eq()
1301 eq = ac->eqs[i].eq; in mana_destroy_eq()
1308 free(ac->eqs, M_DEVBUF); in mana_destroy_eq()
1309 ac->eqs = NULL; in mana_destroy_eq()
1315 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
1316 struct gdma_context *gc = gd->gdma_context; in mana_create_eq()
1321 ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
1328 spec.eq.context = ac->eqs; in mana_create_eq()
1331 for (i = 0; i < gc->max_num_queues; i++) { in mana_create_eq()
1332 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
1350 init_completion(&rxq->fence_event); in mana_fence_rq()
1354 req.wq_obj_handle = rxq->rxobj; in mana_fence_rq()
1356 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
1359 if_printf(apc->ndev, "Failed to fence RQ %u: %d\n", in mana_fence_rq()
1360 rxq->rxq_idx, err); in mana_fence_rq()
1366 if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n", in mana_fence_rq()
1367 rxq->rxq_idx, err, resp.hdr.status); in mana_fence_rq()
1374 if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) { in mana_fence_rq()
1375 if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n", in mana_fence_rq()
1376 rxq->rxq_idx); in mana_fence_rq()
1390 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_fence_rqs()
1391 rxq = apc->rxqs[rxq_idx]; in mana_fence_rqs()
1406 used_space_old = wq->head - wq->tail; in mana_move_wq_tail()
1407 used_space_new = wq->head - (wq->tail + num_units); in mana_move_wq_tail()
1416 wq->tail += num_units; in mana_move_wq_tail()
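
mana_move_wq_tail() above treats head and tail as free-running unsigned counters, so head - tail is the number of in-use units even after wrap-around, and moving the tail past the head shows up as the "new" usage exceeding the "old" one. A standalone sketch of that check (struct and names are assumptions):

#include <stdbool.h>
#include <stdint.h>

struct wq_counters {
	uint32_t head;	/* units posted */
	uint32_t tail;	/* units retired */
};

/* Sketch only: same unsigned-wrap bookkeeping as the lines above. */
static bool
wq_move_tail(struct wq_counters *wq, uint32_t num_units)
{
	uint32_t used_old = wq->head - wq->tail;
	uint32_t used_new = wq->head - (wq->tail + num_units);

	if (used_new > used_old)
		return (false);		/* would move the tail past the head */
	wq->tail += num_units;
	return (true);
}
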
1423 struct gdma_comp *completions = cq->gdma_comp_buf; in mana_poll_tx_cq()
1428 struct mana_txq *txq = cq->txq; in mana_poll_tx_cq()
1434 int txq_idx = txq->idx; in mana_poll_tx_cq()
1442 ndev = txq->ndev; in mana_poll_tx_cq()
1444 tx_queue_size = apc->tx_queue_size; in mana_poll_tx_cq()
1446 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, in mana_poll_tx_cq()
1452 next_to_complete = txq->next_to_complete; in mana_poll_tx_cq()
1463 if (cqe_oob->cqe_hdr.client_type != in mana_poll_tx_cq()
1466 "WARNING: Invalid CQE client type %u\n", in mana_poll_tx_cq()
1467 cqe_oob->cqe_hdr.client_type); in mana_poll_tx_cq()
1471 switch (cqe_oob->cqe_hdr.cqe_type) { in mana_poll_tx_cq()
1486 "TX: txq %d CQE error %d, ntc = %d, " in mana_poll_tx_cq()
1488 txq_idx, cqe_oob->cqe_hdr.cqe_type, in mana_poll_tx_cq()
1489 next_to_complete, txq->pending_sends); in mana_poll_tx_cq()
1490 counter_u64_add(txq->stats.cqe_err, 1); in mana_poll_tx_cq()
1494 /* If the CQE type is unknown, log a debug msg, in mana_poll_tx_cq()
1498 "ERROR: TX: Unknown CQE type %d\n", in mana_poll_tx_cq()
1499 cqe_oob->cqe_hdr.cqe_type); in mana_poll_tx_cq()
1500 counter_u64_add(txq->stats.cqe_unknown_type, 1); in mana_poll_tx_cq()
1503 if (txq->gdma_txq_id != completions[i].wq_num) { in mana_poll_tx_cq()
1507 txq->gdma_txq_id, completions[i].wq_num); in mana_poll_tx_cq()
1511 tx_info = &txq->tx_buf_info[next_to_complete]; in mana_poll_tx_cq()
1512 if (!tx_info->mbuf) { in mana_poll_tx_cq()
1517 txq_idx, next_to_complete, txq->next_to_use, in mana_poll_tx_cq()
1518 txq->pending_sends, pkt_transmitted, sa_drop, in mana_poll_tx_cq()
1523 wqe_info = &tx_info->wqe_inf; in mana_poll_tx_cq()
1524 wqe_unit_cnt += wqe_info->wqe_size_in_bu; in mana_poll_tx_cq()
1535 txq->next_to_complete = next_to_complete; in mana_poll_tx_cq()
1543 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt); in mana_poll_tx_cq()
1548 gdma_wq = txq->gdma_sq; in mana_poll_tx_cq()
1556 /* Ensure checking txq_full before apc->port_is_up. */ in mana_poll_tx_cq()
1559 if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1560 /* Grab the txq lock and re-test */ in mana_poll_tx_cq()
1561 mtx_lock(&txq->txq_mtx); in mana_poll_tx_cq()
1565 apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) { in mana_poll_tx_cq()
1567 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, in mana_poll_tx_cq()
1569 counter_u64_add(txq->stats.wakeup, 1); in mana_poll_tx_cq()
1570 if (txq->alt_txq_idx != txq->idx) { in mana_poll_tx_cq()
1571 uint64_t stops = counter_u64_fetch(txq->stats.stop); in mana_poll_tx_cq()
1572 uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup); in mana_poll_tx_cq()
1575 txq->alt_txq_idx = txq->idx; in mana_poll_tx_cq()
1576 counter_u64_add(txq->stats.alt_reset, 1); in mana_poll_tx_cq()
1581 taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task); in mana_poll_tx_cq()
1583 mtx_unlock(&txq->txq_mtx); in mana_poll_tx_cq()
1586 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) in mana_poll_tx_cq()
1589 txq->idx, txq->pending_sends); in mana_poll_tx_cq()
1591 cq->work_done = pkt_transmitted; in mana_poll_tx_cq()
1600 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req, in mana_post_pkt_rxq()
1601 &recv_buf_oob->wqe_inf); in mana_post_pkt_rxq()
1604 rxq->rxq_idx, err); in mana_post_pkt_rxq()
1608 if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) { in mana_post_pkt_rxq()
1610 rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu); in mana_post_pkt_rxq()
1615 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe, in mana_rx_mbuf() argument
1618 struct mana_stats *rx_stats = &rxq->stats; in mana_rx_mbuf()
1619 if_t ndev = rxq->ndev; in mana_rx_mbuf()
1620 uint32_t pkt_len = cqe->ppi[0].pkt_len; in mana_rx_mbuf()
1621 uint16_t rxq_idx = rxq->rxq_idx; in mana_rx_mbuf()
1627 rxq->rx_cq.work_done++; in mana_rx_mbuf()
1633 mbuf->m_flags |= M_PKTHDR; in mana_rx_mbuf()
1634 mbuf->m_pkthdr.len = pkt_len; in mana_rx_mbuf()
1635 mbuf->m_len = pkt_len; in mana_rx_mbuf()
1636 mbuf->m_pkthdr.rcvif = ndev; in mana_rx_mbuf()
1640 (cqe->rx_iphdr_csum_succeed)) { in mana_rx_mbuf()
1641 mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; in mana_rx_mbuf()
1642 mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; in mana_rx_mbuf()
1643 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) { in mana_rx_mbuf()
1644 mbuf->m_pkthdr.csum_flags |= in mana_rx_mbuf()
1646 mbuf->m_pkthdr.csum_data = 0xffff; in mana_rx_mbuf()
1648 if (cqe->rx_tcp_csum_succeed) in mana_rx_mbuf()
1653 if (cqe->rx_hashtype != 0) { in mana_rx_mbuf()
1654 mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash; in mana_rx_mbuf()
1656 uint16_t hashtype = cqe->rx_hashtype; in mana_rx_mbuf()
1693 mbuf->m_pkthdr.flowid = rxq_idx; in mana_rx_mbuf()
1699 rxq->lro_tried++; in mana_rx_mbuf()
1700 if (rxq->lro.lro_cnt != 0 && in mana_rx_mbuf()
1701 tcp_lro_rx(&rxq->lro, mbuf, 0) == 0) in mana_rx_mbuf()
1704 rxq->lro_failed++; in mana_rx_mbuf()
1711 counter_u64_add_protected(rx_stats->packets, 1); in mana_rx_mbuf()
1712 counter_u64_add_protected(apc->port_stats.rx_packets, 1); in mana_rx_mbuf()
1713 counter_u64_add_protected(rx_stats->bytes, pkt_len); in mana_rx_mbuf()
1714 counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len); in mana_rx_mbuf()
1727 next_to_refill = rxq->next_to_refill; in mana_refill_rx_mbufs()
1730 if (next_to_refill == rxq->buf_index) { in mana_refill_rx_mbufs()
1733 rxq->rxq_idx, next_to_refill); in mana_refill_rx_mbufs()
1737 rxbuf_oob = &rxq->rx_oobs[next_to_refill]; in mana_refill_rx_mbufs()
1739 if (likely(rxbuf_oob->mbuf == NULL)) { in mana_refill_rx_mbufs()
1744 rxq->rxq_idx, next_to_refill); in mana_refill_rx_mbufs()
1751 err, rxq->rxq_idx); in mana_refill_rx_mbufs()
1752 counter_u64_add(rxq->stats.mbuf_alloc_fail, 1); in mana_refill_rx_mbufs()
1759 rxq->num_rx_buf); in mana_refill_rx_mbufs()
1764 rxq->gdma_rq->gdma_dev->gdma_context; in mana_refill_rx_mbufs()
1766 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq); in mana_refill_rx_mbufs()
1770 counter_u64_add(rxq->stats.partial_refill, 1); in mana_refill_rx_mbufs()
1773 rxq->rxq_idx, i, num); in mana_refill_rx_mbufs()
1776 rxq->next_to_refill = next_to_refill; in mana_refill_rx_mbufs()
1782 struct gdma_comp *cqe) in mana_process_rx_cqe() argument
1784 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data; in mana_process_rx_cqe()
1786 if_t ndev = rxq->ndev; in mana_process_rx_cqe()
1792 switch (oob->cqe_hdr.cqe_type) { in mana_process_rx_cqe()
1798 counter_u64_add(apc->port_stats.rx_drops, 1); in mana_process_rx_cqe()
1799 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index]; in mana_process_rx_cqe()
1808 complete(&rxq->fence_event); in mana_process_rx_cqe()
1812 if_printf(ndev, "Unknown RX CQE type = %d\n", in mana_process_rx_cqe()
1813 oob->cqe_hdr.cqe_type); in mana_process_rx_cqe()
1817 if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY) in mana_process_rx_cqe()
1820 pktlen = oob->ppi[0].pkt_len; in mana_process_rx_cqe()
1825 rxq->gdma_id, cq->gdma_id, rxq->rxobj); in mana_process_rx_cqe()
1829 curr = rxq->buf_index; in mana_process_rx_cqe()
1830 rxbuf_oob = &rxq->rx_oobs[curr]; in mana_process_rx_cqe()
1831 if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) { in mana_process_rx_cqe()
1834 rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1839 old_mbuf = rxbuf_oob->mbuf; in mana_process_rx_cqe()
1844 rxbuf_oob->mbuf = NULL; in mana_process_rx_cqe()
1849 mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu); in mana_process_rx_cqe()
1851 rxq->buf_index = MANA_IDX_NEXT(rxq->buf_index, rxq->num_rx_buf); in mana_process_rx_cqe()
1854 refill_required = MANA_GET_SPACE(rxq->next_to_refill, in mana_process_rx_cqe()
1855 rxq->buf_index, rxq->num_rx_buf); in mana_process_rx_cqe()
1857 if (refill_required >= rxq->refill_thresh) { in mana_process_rx_cqe()
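
mana_process_rx_cqe() above advances buf_index with MANA_IDX_NEXT() and measures the refill backlog with MANA_GET_SPACE(); those macros are not shown in this listing, so the following is only one plausible reading of the ring arithmetic, not the driver's definitions.

#include <stdint.h>

/* Sketch only: advance a ring index with wrap-around. */
static inline uint32_t
idx_next(uint32_t idx, uint32_t ring_size)
{
	return ((idx + 1) % ring_size);
}

/* Sketch only: slots retired by completions but not yet refilled. */
static inline uint32_t
refill_backlog(uint32_t next_to_refill, uint32_t buf_index, uint32_t ring_size)
{
	return ((buf_index + ring_size - next_to_refill) % ring_size);
}
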
1866 struct gdma_comp *comp = cq->gdma_comp_buf; in mana_poll_rx_cq()
1869 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); in mana_poll_rx_cq()
1877 "WARNING: CQE not for receive queue\n"); in mana_poll_rx_cq()
1881 /* verify recv cqe references the right rxq */ in mana_poll_rx_cq()
1882 if (comp[i].wq_num != cq->rxq->gdma_id) { in mana_poll_rx_cq()
1884 "WARNING: Received CQE %d not for " in mana_poll_rx_cq()
1886 comp[i].wq_num, cq->rxq->gdma_id); in mana_poll_rx_cq()
1890 mana_process_rx_cqe(cq->rxq, cq, &comp[i]); in mana_poll_rx_cq()
1893 tcp_lro_flush_all(&cq->rxq->lro); in mana_poll_rx_cq()
1902 KASSERT(cq->gdma_cq == gdma_queue, in mana_cq_handler()
1903 ("cq do not match %p, %p", cq->gdma_cq, gdma_queue)); in mana_cq_handler()
1905 if (cq->type == MANA_CQ_TYPE_RX) { in mana_cq_handler()
1911 if (cq->work_done < cq->budget && cq->do_not_ring_db == false) in mana_cq_handler()
1929 cq->work_done = 0; in mana_poll()
1930 if (cq->type == MANA_CQ_TYPE_RX) { in mana_poll()
1931 cq->budget = MANA_RX_BUDGET; in mana_poll()
1933 cq->budget = MANA_TX_BUDGET; in mana_poll()
1941 if (i == (MANA_POLL_BUDGET - 1)) in mana_poll()
1942 cq->budget = CQE_POLLING_BUFFER + 1; in mana_poll()
1944 mana_cq_handler(cq, cq->gdma_cq); in mana_poll()
1946 if (cq->work_done < cq->budget) in mana_poll()
1949 cq->work_done = 0; in mana_poll()
1958 taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task); in mana_schedule_task()
1964 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1966 if (!cq->gdma_cq) in mana_deinit_cq()
1970 if (cq->cleanup_tq) { in mana_deinit_cq()
1971 while (taskqueue_cancel(cq->cleanup_tq, in mana_deinit_cq()
1972 &cq->cleanup_task, NULL)) { in mana_deinit_cq()
1973 taskqueue_drain(cq->cleanup_tq, in mana_deinit_cq()
1974 &cq->cleanup_task); in mana_deinit_cq()
1977 taskqueue_free(cq->cleanup_tq); in mana_deinit_cq()
1980 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq); in mana_deinit_cq()
1986 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1991 if (!txq->gdma_sq) in mana_deinit_txq()
1994 if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) { in mana_deinit_txq()
2000 if (txq->next_to_use != txq->next_to_complete) { in mana_deinit_txq()
2004 txq->next_to_use, txq->next_to_complete); in mana_deinit_txq()
2008 if (txq->txq_br) { in mana_deinit_txq()
2009 mtx_lock(&txq->txq_mtx); in mana_deinit_txq()
2010 drbr_flush(apc->ndev, txq->txq_br); in mana_deinit_txq()
2011 mtx_unlock(&txq->txq_mtx); in mana_deinit_txq()
2012 buf_ring_free(txq->txq_br, M_DEVBUF); in mana_deinit_txq()
2016 if (txq->enqueue_tq) { in mana_deinit_txq()
2017 while (taskqueue_cancel(txq->enqueue_tq, in mana_deinit_txq()
2018 &txq->enqueue_task, NULL)) { in mana_deinit_txq()
2019 taskqueue_drain(txq->enqueue_tq, in mana_deinit_txq()
2020 &txq->enqueue_task); in mana_deinit_txq()
2023 taskqueue_free(txq->enqueue_tq); in mana_deinit_txq()
2026 if (txq->tx_buf_info) { in mana_deinit_txq()
2027 /* Free all mbufs which are still in-flight */ in mana_deinit_txq()
2028 for (i = 0; i < apc->tx_queue_size; i++) { in mana_deinit_txq()
2029 txbuf_info = &txq->tx_buf_info[i]; in mana_deinit_txq()
2030 if (txbuf_info->mbuf) { in mana_deinit_txq()
2035 free(txq->tx_buf_info, M_DEVBUF); in mana_deinit_txq()
2038 mana_free_counters((counter_u64_t *)&txq->stats, in mana_deinit_txq()
2039 sizeof(txq->stats)); in mana_deinit_txq()
2041 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq); in mana_deinit_txq()
2043 mtx_destroy(&txq->txq_mtx); in mana_deinit_txq()
2051 if (!apc->tx_qp) in mana_destroy_txq()
2054 for (i = 0; i < apc->num_queues; i++) { in mana_destroy_txq()
2055 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); in mana_destroy_txq()
2057 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); in mana_destroy_txq()
2059 mana_deinit_txq(apc, &apc->tx_qp[i].txq); in mana_destroy_txq()
2062 free(apc->tx_qp, M_DEVBUF); in mana_destroy_txq()
2063 apc->tx_qp = NULL; in mana_destroy_txq()
2069 struct mana_context *ac = apc->ac; in mana_create_txq()
2070 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
2082 apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp), in mana_create_txq()
2086 * apc->tx_queue_size represents the maximum number of WQEs in mana_create_txq()
2090 * as min val of apc->tx_queue_size is 128 and that would make in mana_create_txq()
2092 * apc->tx_queue_size are always power of two. in mana_create_txq()
2094 txq_size = apc->tx_queue_size * 32; in mana_create_txq()
2098 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE; in mana_create_txq()
2101 gc = gd->gdma_context; in mana_create_txq()
2103 for (i = 0; i < apc->num_queues; i++) { in mana_create_txq()
2104 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE; in mana_create_txq()
2107 txq = &apc->tx_qp[i].txq; in mana_create_txq()
2109 txq->ndev = net; in mana_create_txq()
2110 txq->vp_offset = apc->tx_vp_offset; in mana_create_txq()
2111 txq->idx = i; in mana_create_txq()
2112 txq->alt_txq_idx = i; in mana_create_txq()
2118 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq); in mana_create_txq()
2123 cq = &apc->tx_qp[i].tx_cq; in mana_create_txq()
2124 cq->type = MANA_CQ_TYPE_TX; in mana_create_txq()
2126 cq->txq = txq; in mana_create_txq()
2133 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
2135 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_txq()
2142 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle; in mana_create_txq()
2143 wq_spec.queue_size = txq->gdma_sq->queue_size; in mana_create_txq()
2145 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_txq()
2146 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_txq()
2148 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_txq()
2150 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ, in mana_create_txq()
2151 &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object); in mana_create_txq()
2156 txq->gdma_sq->id = wq_spec.queue_index; in mana_create_txq()
2157 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_txq()
2159 txq->gdma_sq->mem_info.dma_region_handle = in mana_create_txq()
2161 cq->gdma_cq->mem_info.dma_region_handle = in mana_create_txq()
2164 txq->gdma_txq_id = txq->gdma_sq->id; in mana_create_txq()
2166 cq->gdma_id = cq->gdma_cq->id; in mana_create_txq()
2170 i, txq->gdma_txq_id, cq->gdma_id); in mana_create_txq()
2172 if (cq->gdma_id >= gc->max_num_cqs) { in mana_create_txq()
2173 if_printf(net, "CQ id %u too large.\n", cq->gdma_id); in mana_create_txq()
2178 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
2181 txq->tx_buf_info = malloc(apc->tx_queue_size * in mana_create_txq()
2185 snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name), in mana_create_txq()
2187 mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF); in mana_create_txq()
2189 txq->txq_br = buf_ring_alloc(4 * apc->tx_queue_size, in mana_create_txq()
2190 M_DEVBUF, M_WAITOK, &txq->txq_mtx); in mana_create_txq()
2193 TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq); in mana_create_txq()
2194 txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque", in mana_create_txq()
2195 M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq); in mana_create_txq()
2196 if (unlikely(txq->enqueue_tq == NULL)) { in mana_create_txq()
2202 taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET, in mana_create_txq()
2203 "mana txq p%u-tx%d", apc->port_idx, i); in mana_create_txq()
2205 mana_alloc_counters((counter_u64_t *)&txq->stats, in mana_create_txq()
2206 sizeof(txq->stats)); in mana_create_txq()
2209 cq->do_not_ring_db = false; in mana_create_txq()
2211 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq); in mana_create_txq()
2212 cq->cleanup_tq = in mana_create_txq()
2215 &cq->cleanup_tq); in mana_create_txq()
2217 if (apc->last_tx_cq_bind_cpu < 0) in mana_create_txq()
2218 apc->last_tx_cq_bind_cpu = CPU_FIRST(); in mana_create_txq()
2219 cq->cpu = apc->last_tx_cq_bind_cpu; in mana_create_txq()
2220 apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu); in mana_create_txq()
2222 if (apc->bind_cleanup_thread_cpu) { in mana_create_txq()
2224 CPU_SETOF(cq->cpu, &cpu_mask); in mana_create_txq()
2225 taskqueue_start_threads_cpuset(&cq->cleanup_tq, in mana_create_txq()
2227 "mana cq p%u-tx%u-cpu%d", in mana_create_txq()
2228 apc->port_idx, txq->idx, cq->cpu); in mana_create_txq()
2230 taskqueue_start_threads(&cq->cleanup_tq, 1, in mana_create_txq()
2231 PI_NET, "mana cq p%u-tx%u", in mana_create_txq()
2232 apc->port_idx, txq->idx); in mana_create_txq()
2235 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_txq()
2248 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
2262 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); in mana_destroy_rxq()
2264 mana_deinit_cq(apc, &rxq->rx_cq); in mana_destroy_rxq()
2266 mana_free_counters((counter_u64_t *)&rxq->stats, in mana_destroy_rxq()
2267 sizeof(rxq->stats)); in mana_destroy_rxq()
2270 tcp_lro_free(&rxq->lro); in mana_destroy_rxq()
2272 for (i = 0; i < rxq->num_rx_buf; i++) { in mana_destroy_rxq()
2273 rx_oob = &rxq->rx_oobs[i]; in mana_destroy_rxq()
2275 if (rx_oob->mbuf) in mana_destroy_rxq()
2278 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map); in mana_destroy_rxq()
2281 if (rxq->gdma_rq) in mana_destroy_rxq()
2282 mana_gd_destroy_queue(gc, rxq->gdma_rq); in mana_destroy_rxq()
2298 if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) { in mana_alloc_rx_wqe()
2300 "WARNING: Invalid rxq datasize %u\n", rxq->datasize); in mana_alloc_rx_wqe()
2306 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_alloc_rx_wqe()
2307 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_alloc_rx_wqe()
2310 err = bus_dmamap_create(apc->rx_buf_tag, 0, in mana_alloc_rx_wqe()
2311 &rx_oob->dma_map); in mana_alloc_rx_wqe()
2324 bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map); in mana_alloc_rx_wqe()
2328 rx_oob->wqe_req.sgl = rx_oob->sgl; in mana_alloc_rx_wqe()
2329 rx_oob->wqe_req.num_sge = rx_oob->num_sge; in mana_alloc_rx_wqe()
2330 rx_oob->wqe_req.inline_oob_size = 0; in mana_alloc_rx_wqe()
2331 rx_oob->wqe_req.inline_oob_data = NULL; in mana_alloc_rx_wqe()
2332 rx_oob->wqe_req.flags = 0; in mana_alloc_rx_wqe()
2333 rx_oob->wqe_req.client_data_unit = 0; in mana_alloc_rx_wqe()
2336 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32); in mana_alloc_rx_wqe()
2350 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) { in mana_push_wqe()
2351 rx_oob = &rxq->rx_oobs[buf_idx]; in mana_push_wqe()
2353 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req, in mana_push_wqe()
2354 &rx_oob->wqe_inf); in mana_push_wqe()
2366 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
2376 gc = gd->gdma_context; in mana_create_rxq()
2379 apc->rx_queue_size * sizeof(struct mana_recv_buf_oob), in mana_create_rxq()
2381 rxq->ndev = ndev; in mana_create_rxq()
2382 rxq->num_rx_buf = apc->rx_queue_size; in mana_create_rxq()
2383 rxq->rxq_idx = rxq_idx; in mana_create_rxq()
2388 rxq->datasize = ALIGN(apc->frame_size, MCLBYTES); in mana_create_rxq()
2389 if (rxq->datasize > MAX_FRAME_SIZE) in mana_create_rxq()
2390 rxq->datasize = MAX_FRAME_SIZE; in mana_create_rxq()
2393 rxq_idx, rxq->datasize); in mana_create_rxq()
2403 rxq->refill_thresh = mana_rx_refill_threshold; in mana_create_rxq()
2405 rxq->refill_thresh = MANA_RX_REFILL_THRESH; in mana_create_rxq()
2406 rxq->refill_thresh = min_t(uint32_t, in mana_create_rxq()
2407 rxq->num_rx_buf / 4, rxq->refill_thresh); in mana_create_rxq()
2410 rxq_idx, rxq->refill_thresh); in mana_create_rxq()
2412 rxq->rxobj = INVALID_MANA_HANDLE; in mana_create_rxq()
2420 err = tcp_lro_init(&rxq->lro); in mana_create_rxq()
2425 rxq->lro.ifp = ndev; in mana_create_rxq()
2429 mana_alloc_counters((counter_u64_t *)&rxq->stats, in mana_create_rxq()
2430 sizeof(rxq->stats)); in mana_create_rxq()
2440 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq); in mana_create_rxq()
2445 cq = &rxq->rx_cq; in mana_create_rxq()
2446 cq->type = MANA_CQ_TYPE_RX; in mana_create_rxq()
2447 cq->rxq = rxq; in mana_create_rxq()
2454 spec.cq.parent_eq = eq->eq; in mana_create_rxq()
2456 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); in mana_create_rxq()
2462 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle; in mana_create_rxq()
2463 wq_spec.queue_size = rxq->gdma_rq->queue_size; in mana_create_rxq()
2465 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle; in mana_create_rxq()
2466 cq_spec.queue_size = cq->gdma_cq->queue_size; in mana_create_rxq()
2468 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id; in mana_create_rxq()
2470 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ, in mana_create_rxq()
2471 &wq_spec, &cq_spec, &rxq->rxobj); in mana_create_rxq()
2475 rxq->gdma_rq->id = wq_spec.queue_index; in mana_create_rxq()
2476 cq->gdma_cq->id = cq_spec.queue_index; in mana_create_rxq()
2478 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2479 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION; in mana_create_rxq()
2481 rxq->gdma_id = rxq->gdma_rq->id; in mana_create_rxq()
2482 cq->gdma_id = cq->gdma_cq->id; in mana_create_rxq()
2488 if (cq->gdma_id >= gc->max_num_cqs) { in mana_create_rxq()
2493 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
2496 cq->do_not_ring_db = false; in mana_create_rxq()
2498 NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq); in mana_create_rxq()
2499 cq->cleanup_tq = in mana_create_rxq()
2502 &cq->cleanup_tq); in mana_create_rxq()
2504 if (apc->last_rx_cq_bind_cpu < 0) in mana_create_rxq()
2505 apc->last_rx_cq_bind_cpu = CPU_FIRST(); in mana_create_rxq()
2506 cq->cpu = apc->last_rx_cq_bind_cpu; in mana_create_rxq()
2507 apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu); in mana_create_rxq()
2509 if (apc->bind_cleanup_thread_cpu) { in mana_create_rxq()
2511 CPU_SETOF(cq->cpu, &cpu_mask); in mana_create_rxq()
2512 taskqueue_start_threads_cpuset(&cq->cleanup_tq, in mana_create_rxq()
2514 "mana cq p%u-rx%u-cpu%d", in mana_create_rxq()
2515 apc->port_idx, rxq->rxq_idx, cq->cpu); in mana_create_rxq()
2517 taskqueue_start_threads(&cq->cleanup_tq, 1, in mana_create_rxq()
2518 PI_NET, "mana cq p%u-rx%u", in mana_create_rxq()
2519 apc->port_idx, rxq->rxq_idx); in mana_create_rxq()
2522 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); in mana_create_rxq()
2540 struct mana_context *ac = apc->ac; in mana_add_rx_queues()
2545 for (i = 0; i < apc->num_queues; i++) { in mana_add_rx_queues()
2546 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2552 apc->rxqs[i] = rxq; in mana_add_rx_queues()
2555 apc->default_rxobj = apc->rxqs[0]->rxobj; in mana_add_rx_queues()
2566 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) { in mana_destroy_vport()
2567 rxq = apc->rxqs[rxq_idx]; in mana_destroy_vport()
2572 apc->rxqs[rxq_idx] = NULL; in mana_destroy_vport()
2583 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
2586 apc->default_rxobj = INVALID_MANA_HANDLE; in mana_create_vport()
2588 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell); in mana_create_vport()
2601 apc->indir_table[i] = i % apc->num_queues; in mana_rss_table_init()
2613 queue_idx = apc->indir_table[i]; in mana_config_rss()
2614 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; in mana_config_rss()
2632 int port_idx = apc->port_idx; in mana_init_port()
2649 if (apc->max_queues > max_queues) in mana_init_port()
2650 apc->max_queues = max_queues; in mana_init_port()
2652 if (apc->num_queues > apc->max_queues) in mana_init_port()
2653 apc->num_queues = apc->max_queues; in mana_init_port()
2658 bus_dma_tag_destroy(apc->rx_buf_tag); in mana_init_port()
2659 apc->rx_buf_tag = NULL; in mana_init_port()
2660 free(apc->rxqs, M_DEVBUF); in mana_init_port()
2661 apc->rxqs = NULL; in mana_init_port()
2679 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
2701 err = mana_alloc_queues(apc->ndev); in mana_up()
2710 apc->port_is_up = true; in mana_up()
2715 if_link_state_change(apc->ndev, LINK_STATE_UP); in mana_up()
2716 if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); in mana_up()
2728 if (!apc->port_is_up) { in mana_init()
2741 if (apc->port_is_up) in mana_dealloc_queues()
2744 /* No packet can be transmitted now since apc->port_is_up is false. in mana_dealloc_queues()
2745 * There is still a tiny chance that mana_poll_tx_cq() can re-enable in mana_dealloc_queues()
2746 * a txq because it may not timely see apc->port_is_up being cleared in mana_dealloc_queues()
2748 * new packets due to apc->port_is_up being false. in mana_dealloc_queues()
2750 * Drain all the in-flight TX packets in mana_dealloc_queues()
2752 for (i = 0; i < apc->num_queues; i++) { in mana_dealloc_queues()
2753 txq = &apc->tx_qp[i].txq; in mana_dealloc_queues()
2755 struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq; in mana_dealloc_queues()
2756 struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq); in mana_dealloc_queues()
2758 tx_cq->do_not_ring_db = true; in mana_dealloc_queues()
2759 rx_cq->do_not_ring_db = true; in mana_dealloc_queues()
2762 taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task); in mana_dealloc_queues()
2764 while (atomic_read(&txq->pending_sends) > 0) in mana_dealloc_queues()
2772 apc->rss_state = TRI_STATE_FALSE; in mana_dealloc_queues()
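
The teardown comment in mana_dealloc_queues() above spells out the ordering: clear port_is_up so no new packets are queued, stop ringing the completion doorbells, kick the cleanup tasks once, then wait for each txq's pending_sends to drain. A user-space analogue of that final wait (the kernel code uses atomic_read() and its own sleep primitive):

#include <stdatomic.h>
#include <unistd.h>

/* Sketch only: poll until the completion path has retired every send. */
static void
drain_pending_sends(atomic_int *pending_sends)
{
	while (atomic_load(pending_sends) > 0)
		usleep(1000);
}
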
2789 apc->port_st_save = apc->port_is_up; in mana_down()
2790 apc->port_is_up = false; in mana_down()
2795 if (apc->port_st_save) { in mana_down()
2796 if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, in mana_down()
2798 if_link_state_change(apc->ndev, LINK_STATE_DOWN); in mana_down()
2802 err = mana_dealloc_queues(apc->ndev); in mana_down()
2804 if_printf(apc->ndev, in mana_down()
2850 "size %u out of allowable range (%u - %u), " in mana_get_tx_queue_size()
2878 "size %u out of allowable range (%u - %u), " in mana_get_rx_queue_size()
2896 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
2902 ndev = if_alloc_dev(IFT_ETHER, gc->dev); in mana_probe_port()
2906 apc->ac = ac; in mana_probe_port()
2907 apc->ndev = ndev; in mana_probe_port()
2908 apc->max_queues = gc->max_num_queues; in mana_probe_port()
2909 apc->num_queues = min_t(unsigned int, in mana_probe_port()
2910 gc->max_num_queues, MANA_MAX_NUM_QUEUES); in mana_probe_port()
2911 apc->tx_queue_size = mana_get_tx_queue_size(port_idx, in mana_probe_port()
2913 apc->rx_queue_size = mana_get_rx_queue_size(port_idx, in mana_probe_port()
2915 apc->port_handle = INVALID_MANA_HANDLE; in mana_probe_port()
2916 apc->port_idx = port_idx; in mana_probe_port()
2917 apc->frame_size = DEFAULT_FRAME_SIZE; in mana_probe_port()
2918 apc->last_tx_cq_bind_cpu = -1; in mana_probe_port()
2919 apc->last_rx_cq_bind_cpu = -1; in mana_probe_port()
2920 apc->vport_use_count = 0; in mana_probe_port()
2924 if_initname(ndev, device_get_name(gc->dev), port_idx); in mana_probe_port()
2925 if_setdev(ndev, gc->dev); in mana_probe_port()
2938 mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); in mana_probe_port()
2954 if_sethwtsomax(ndev, MANA_TSO_MAX_SZ - in mana_probe_port()
2969 ifmedia_init(&apc->media, IFM_IMASK, in mana_probe_port()
2971 ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL); in mana_probe_port()
2972 ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO); in mana_probe_port()
2974 ether_ifattach(ndev, apc->mac_addr); in mana_probe_port()
2977 mana_alloc_counters((counter_u64_t *)&apc->port_stats, in mana_probe_port()
2996 struct gdma_context *gc = gd->gdma_context; in mana_probe()
2997 device_t dev = gc->dev; in mana_probe()
3010 ac->gdma_dev = gd; in mana_probe()
3011 ac->num_ports = 1; in mana_probe()
3012 gd->driver_data = ac; in mana_probe()
3019 MANA_MICRO_VERSION, &ac->num_ports); in mana_probe()
3023 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
3024 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
3026 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3027 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
3045 struct gdma_context *gc = gd->gdma_context; in mana_remove()
3046 struct mana_context *ac = gd->driver_data; in mana_remove()
3047 device_t dev = gc->dev; in mana_remove()
3051 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
3052 ndev = ac->ports[i]; in mana_remove()
3068 gd->driver_data = NULL; in mana_remove()
3069 gd->gdma_context = NULL; in mana_remove()