Lines Matching +full:rx +full:- +full:num +full:- +full:evt
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
34 * freebsd-drivers@emulex.com
48 #define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO)
259 sc->dev = dev;
273 sc->flags |= OCE_FLAGS_BE2;
276 sc->flags |= OCE_FLAGS_BE3;
280 sc->flags |= OCE_FLAGS_XE201;
283 sc->flags |= OCE_FLAGS_SH;
308 sc->tx_ring_size = OCE_TX_RING_SIZE;
309 sc->rx_ring_size = OCE_RX_RING_SIZE;
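/* rq_frag_size below is oce_rq_buf_size rounded down to a multiple of 2 KB */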
311 sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
312 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
313 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
315 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
316 LOCK_CREATE(&sc->dev_lock, "Device_lock");
349 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
351 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
360 callout_init(&sc->timer, CALLOUT_MPSAFE);
361 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
365 sc->next = NULL;
367 softc_tail->next = sc;
376 callout_drain(&sc->timer);
379 if (sc->vlan_attach)
380 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
381 if (sc->vlan_detach)
382 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
389 ether_ifdetach(sc->ifp);
390 if_free(sc->ifp);
395 oce_dma_free(sc, &sc->bsmbx);
398 LOCK_DESTROY(&sc->dev_lock);
399 LOCK_DESTROY(&sc->bmbx_lock);
414 *ppoce_sc_tmp1 = sc->next;
415 if (sc->next == NULL) {
421 ppoce_sc_tmp1 = &poce_sc_tmp->next;
422 poce_sc_tmp = poce_sc_tmp->next;
425 LOCK(&sc->dev_lock);
427 UNLOCK(&sc->dev_lock);
429 callout_drain(&sc->timer);
431 if (sc->vlan_attach != NULL)
432 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
433 if (sc->vlan_detach != NULL)
434 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
436 ether_ifdetach(sc->ifp);
438 if_free(sc->ifp);
469 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
473 if (ifr->ifr_mtu > OCE_MAX_MTU)
476 if_setmtu(ifp, ifr->ifr_mtu);
485 device_printf(sc->dev, "Interface Up\n");
487 LOCK(&sc->dev_lock);
489 if_setdrvflagbits(sc->ifp, 0,
493 UNLOCK(&sc->dev_lock);
495 device_printf(sc->dev, "Interface Down\n");
498 if ((if_getflags(ifp) & IFF_PROMISC) && !sc->promisc) {
500 sc->promisc = TRUE;
501 } else if (!(if_getflags(ifp) & IFF_PROMISC) && sc->promisc) {
503 sc->promisc = FALSE;
512 device_printf(sc->dev,
517 u = ifr->ifr_reqcap ^ if_getcapenable(ifp);
529 "TSO disabled due to -txcsum.\n");
563 if(sc->enable_hwlro) {
597 rc = -rc;
625 LOCK(&sc->dev_lock);
627 if (if_getflags(sc->ifp) & IFF_UP) {
632 UNLOCK(&sc->dev_lock);
644 if (!sc->link_status)
648 queue_index = m->m_pkthdr.flowid % sc->nwqs;
650 wq = sc->wq[queue_index];
652 LOCK(&wq->tx_lock);
654 UNLOCK(&wq->tx_lock);
667 for (i = 0; i < sc->nwqs; i++) {
668 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
683 POCE_SOFTC sc = ii->sc;
684 struct oce_eq *eq = ii->eq;
689 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
692 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
693 if (eqe->evnt == 0)
695 eqe->evnt = 0;
696 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
698 RING_GET(eq->ring, 1);
707 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
709 /* Process TX, RX and MCC. But don't arm CQ */
710 for (i = 0; i < eq->cq_valid; i++) {
711 cq = eq->cq[i];
712 (*cq->cq_handler)(cq->cb_arg);
716 for (i = 0; i < eq->cq_valid; i++) {
717 cq = eq->cq[i];
718 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
722 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
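/* request enough vectors to cover the larger of the RSS RQ count (nrqs - 1) and the WQ count */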
735 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
740 if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
743 sc->roce_intr_count = OCE_RDMA_VECTORS;
747 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
748 sc->intr_count = req_vectors;
750 rc = pci_alloc_msix(sc->dev, &tot_vectors);
753 pci_release_msi(sc->dev);
755 if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
757 if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
758 sc->roce_intr_count = (tot_vectors / 2);
760 sc->intr_count = tot_vectors - sc->roce_intr_count;
763 sc->intr_count = tot_vectors;
765 sc->flags |= OCE_FLAGS_USING_MSIX;
771 sc->intr_count = 1;
777 device_printf(sc->dev, "Using legacy interrupt\n");
782 for (; vector < sc->intr_count; vector++) {
799 POCE_SOFTC sc = ii->sc;
801 if (ii->eq == NULL)
804 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
806 taskqueue_enqueue(ii->tq, &ii->task);
808 ii->eq->intr++;
822 ii = &sc->intrs[vector];
828 if (sc->flags & OCE_FLAGS_USING_MSIX)
832 ii->intr_res = bus_alloc_resource_any(sc->dev,
835 ii->irq_rr = rr;
836 if (ii->intr_res == NULL) {
837 device_printf(sc->dev,
843 TASK_INIT(&ii->task, 0, isr, ii);
844 ii->vector = vector;
845 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
846 ii->tq = taskqueue_create_fast(ii->task_name,
849 &ii->tq);
850 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
851 device_get_nameunit(sc->dev));
853 ii->sc = sc;
854 rc = bus_setup_intr(sc->dev,
855 ii->intr_res,
857 oce_fast_isr, NULL, ii, &ii->tag);
867 for (i = 0; i < sc->intr_count; i++) {
869 if (sc->intrs[i].tag != NULL)
870 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
871 sc->intrs[i].tag);
872 if (sc->intrs[i].tq != NULL)
873 taskqueue_free(sc->intrs[i].tq);
875 if (sc->intrs[i].intr_res != NULL)
876 bus_release_resource(sc->dev, SYS_RES_IRQ,
877 sc->intrs[i].irq_rr,
878 sc->intrs[i].intr_res);
879 sc->intrs[i].tag = NULL;
880 sc->intrs[i].intr_res = NULL;
883 if (sc->flags & OCE_FLAGS_USING_MSIX)
884 pci_release_msi(sc->dev);
897 req->ifm_status = IFM_AVALID;
898 req->ifm_active = IFM_ETHER;
900 if (sc->link_status == 1)
901 req->ifm_status |= IFM_ACTIVE;
905 switch (sc->link_speed) {
907 req->ifm_active |= IFM_10_T | IFM_FDX;
908 sc->speed = 10;
911 req->ifm_active |= IFM_100_TX | IFM_FDX;
912 sc->speed = 100;
915 req->ifm_active |= IFM_1000_T | IFM_FDX;
916 sc->speed = 1000;
919 req->ifm_active |= IFM_10G_SR | IFM_FDX;
920 sc->speed = 10000;
923 req->ifm_active |= IFM_10G_SR | IFM_FDX;
924 sc->speed = 20000;
927 req->ifm_active |= IFM_10G_SR | IFM_FDX;
928 sc->speed = 25000;
931 req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
932 sc->speed = 40000;
935 sc->speed = 0;
960 if (!ETHER_IS_MULTICAST(eh->ether_dhost))
965 is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
970 if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
972 uint8_t nexthdr = ip6->ip6_nxt;
975 switch (icmp6->icmp6_type) {
988 if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
990 int iphlen = ip->ip_hl << 2;
992 switch (uh->uh_dport) {
1031 struct oce_wq *wq = sc->wq[wq_index];
1045 if (!(m->m_flags & M_PKTHDR)) {
1050 /* Don't allow non-TSO packets longer than MTU */
1053 if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
1060 device_printf(sc->dev, "Insertion unsuccessful\n");
1066 * may cause a transmit stall on that port. So the work-around is to
1067 * pad short packets (<= 32 bytes) to a 36-byte length.
1070 if(m->m_pkthdr.len <= 32) {
1073 m_append(m, (36 - m->m_pkthdr.len), buf);
1078 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1091 pd = &wq->pckts[wq->pkt_desc_head];
1094 rc = bus_dmamap_load_mbuf_sg(wq->tag,
1095 pd->map,
1096 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
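/* one header WQE plus one fragment WQE per DMA segment */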
1098 num_wqes = pd->nsegs + 1;
1104 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1105 bus_dmamap_unload(wq->tag, pd->map);
1108 atomic_store_rel_int(&wq->pkt_desc_head,
1109 (wq->pkt_desc_head + 1) % \
1111 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
1112 pd->mbuf = m;
1115 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1116 nichdr->u0.dw[0] = 0;
1117 nichdr->u0.dw[1] = 0;
1118 nichdr->u0.dw[2] = 0;
1119 nichdr->u0.dw[3] = 0;
1121 nichdr->u0.s.complete = complete;
1122 nichdr->u0.s.mgmt = os2bmc;
1123 nichdr->u0.s.event = 1;
1124 nichdr->u0.s.crc = 1;
1125 nichdr->u0.s.forward = 0;
1126 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
1127 nichdr->u0.s.udpcs =
1128 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
1129 nichdr->u0.s.tcpcs =
1130 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
1131 nichdr->u0.s.num_wqe = num_wqes;
1132 nichdr->u0.s.total_length = m->m_pkthdr.len;
1134 if (m->m_flags & M_VLANTAG) {
1135 nichdr->u0.s.vlan = 1; /* VLAN present */
1136 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
1139 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1140 if (m->m_pkthdr.tso_segsz) {
1141 nichdr->u0.s.lso = 1;
1142 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
1145 nichdr->u0.s.ipcs = 1;
1148 RING_PUT(wq->ring, 1);
1149 atomic_add_int(&wq->ring->num_used, 1);
1151 for (i = 0; i < pd->nsegs; i++) {
1153 RING_GET_PRODUCER_ITEM_VA(wq->ring,
1155 nicfrag->u0.s.rsvd0 = 0;
1156 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
1157 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
1158 nicfrag->u0.s.frag_len = segs[i].ds_len;
1159 pd->wqe_idx = wq->ring->pidx;
1160 RING_PUT(wq->ring, 1);
1161 atomic_add_int(&wq->ring->num_used, 1);
1163 if (num_wqes > (pd->nsegs + 1)) {
1165 RING_GET_PRODUCER_ITEM_VA(wq->ring,
1167 nicfrag->u0.dw[0] = 0;
1168 nicfrag->u0.dw[1] = 0;
1169 nicfrag->u0.dw[2] = 0;
1170 nicfrag->u0.dw[3] = 0;
1171 pd->wqe_idx = wq->ring->pidx;
1172 RING_PUT(wq->ring, 1);
1173 atomic_add_int(&wq->ring->num_used, 1);
1174 pd->nsegs++;
1177 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
1178 wq->tx_stats.tx_reqs++;
1179 wq->tx_stats.tx_wrbs += num_wqes;
1180 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
1181 wq->tx_stats.tx_pkts++;
1183 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
1185 reg_value = (num_wqes << 16) | wq->wq_id;
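/* doorbell word: number of WQEs just posted in the upper 16 bits, WQ id in the lower bits */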
1192 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
1193 if (m->m_flags & M_MCAST)
1194 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
1195 ETHER_BPF_MTAP(sc->ifp, m);
1197 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
1232 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
1235 pd = &wq->pckts[wq->pkt_desc_tail];
1236 atomic_store_rel_int(&wq->pkt_desc_tail,
1237 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
1238 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
1239 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1240 bus_dmamap_unload(wq->tag, pd->map);
1242 m = pd->mbuf;
1244 pd->mbuf = NULL;
1246 if (if_getdrvflags(sc->ifp) & IFF_DRV_OACTIVE) {
1247 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
1248 if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_OACTIVE));
1258 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1261 if (!drbr_empty(sc->ifp, wq->br))
1262 taskqueue_enqueue(taskqueue_swi, &wq->txtask);
1293 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1294 etype = ntohs(eh->evl_proto);
1297 etype = ntohs(eh->evl_encap_proto);
1304 ip = (struct ip *)(m->m_data + ehdrlen);
1305 if (ip->ip_p != IPPROTO_TCP)
1307 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1309 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1314 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1315 if (ip6->ip6_nxt != IPPROTO_TCP)
1319 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
1336 POCE_SOFTC sc = wq->parent;
1337 if_t ifp = sc->ifp;
1340 LOCK(&wq->tx_lock);
1343 device_printf(sc->dev,
1344 "TX[%d] restart failed\n", wq->queue_index);
1346 UNLOCK(&wq->tx_lock);
1361 if (!sc->link_status)
1365 m = if_dequeue(sc->ifp);
1369 LOCK(&sc->wq[def_q]->tx_lock);
1371 UNLOCK(&sc->wq[def_q]->tx_lock);
1374 sc->wq[def_q]->tx_stats.tx_stops++;
1389 POCE_SOFTC sc = wq->parent;
1390 struct oce_cq *cq = wq->cq;
1394 LOCK(&wq->tx_compl_lock);
1395 bus_dmamap_sync(cq->ring->dma.tag,
1396 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1397 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1398 while (cqe->u0.dw[3]) {
1401 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1402 if (wq->ring->cidx >= wq->ring->num_items)
1403 wq->ring->cidx -= wq->ring->num_items;
1406 wq->tx_stats.tx_compl++;
1407 cqe->u0.dw[3] = 0;
1408 RING_GET(cq->ring, 1);
1409 bus_dmamap_sync(cq->ring->dma.tag,
1410 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1412 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1417 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1419 UNLOCK(&wq->tx_compl_lock);
1431 br = wq->br;
1432 queue_index = wq->queue_index;
1451 wq->tx_stats.tx_stops++;
1478 if(!cqe2->ipv6_frame) {
1480 ip4_hdr->ip_ttl = cqe2->frame_lifespan;
1481 ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
1485 ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
1486 payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
1487 - sizeof(struct ip6_hdr);
1488 ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
1493 tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
1494 if(cqe2->push) {
1497 tcp_hdr->th_win = htons(cqe2->tcp_window);
1498 tcp_hdr->th_sum = 0xffff;
1499 if(cqe2->ts_opt) {
1501 *p = cqe1->tcp_timestamp_val;
1502 *(p+1) = cqe1->tcp_timestamp_ecr;
1511 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1513 uint32_t len = cqe_info->pkt_size;
1517 for (i = 0; i < cqe_info->num_frags; i++) {
1518 if (rq->ring->cidx == rq->ring->pidx) {
1519 device_printf(sc->dev,
1520 "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
1523 pd = &rq->pckts[rq->ring->cidx];
1525 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1526 bus_dmamap_unload(rq->tag, pd->map);
1527 RING_GET(rq->ring, 1);
1528 rq->pending--;
1530 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1531 pd->mbuf->m_len = frag_len;
1535 pd->mbuf->m_flags &= ~M_PKTHDR;
1536 tail->m_next = pd->mbuf;
1537 if(rq->islro)
1538 tail->m_nextpkt = NULL;
1539 tail = pd->mbuf;
1542 pd->mbuf->m_pkthdr.len = len;
1543 if(rq->islro)
1544 pd->mbuf->m_nextpkt = NULL;
1545 pd->mbuf->m_pkthdr.csum_flags = 0;
1547 if (cqe_info->l4_cksum_pass) {
1548 if(!cqe_info->ipv6_frame) { /* IPV4 */
1549 pd->mbuf->m_pkthdr.csum_flags |=
1552 if(rq->islro) {
1553 pd->mbuf->m_pkthdr.csum_flags |=
1557 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1559 if (cqe_info->ip_cksum_pass) {
1560 pd->mbuf->m_pkthdr.csum_flags |=
1564 *m = tail = pd->mbuf;
1566 pd->mbuf = NULL;
1567 len -= frag_len;
1576 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1583 cq_info.pkt_size = cqe->pkt_size;
1584 cq_info.vtag = cqe->vlan_tag;
1585 cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
1586 cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
1587 cq_info.ipv6_frame = cqe->ipv6_frame;
1588 cq_info.vtp = cqe->vtp;
1589 cq_info.qnq = cqe->qnq;
1592 cq_info.pkt_size = cqe2->coalesced_size;
1593 cq_info.vtag = cqe2->vlan_tag;
1594 cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
1595 cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
1596 cq_info.ipv6_frame = cqe2->ipv6_frame;
1597 cq_info.vtp = cqe2->vtp;
1598 cq_info.qnq = cqe1->qnq;
1603 cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
1604 if(cq_info.pkt_size % rq->cfg.frag_size)
1611 //assert(cqe2->valid != 0);
1613 //assert(cqe2->cqe_type != 2);
1617 m->m_pkthdr.rcvif = sc->ifp;
1618 if (rq->queue_index)
1619 m->m_pkthdr.flowid = (rq->queue_index - 1);
1621 m->m_pkthdr.flowid = rq->queue_index;
1626 if (sc->function_mode & FNM_FLEX10_MODE) {
1629 m->m_pkthdr.ether_vtag = cq_info.vtag;
1630 m->m_flags |= M_VLANTAG;
1632 } else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) {
1637 m->m_pkthdr.ether_vtag = cq_info.vtag;
1638 m->m_flags |= M_VLANTAG;
1641 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1643 if_input(sc->ifp, m);
1645 /* Update rx stats per queue */
1646 rq->rx_stats.rx_pkts++;
1647 rq->rx_stats.rx_bytes += cq_info.pkt_size;
1648 rq->rx_stats.rx_frags += cq_info.num_frags;
1649 rq->rx_stats.rx_ucast_pkts++;
1657 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1664 if(!cqe->u0.s.num_fragments)
1667 len = cqe->u0.s.pkt_size;
1670 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1675 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1681 vtag = BSWAP_16(cqe->u0.s.vlan_tag);
1683 vtag = cqe->u0.s.vlan_tag;
1685 cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
1686 cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
1687 cq_info.ipv6_frame = cqe->u0.s.ip_ver;
1688 cq_info.num_frags = cqe->u0.s.num_fragments;
1689 cq_info.pkt_size = cqe->u0.s.pkt_size;
1694 m->m_pkthdr.rcvif = sc->ifp;
1695 if (rq->queue_index)
1696 m->m_pkthdr.flowid = (rq->queue_index - 1);
1698 m->m_pkthdr.flowid = rq->queue_index;
1703 if (sc->function_mode & FNM_FLEX10_MODE) {
1705 if (cqe->u0.s.qnq) {
1706 m->m_pkthdr.ether_vtag = vtag;
1707 m->m_flags |= M_VLANTAG;
1709 } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
1714 m->m_pkthdr.ether_vtag = vtag;
1715 m->m_flags |= M_VLANTAG;
1719 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
1723 (cqe->u0.s.ip_cksum_pass) &&
1724 (cqe->u0.s.l4_cksum_pass) &&
1725 (!cqe->u0.s.ip_ver) &&
1726 (rq->lro.lro_cnt != 0)) {
1727 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1728 rq->lro_pkts_queued++;
1735 if_input(sc->ifp, m);
1739 /* Update rx stats per queue */
1740 rq->rx_stats.rx_pkts++;
1741 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1742 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1743 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1744 rq->rx_stats.rx_mcast_pkts++;
1745 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1746 rq->rx_stats.rx_ucast_pkts++;
1757 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1760 if (rq->ring->cidx == rq->ring->pidx) {
1761 device_printf(sc->dev,
1762 "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
1765 pd = &rq->pckts[rq->ring->cidx];
1766 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1767 bus_dmamap_unload(rq->tag, pd->map);
1768 if (pd->mbuf != NULL) {
1769 m_freem(pd->mbuf);
1770 pd->mbuf = NULL;
1773 RING_GET(rq->ring, 1);
1774 rq->pending--;
1784 if (sc->be3_native) {
1786 vtp = cqe_v1->u0.s.vlan_tag_present;
1788 vtp = cqe->u0.s.vlan_tag_present;
1800 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1802 port_id = cqe_v1->u0.s.port;
1803 if (sc->port_id != port_id)
1816 struct lro_ctrl *lro = &rq->lro;
1817 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1823 rq->lro_pkts_queued = 0;
1834 for (i = 0; i < sc->nrqs; i++) {
1835 lro = &sc->rq[i]->lro;
1838 device_printf(sc->dev, "LRO init failed\n");
1841 lro->ifp = sc->ifp;
1853 for (i = 0; i < sc->nrqs; i++) {
1854 lro = &sc->rq[i]->lro;
1864 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1876 pd = &rq->pckts[rq->ring->pidx];
1877 pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
1878 if (pd->mbuf == NULL) {
1879 device_printf(sc->dev, "mbuf allocation failed, size = %d\n", oce_rq_buf_size);
1882 pd->mbuf->m_nextpkt = NULL;
1884 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
1886 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1887 pd->map,
1888 pd->mbuf,
1891 m_free(pd->mbuf);
1892 device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
1897 i--;
1901 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1903 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1904 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1905 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1907 RING_PUT(rq->ring, 1);
1909 rq->pending++;
1911 oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
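/* ring the RQ doorbell in batches of at most oce_max_rq_posts newly added buffers */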
1913 for (i = added / oce_max_rq_posts; i > 0; i--) {
1915 rxdb_reg.bits.qid = rq->rq_id;
1916 if(rq->islro) {
1917 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
1923 added -= oce_max_rq_posts;
1926 rxdb_reg.bits.qid = rq->rq_id;
1928 if(rq->islro) {
1929 val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
1945 oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
1946 if(!sc->enable_hwlro) {
1947 if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
1948 oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
1950 if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64)
1962 struct oce_cq *cq = rq->cq;
1963 POCE_SOFTC sc = rq->parent;
1968 LOCK(&rq->rx_lock);
1969 bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1970 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
1971 while (cqe->valid) {
1972 if(cqe->cqe_type == 0) { /* singleton cqe */
1974 if(rq->cqe_firstpart != NULL) {
1975 device_printf(sc->dev, "Got singleton cqe after cqe1\n");
1978 if(cqe->error != 0) {
1979 rq->rx_stats.rxcp_err++;
1980 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
1983 rq->rx_stats.rx_compl++;
1984 cqe->valid = 0;
1985 RING_GET(cq->ring, 1);
1989 } else if (cqe->cqe_type == 0x1) { /* first part */
1991 if(rq->cqe_firstpart != NULL) {
1992 device_printf(sc->dev, "Got cqe1 after cqe1\n");
1995 rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
1996 RING_GET(cq->ring, 1);
1997 } else if (cqe->cqe_type == 0x2) { /* second part */
1999 if(cqe2->error != 0) {
2000 rq->rx_stats.rxcp_err++;
2001 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2004 if(rq->cqe_firstpart == NULL) {
2005 device_printf(sc->dev, "Got cqe2 without cqe1\n");
2008 oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
2010 rq->rx_stats.rx_compl++;
2011 rq->cqe_firstpart->valid = 0;
2012 cqe2->valid = 0;
2013 rq->cqe_firstpart = NULL;
2015 RING_GET(cq->ring, 1);
2021 bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2022 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
2026 UNLOCK(&rq->rx_lock);
2036 struct oce_cq *cq = rq->cq;
2037 POCE_SOFTC sc = rq->parent;
2042 if(rq->islro) {
2047 LOCK(&rq->rx_lock);
2048 bus_dmamap_sync(cq->ring->dma.tag,
2049 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2050 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2051 while (cqe->u0.dw[2]) {
2054 if (cqe->u0.s.error == 0) {
2057 rq->rx_stats.rxcp_err++;
2058 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
2062 rq->rx_stats.rx_compl++;
2063 cqe->u0.dw[2] = 0;
2066 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
2071 RING_GET(cq->ring, 1);
2072 bus_dmamap_sync(cq->ring->dma.tag,
2073 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2075 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
2087 UNLOCK(&rq->rx_lock);
2101 sc->ifp = if_alloc(IFT_ETHER);
2103 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
2104 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2105 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2107 if_setflags(sc->ifp, IFF_BROADCAST | IFF_MULTICAST);
2108 if_setioctlfn(sc->ifp, oce_ioctl);
2109 if_setstartfn(sc->ifp, oce_start);
2110 if_setinitfn(sc->ifp, oce_init);
2111 if_setmtu(sc->ifp, ETHERMTU);
2112 if_setsoftc(sc->ifp, sc);
2113 if_settransmitfn(sc->ifp, oce_multiq_start);
2114 if_setqflushfn(sc->ifp, oce_multiq_flush);
2116 if_initname(sc->ifp,
2117 device_get_name(sc->dev), device_get_unit(sc->dev));
2119 if_setsendqlen(sc->ifp, OCE_MAX_TX_DESC - 1);
2120 if_setsendqready(sc->ifp);
2122 if_sethwassist(sc->ifp, OCE_IF_HWASSIST);
2123 if_sethwassistbits(sc->ifp, CSUM_TSO, 0);
2124 if_sethwassistbits(sc->ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);
2126 if_setcapabilities(sc->ifp, OCE_IF_CAPABILITIES);
2127 if_setcapabilitiesbit(sc->ifp, IFCAP_HWCSUM, 0);
2128 if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWFILTER, 0);
2131 if_setcapabilitiesbit(sc->ifp, IFCAP_TSO, 0);
2132 if_setcapabilitiesbit(sc->ifp, IFCAP_LRO, 0);
2133 if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWTSO, 0);
2136 if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
2137 if_setbaudrate(sc->ifp, IF_Gbps(10));
2139 if_sethwtsomax(sc->ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2140 if_sethwtsomaxsegcount(sc->ifp, OCE_MAX_TX_ELEMENTS);
2141 if_sethwtsomaxsegsize(sc->ifp, 4096);
2143 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
2156 sc->vlan_tag[vtag] = 1;
2157 sc->vlans_added++;
2158 if (sc->vlans_added <= (sc->max_vlans + 1))
2172 sc->vlan_tag[vtag] = 0;
2173 sc->vlans_added--;
2188 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
2189 (if_getcapenable(sc->ifp) & IFCAP_VLAN_HWFILTER)) {
2191 if (sc->vlan_tag[i]) {
2197 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2200 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
2208 uint32_t old_pmac_id = sc->pmac_id;
2211 status = bcmp((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2212 sc->macaddr.size_of_struct);
2216 status = oce_mbox_macaddr_add(sc, (uint8_t *)(if_getlladdr(sc->ifp)),
2217 sc->if_id, &sc->pmac_id);
2219 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
2220 bcopy((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
2221 sc->macaddr.size_of_struct);
2224 device_printf(sc->dev, "Failed to update MAC address\n");
2284 sizeof(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str),
2288 fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
2305 int eqd, i, num = 0;
2312 for (i = 0 ; i < sc->neqs; i++) {
2313 eqo = sc->eq[i];
2314 aic = &sc->aic_obj[i];
2316 if (!aic->enable) {
2317 if (aic->ticks)
2318 aic->ticks = 0;
2319 eqd = aic->et_eqd;
2324 rq = sc->rq[0];
2325 rxpkts = rq->rx_stats.rx_pkts;
2328 if (i + 1 < sc->nrqs) {
2329 rq = sc->rq[i + 1];
2330 rxpkts += rq->rx_stats.rx_pkts;
2332 if (i < sc->nwqs) {
2333 wq = sc->wq[i];
2334 tx_reqs = wq->tx_stats.tx_reqs;
2339 if (!aic->ticks || now < aic->ticks ||
2340 rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
2341 aic->prev_rxpkts = rxpkts;
2342 aic->prev_txreqs = tx_reqs;
2343 aic->ticks = now;
2347 delta = ticks_to_msecs(now - aic->ticks);
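/* combined RX-completion and TX-request rate, in packets per second, over the last sample interval */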
2349 pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
2350 (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
2356 eqd = min(eqd, aic->max_eqd);
2357 eqd = max(eqd, aic->min_eqd);
2359 aic->prev_rxpkts = rxpkts;
2360 aic->prev_txreqs = tx_reqs;
2361 aic->ticks = now;
2364 if (eqd != aic->cur_eqd) {
2365 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2366 set_eqd[num].eq_id = eqo->eq_id;
2367 aic->cur_eqd = eqd;
2368 num++;
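/* the delay-modify mailbox command appears to take at most 8 EQ entries, so post the updates in chunks of 8 */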
2373 for(i = 0; i < num; i += 8) {
2374 if ((num - i) >= 8)
2377 oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
2389 if (sc->hw_error)
2413 sc->hw_error = TRUE;
2414 device_printf(sc->dev, "Error detected in the card\n");
2418 device_printf(sc->dev,
2420 device_printf(sc->dev,
2422 device_printf(sc->dev,
2429 device_printf(sc->dev, "UE: %s bit set\n",
2437 device_printf(sc->dev, "UE: %s bit set\n",
2456 for (i = 0; i < sc->nwqs; i++)
2457 oce_tx_restart(sc, sc->wq[i]);
2463 callout_reset(&sc->timer, hz, oce_local_timer, sc);
2471 int pending_txqs = sc->nwqs;
2477 pending_txqs = sc->nwqs;
2485 if(!wq->ring->num_used)
2486 pending_txqs--;
2489 if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
2496 while(wq->ring->num_used) {
2497 LOCK(&wq->tx_compl_lock);
2499 UNLOCK(&wq->tx_compl_lock);
2516 if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2527 UNLOCK(&sc->dev_lock);
2528 for (i = 0; i < sc->intr_count; i++) {
2529 if (sc->intrs[i].tq != NULL) {
2530 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
2533 LOCK(&sc->dev_lock);
2535 /* Delete RX queue in card with flush param */
2550 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
2563 if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING , 0);
2572 device_printf(sc->dev, "Unable to start RX\n");
2578 device_printf(sc->dev, "Unable to start TX\n");
2582 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2592 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2594 sc->link_status = ASYNC_EVENT_LINK_UP;
2595 if_link_state_change(sc->ifp, LINK_STATE_UP);
2597 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2598 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2603 struct oce_async_evt_grp5_os2bmc *evt)
2605 DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
2606 if (evt->u.s.mgmt_enable)
2607 sc->flags |= OCE_FLAGS_OS2BMC;
2611 sc->bmc_filt_mask = evt->u.s.arp_filter;
2612 sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
2613 sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
2614 sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
2615 sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
2616 sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
2617 sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
2618 sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
2619 sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
2627 switch (cqe->u0.s.async_type) {
2631 if (gcqe->enabled)
2632 sc->pvid = gcqe->tag & VLAN_VID_MASK;
2634 sc->pvid = 0;
2650 POCE_SOFTC sc = mq->parent;
2651 struct oce_cq *cq = mq->cq;
2657 bus_dmamap_sync(cq->ring->dma.tag,
2658 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2659 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2661 while (cqe->u0.dw[3]) {
2663 if (cqe->u0.s.async_event) {
2664 evt_type = cqe->u0.s.event_type;
2665 optype = cqe->u0.s.async_type;
2667 /* Link status evt */
2675 if(dbgcqe->valid)
2676 sc->qnqid = dbgcqe->vlan_tag;
2677 sc->qnq_debug_event = TRUE;
2680 cqe->u0.dw[3] = 0;
2681 RING_GET(cq->ring, 1);
2682 bus_dmamap_sync(cq->ring->dma.tag,
2683 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
2684 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
2689 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
2698 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2699 (sc->function_mode & FNM_UMC_MODE) ||
2700 (sc->function_mode & FNM_VNIC_MODE) ||
2703 sc->nrqs = 1;
2704 sc->nwqs = 1;
2706 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2707 sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2711 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2718 sc->nrqs = sc->intr_count + 1;
2719 sc->nwqs = sc->intr_count;
2721 sc->nrqs = 1;
2722 sc->nwqs = 1;
2726 sc->nwqs = 1;
2733 caddr_t m_datatemp = m->m_data;
2735 if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2736 m->m_data += sizeof(struct ether_header);
2739 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2740 (ip6->ip6_nxt != IPPROTO_UDP)){
2742 m->m_data += sizeof(struct ip6_hdr);
2745 if(ip6e->ip6e_len == 0xff) {
2746 m->m_data = m_datatemp;
2750 m->m_data = m_datatemp;
2758 if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {
2773 if(m->m_flags & M_VLANTAG) {
2774 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2775 m->m_flags &= ~M_VLANTAG;
2779 if(sc->pvid) {
2781 vlan_tag = sc->pvid;
2790 if(sc->qnqid) {
2791 m = ether_vlanencap(m, sc->qnqid);
2815 if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2823 sc->nwqs = OCE_MAX_WQ;
2824 sc->nrssqs = max_rss;
2825 sc->nrqs = sc->nrssqs + 1;
2829 sc->nrssqs = max_rss;
2830 sc->nrqs = sc->nrssqs + 1;
2831 sc->nwqs = OCE_MAX_WQ;
2832 sc->max_vlans = MAX_VLANFILTER_SIZE;
2847 memcpy(macaddr, sc->macaddr.mac_addr, 6);
2858 return -EINVAL;
2861 if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
2862 (rdma_if->size != OCE_RDMA_IF_SIZE)) {
2863 return -ENXIO;
2866 rdma_info->close = oce_rdma_close;
2867 rdma_info->mbox_post = oce_mbox_post;
2868 rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
2869 rdma_info->get_mac_addr = oce_get_mac_addr;
2875 if (oce_rdma_if->announce != NULL) {
2877 di.dev = sc->dev;
2879 di.ifp = sc->ifp;
2880 di.db_bhandle = sc->db_bhandle;
2881 di.db_btag = sc->db_btag;
2883 if (sc->flags & OCE_FLAGS_USING_MSIX) {
2885 } else if (sc->flags & OCE_FLAGS_USING_MSI) {
2892 di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
2893 di.msix.start_vector = sc->intr_count;
2895 di.msix.vector_list[i] = sc->intrs[i].vector;
2899 memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
2900 di.vendor_id = pci_get_vendor(sc->dev);
2901 di.dev_id = pci_get_device(sc->dev);
2903 if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
2907 rdma_if->announce(&di);
2908 sc = sc->next;
2924 sc->enable_hwlro = strtol(value, NULL, 10);
2925 if(sc->enable_hwlro) {
2928 device_printf(sc->dev, "no hardware lro support\n");
2929 device_printf(sc->dev, "software lro enabled\n");
2930 sc->enable_hwlro = 0;
2932 device_printf(sc->dev, "hardware lro enabled\n");
2936 device_printf(sc->dev, "software lro enabled\n");
2939 sc->enable_hwlro = 0;
2954 device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");