Selected lines from sys/dev/oce/oce_if.c, the FreeBSD Emulex OneConnect (oce) network driver. The lines below are non-contiguous excerpts, grouped by the function they belong to; gaps between statements are elided source.

/* License header (excerpt) */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 * ...
 * freebsd-drivers@emulex.com
 */

#define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO)

/* "HOST2": entry in the UE status bit-name tables (see oce_detect_hw_error()) */
	"HOST2",

/* Module glue */
DRIVER_MODULE(oce, pci, oce_driver, 0, 0);
MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices,
    nitems(supportedDevices));
MODULE_DEPEND(oce, pci, 1, 1, 1);
/* oce_probe() */
sc->dev = dev;
sc->flags |= OCE_FLAGS_BE2;
sc->flags |= OCE_FLAGS_BE3;
sc->flags |= OCE_FLAGS_XE201;
sc->flags |= OCE_FLAGS_SH;
/* oce_attach() */
sc->tx_ring_size = OCE_TX_RING_SIZE;
sc->rx_ring_size = OCE_RX_RING_SIZE;
sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
sc->promisc = OCE_DEFAULT_PROMISCUOUS;
LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
LOCK_CREATE(&sc->dev_lock, "Device_lock");
sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
    oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
    oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
callout_init(&sc->timer, CALLOUT_MPSAFE);
rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
sc->next = NULL;
softc_tail->next = sc;
/* error unwind */
callout_drain(&sc->timer);
if (sc->vlan_attach)
	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
if (sc->vlan_detach)
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
ether_ifdetach(sc->ifp);
if_free(sc->ifp);
oce_dma_free(sc, &sc->bsmbx);
LOCK_DESTROY(&sc->dev_lock);
LOCK_DESTROY(&sc->bmbx_lock);
/* oce_detach() */
*ppoce_sc_tmp1 = sc->next;
if (sc->next == NULL) {
ppoce_sc_tmp1 = &poce_sc_tmp->next;
poce_sc_tmp = poce_sc_tmp->next;
LOCK(&sc->dev_lock);
UNLOCK(&sc->dev_lock);
callout_drain(&sc->timer);
if (sc->vlan_attach != NULL)
	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
if (sc->vlan_detach != NULL)
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
ether_ifdetach(sc->ifp);
if_free(sc->ifp);
/* oce_ioctl() */
rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
if (ifr->ifr_mtu > OCE_MAX_MTU)
if_setmtu(ifp, ifr->ifr_mtu);
device_printf(sc->dev, "Interface Up\n");
LOCK(&sc->dev_lock);
if_setdrvflagbits(sc->ifp, 0,
    IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
UNLOCK(&sc->dev_lock);
device_printf(sc->dev, "Interface Down\n");
if ((if_getflags(ifp) & IFF_PROMISC) && !sc->promisc) {
	sc->promisc = TRUE;
} else if (!(if_getflags(ifp) & IFF_PROMISC) && sc->promisc) {
	sc->promisc = FALSE;
device_printf(sc->dev,
u = ifr->ifr_reqcap ^ if_getcapenable(ifp);
    "TSO disabled due to -txcsum.\n");
if(sc->enable_hwlro) {
rc = -rc;
/* oce_init() */
LOCK(&sc->dev_lock);
if (if_getflags(sc->ifp) & IFF_UP) {
UNLOCK(&sc->dev_lock);
/* oce_multiq_start() */
if (!sc->link_status)
queue_index = m->m_pkthdr.flowid % sc->nwqs;
wq = sc->wq[queue_index];
LOCK(&wq->tx_lock);
UNLOCK(&wq->tx_lock);
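An aside on the queue selection above: oce_multiq_start() spreads traffic by reducing the mbuf's RSS flow hash modulo the number of TX work queues, so all packets of one flow land on the same queue. A minimal userland sketch of that arithmetic (names local to this sketch; nwqs mirrors sc->nwqs):

#include <stdint.h>
#include <stdio.h>

static unsigned int
pick_txq(uint32_t flowid, unsigned int nwqs)
{
	/* same arithmetic as: queue_index = m->m_pkthdr.flowid % sc->nwqs */
	return (flowid % nwqs);
}

int
main(void)
{
	printf("flow 0x1a2b -> wq %u\n", pick_txq(0x1a2b, 4));
	return (0);
}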
/* oce_multiq_flush() */
for (i = 0; i < sc->nwqs; i++) {
	while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
/* oce_intr() */
POCE_SOFTC sc = ii->sc;
struct oce_eq *eq = ii->eq;
bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
if (eqe->evnt == 0)
eqe->evnt = 0;
bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
RING_GET(eq->ring, 1);
oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
for (i = 0; i < eq->cq_valid; i++) {
	cq = eq->cq[i];
	(*cq->cq_handler)(cq->cb_arg);
for (i = 0; i < eq->cq_valid; i++) {
	cq = eq->cq[i];
	oce_arm_cq(sc, cq->cq_id, 0, TRUE);
oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
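The pattern above is the driver's event-queue drain: walk the ring while the current entry's event bit is set, clear each entry, count how many were consumed, and report that count when re-arming the queue. A self-contained userland model of the same loop over a plain array (not oce_if.c code):

#include <stdio.h>

#define EQ_SIZE 8

struct eqe { int evnt; };

static int
drain_eq(struct eqe *ring, int *cidx)
{
	int consumed = 0;

	while (ring[*cidx].evnt != 0) {
		ring[*cidx].evnt = 0;		/* eqe->evnt = 0 */
		*cidx = (*cidx + 1) % EQ_SIZE;	/* RING_GET(eq->ring, 1) */
		consumed++;
	}
	return (consumed);	/* caller passes this count to oce_arm_eq() */
}

int
main(void)
{
	struct eqe ring[EQ_SIZE] = { {1}, {1}, {1} };
	int cidx = 0;

	printf("consumed %d eqes\n", drain_eq(ring, &cidx));
	return (0);
}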
/* oce_setup_intr() */
req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
	sc->roce_intr_count = OCE_RDMA_VECTORS;
if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
	sc->intr_count = req_vectors;
	rc = pci_alloc_msix(sc->dev, &tot_vectors);
	pci_release_msi(sc->dev);
	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
		if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
			sc->roce_intr_count = (tot_vectors / 2);
			sc->intr_count = tot_vectors - sc->roce_intr_count;
		sc->intr_count = tot_vectors;
	sc->flags |= OCE_FLAGS_USING_MSIX;
sc->intr_count = 1;
device_printf(sc->dev, "Using legacy interrupt\n");
for (; vector < sc->intr_count; vector++) {
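The excerpt shows how granted MSI-X vectors are divided between the NIC and the RoCE engine: when fewer than 2 * OCE_RDMA_VECTORS were granted, RoCE gets half; the else branch is elided above, so the sketch below assumes RoCE otherwise keeps its default request. A userland model, with a local OCE_RDMA_VECTORS value assumed for illustration:

#include <stdio.h>

#define OCE_RDMA_VECTORS 2	/* value assumed for this sketch */

static void
split_vectors(int granted, int *nic, int *roce)
{
	if (granted < (2 * OCE_RDMA_VECTORS)) {
		/* scarce: give RoCE half, the NIC the rest */
		*roce = granted / 2;
		*nic = granted - *roce;
	} else {
		/* plenty: RoCE keeps its default request (assumption) */
		*roce = OCE_RDMA_VECTORS;
		*nic = granted - *roce;
	}
}

int
main(void)
{
	int nic, roce;

	split_vectors(3, &nic, &roce);
	printf("nic %d, roce %d\n", nic, roce);
	return (0);
}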
/* oce_fast_isr() */
POCE_SOFTC sc = ii->sc;
if (ii->eq == NULL)
oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
taskqueue_enqueue(ii->tq, &ii->task);
ii->eq->intr++;
/* oce_alloc_intr() */
ii = &sc->intrs[vector];
if (sc->flags & OCE_FLAGS_USING_MSIX)
ii->intr_res = bus_alloc_resource_any(sc->dev,
    SYS_RES_IRQ, &rr, RF_ACTIVE | RF_SHAREABLE);
ii->irq_rr = rr;
if (ii->intr_res == NULL) {
	device_printf(sc->dev,
TASK_INIT(&ii->task, 0, isr, ii);
ii->vector = vector;
sprintf(ii->task_name, "oce_task[%d]", ii->vector);
ii->tq = taskqueue_create_fast(ii->task_name,
    M_NOWAIT, taskqueue_thread_enqueue,
    &ii->tq);
taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
    device_get_nameunit(sc->dev));
ii->sc = sc;
rc = bus_setup_intr(sc->dev,
    ii->intr_res,
    INTR_TYPE_NET,
    oce_fast_isr, NULL, ii, &ii->tag);
/* oce_intr_free() */
for (i = 0; i < sc->intr_count; i++) {
	if (sc->intrs[i].tag != NULL)
		bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
		    sc->intrs[i].tag);
	if (sc->intrs[i].tq != NULL)
		taskqueue_free(sc->intrs[i].tq);
	if (sc->intrs[i].intr_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ,
		    sc->intrs[i].irq_rr,
		    sc->intrs[i].intr_res);
	sc->intrs[i].tag = NULL;
	sc->intrs[i].intr_res = NULL;
}
if (sc->flags & OCE_FLAGS_USING_MSIX)
	pci_release_msi(sc->dev);
/* oce_media_status() */
req->ifm_status = IFM_AVALID;
req->ifm_active = IFM_ETHER;
if (sc->link_status == 1)
	req->ifm_status |= IFM_ACTIVE;
switch (sc->link_speed) {
	req->ifm_active |= IFM_10_T | IFM_FDX;
	sc->speed = 10;
	req->ifm_active |= IFM_100_TX | IFM_FDX;
	sc->speed = 100;
	req->ifm_active |= IFM_1000_T | IFM_FDX;
	sc->speed = 1000;
	req->ifm_active |= IFM_10G_SR | IFM_FDX;
	sc->speed = 10000;
	req->ifm_active |= IFM_10G_SR | IFM_FDX;	/* 20 Gb/s reported as 10G SR media */
	sc->speed = 20000;
	req->ifm_active |= IFM_10G_SR | IFM_FDX;	/* 25 Gb/s reported as 10G SR media */
	sc->speed = 25000;
	req->ifm_active |= IFM_40G_SR4 | IFM_FDX;
	sc->speed = 40000;
sc->speed = 0;
/* oce_is_pkt_dest_bmc() */
if (!ETHER_IS_MULTICAST(eh->ether_dhost))
is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
	uint8_t nexthdr = ip6->ip6_nxt;
	switch (icmp6->icmp6_type) {
if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
	int iphlen = ip->ip_hl << 2;
	switch (uh->uh_dport) {
/* oce_tx() */
struct oce_wq *wq = sc->wq[wq_index];
if (!(m->m_flags & M_PKTHDR)) {
/* Don't allow non-TSO packets longer than MTU */
if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
device_printf(sc->dev, "Insertion unsuccessful\n");
/*
 * may cause a transmit stall on that port. So the work-around is to
 * pad short packets (<= 32 bytes) to a 36-byte length.
 */
if(m->m_pkthdr.len <= 32) {
	m_append(m, (36 - m->m_pkthdr.len), buf);
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
pd = &wq->pckts[wq->pkt_desc_head];
rc = bus_dmamap_load_mbuf_sg(wq->tag,
    pd->map,
    m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
num_wqes = pd->nsegs + 1;
if (num_wqes >= RING_NUM_FREE(wq->ring)) {
	bus_dmamap_unload(wq->tag, pd->map);
atomic_store_rel_int(&wq->pkt_desc_head,
    (wq->pkt_desc_head + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
pd->mbuf = m;
nichdr = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
nichdr->u0.dw[0] = 0;
nichdr->u0.dw[1] = 0;
nichdr->u0.dw[2] = 0;
nichdr->u0.dw[3] = 0;
nichdr->u0.s.complete = complete;
nichdr->u0.s.mgmt = os2bmc;
nichdr->u0.s.event = 1;
nichdr->u0.s.crc = 1;
nichdr->u0.s.forward = 0;
nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
nichdr->u0.s.udpcs =
    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
nichdr->u0.s.tcpcs =
    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
nichdr->u0.s.num_wqe = num_wqes;
nichdr->u0.s.total_length = m->m_pkthdr.len;
if (m->m_flags & M_VLANTAG) {
	nichdr->u0.s.vlan = 1;	/* VLAN tag present */
	nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
	if (m->m_pkthdr.tso_segsz) {
		nichdr->u0.s.lso = 1;
		nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
	nichdr->u0.s.ipcs = 1;
RING_PUT(wq->ring, 1);
atomic_add_int(&wq->ring->num_used, 1);
for (i = 0; i < pd->nsegs; i++) {
	nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring,
	    struct oce_nic_frag_wqe);
	nicfrag->u0.s.rsvd0 = 0;
	nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
	nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
	nicfrag->u0.s.frag_len = segs[i].ds_len;
	pd->wqe_idx = wq->ring->pidx;
	RING_PUT(wq->ring, 1);
	atomic_add_int(&wq->ring->num_used, 1);
if (num_wqes > (pd->nsegs + 1)) {
	nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring,
	    struct oce_nic_frag_wqe);
	nicfrag->u0.dw[0] = 0;
	nicfrag->u0.dw[1] = 0;
	nicfrag->u0.dw[2] = 0;
	nicfrag->u0.dw[3] = 0;
	pd->wqe_idx = wq->ring->pidx;
	RING_PUT(wq->ring, 1);
	atomic_add_int(&wq->ring->num_used, 1);
	pd->nsegs++;
if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
wq->tx_stats.tx_reqs++;
wq->tx_stats.tx_wrbs += num_wqes;
wq->tx_stats.tx_bytes += m->m_pkthdr.len;
wq->tx_stats.tx_pkts++;
bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
reg_value = (num_wqes << 16) | wq->wq_id;
if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
if (m->m_flags & M_MCAST)
	if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
ETHER_BPF_MTAP(sc->ifp, m);
OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
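The short-packet workaround in oce_tx() is worth isolating: frames of 32 bytes or less are padded to 36 bytes before posting, since the hardware may otherwise stall the port (per the comment in the source). A userland model over a flat buffer, not oce_if.c code; the driver appends from a scratch buffer, zeros are assumed here:

#include <stdio.h>
#include <string.h>

static size_t
pad_short_frame(unsigned char *buf, size_t len)
{
	if (len <= 32) {
		/* models: m_append(m, 36 - m->m_pkthdr.len, buf) */
		memset(buf + len, 0, 36 - len);
		len = 36;
	}
	return (len);
}

int
main(void)
{
	unsigned char frame[64] = "short";

	printf("padded to %zu bytes\n", pad_short_frame(frame, 5));
	return (0);
}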
/* oce_process_tx_completion() */
POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
pd = &wq->pckts[wq->pkt_desc_tail];
atomic_store_rel_int(&wq->pkt_desc_tail,
    (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE);
atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(wq->tag, pd->map);
m = pd->mbuf;
pd->mbuf = NULL;
if (if_getdrvflags(sc->ifp) & IFF_DRV_OACTIVE) {
	if (wq->ring->num_used < (wq->ring->num_items / 2)) {
		if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_OACTIVE));
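Taken together with oce_tx(), this is a classic single-producer/single-consumer ring: the transmit side advances pkt_desc_head and adds nsegs + 1 to num_used; the completion side advances pkt_desc_tail and subtracts the same amount. A single-threaded model of that bookkeeping (names local to this sketch; WQ_ARRAY_SIZE stands in for OCE_WQ_PACKET_ARRAY_SIZE):

#include <stdio.h>

#define WQ_ARRAY_SIZE 2048

struct wq_state {
	int head;	/* pkt_desc_head: advanced by the TX path */
	int tail;	/* pkt_desc_tail: advanced by completions */
	int num_used;	/* outstanding WQEs on the ring */
};

static void
tx_post(struct wq_state *wq, int nsegs)
{
	wq->head = (wq->head + 1) % WQ_ARRAY_SIZE;
	wq->num_used += nsegs + 1;	/* fragments plus the header WQE */
}

static void
tx_complete(struct wq_state *wq, int nsegs)
{
	wq->tail = (wq->tail + 1) % WQ_ARRAY_SIZE;
	wq->num_used -= nsegs + 1;
}

int
main(void)
{
	struct wq_state wq = { 0, 0, 0 };

	tx_post(&wq, 3);
	tx_complete(&wq, 3);
	printf("num_used back to %d\n", wq.num_used);
	return (0);
}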
/* oce_tx_restart() */
if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
if (!drbr_empty(sc->ifp, wq->br))
	taskqueue_enqueue(taskqueue_swi, &wq->txtask);
/* oce_tso_setup() */
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
	etype = ntohs(eh->evl_proto);
	etype = ntohs(eh->evl_encap_proto);
ip = (struct ip *)(m->m_data + ehdrlen);
if (ip->ip_p != IPPROTO_TCP)
th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
if (ip6->ip6_nxt != IPPROTO_TCP)
total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
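The header-length arithmetic above is the heart of TSO setup: ip_hl and th_off count 32-bit words, so both are shifted left by 2 to get bytes. A self-contained restatement (not oce_if.c code):

#include <stdio.h>

static int
tso_hdr_len(int ehdrlen, int ip_hl, int th_off)
{
	/* total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2) */
	return (ehdrlen + (ip_hl << 2) + (th_off << 2));
}

int
main(void)
{
	/* 14-byte Ethernet header, 5-word IP header, 8-word TCP header */
	printf("%d bytes of headers\n", tso_hdr_len(14, 5, 8));
	return (0);
}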
/* oce_tx_task() */
POCE_SOFTC sc = wq->parent;
if_t ifp = sc->ifp;
LOCK(&wq->tx_lock);
device_printf(sc->dev,
    "TX[%d] restart failed\n", wq->queue_index);
UNLOCK(&wq->tx_lock);
/* oce_start() */
if (!sc->link_status)
m = if_dequeue(sc->ifp);
LOCK(&sc->wq[def_q]->tx_lock);
UNLOCK(&sc->wq[def_q]->tx_lock);
sc->wq[def_q]->tx_stats.tx_stops++;
/* oce_wq_handler() */
POCE_SOFTC sc = wq->parent;
struct oce_cq *cq = wq->cq;
LOCK(&wq->tx_compl_lock);
bus_dmamap_sync(cq->ring->dma.tag,
    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
while (cqe->u0.dw[3]) {
	wq->ring->cidx = cqe->u0.s.wqe_index + 1;
	if (wq->ring->cidx >= wq->ring->num_items)
		wq->ring->cidx -= wq->ring->num_items;
	wq->tx_stats.tx_compl++;
	cqe->u0.dw[3] = 0;
	RING_GET(cq->ring, 1);
	bus_dmamap_sync(cq->ring->dma.tag,
	    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
}
oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
UNLOCK(&wq->tx_compl_lock);
/* oce_multiq_transmit() */
br = wq->br;
queue_index = wq->queue_index;
wq->tx_stats.tx_stops++;
/* oce_correct_header() */
if(!cqe2->ipv6_frame) {
	ip4_hdr->ip_ttl = cqe2->frame_lifespan;
	ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
	ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
	payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
	    - sizeof(struct ip6_hdr);
	ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
if(cqe2->push) {
tcp_hdr->th_win = htons(cqe2->tcp_window);
tcp_hdr->th_sum = 0xffff;
if(cqe2->ts_opt) {
	*p = cqe1->tcp_timestamp_val;
	*(p+1) = cqe1->tcp_timestamp_ecr;
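oce_correct_header() rewrites the IP length fields of a hardware-coalesced LRO frame: for IPv4 the total length covers everything after the Ethernet header, while the IPv6 payload length additionally excludes the fixed 40-byte IPv6 header. A sketch of that arithmetic (not oce_if.c code; the driver stores the results byte-swapped with htons()):

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN	14
#define IP6_HDR_LEN	40

/* IPv4: total length = coalesced size minus the Ethernet header */
static uint16_t
ipv4_tot_len(uint32_t coalesced_size)
{
	return ((uint16_t)(coalesced_size - ETHER_HDR_LEN));
}

/* IPv6: payload length also excludes the fixed IPv6 header */
static uint16_t
ipv6_payload_len(uint32_t coalesced_size)
{
	return ((uint16_t)(coalesced_size - ETHER_HDR_LEN - IP6_HDR_LEN));
}

int
main(void)
{
	printf("v4 %u, v6 %u\n", ipv4_tot_len(4000), ipv6_payload_len(4000));
	return (0);
}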
/* oce_rx_mbuf_chain() */
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
uint32_t len = cqe_info->pkt_size;
for (i = 0; i < cqe_info->num_frags; i++) {
	if (rq->ring->cidx == rq->ring->pidx) {
		device_printf(sc->dev,
		    "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
	pd = &rq->pckts[rq->ring->cidx];
	bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(rq->tag, pd->map);
	RING_GET(rq->ring, 1);
	rq->pending--;
	frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
	pd->mbuf->m_len = frag_len;
	pd->mbuf->m_flags &= ~M_PKTHDR;
	tail->m_next = pd->mbuf;
	if(rq->islro)
		tail->m_nextpkt = NULL;
	tail = pd->mbuf;
	pd->mbuf->m_pkthdr.len = len;
	if(rq->islro)
		pd->mbuf->m_nextpkt = NULL;
	pd->mbuf->m_pkthdr.csum_flags = 0;
	if (cqe_info->l4_cksum_pass) {
		if(!cqe_info->ipv6_frame) { /* IPv4 */
			pd->mbuf->m_pkthdr.csum_flags |=
		if(rq->islro) {
			pd->mbuf->m_pkthdr.csum_flags |=
		pd->mbuf->m_pkthdr.csum_data = 0xffff;
	if (cqe_info->ip_cksum_pass) {
		pd->mbuf->m_pkthdr.csum_flags |=
	*m = tail = pd->mbuf;
	pd->mbuf = NULL;
	len -= frag_len;
/* oce_rx_lro() */
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
cq_info.pkt_size = cqe->pkt_size;
cq_info.vtag = cqe->vlan_tag;
cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
cq_info.ipv6_frame = cqe->ipv6_frame;
cq_info.vtp = cqe->vtp;
cq_info.qnq = cqe->qnq;
cq_info.pkt_size = cqe2->coalesced_size;
cq_info.vtag = cqe2->vlan_tag;
cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
cq_info.ipv6_frame = cqe2->ipv6_frame;
cq_info.vtp = cqe2->vtp;
cq_info.qnq = cqe1->qnq;
cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
if(cq_info.pkt_size % rq->cfg.frag_size)
//assert(cqe2->valid != 0);
//assert(cqe2->cqe_type != 2);
m->m_pkthdr.rcvif = sc->ifp;
if (rq->queue_index)
	m->m_pkthdr.flowid = (rq->queue_index - 1);
else
	m->m_pkthdr.flowid = rq->queue_index;
if (sc->function_mode & FNM_FLEX10_MODE) {
	m->m_pkthdr.ether_vtag = cq_info.vtag;
	m->m_flags |= M_VLANTAG;
} else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) {
	m->m_pkthdr.ether_vtag = cq_info.vtag;
	m->m_flags |= M_VLANTAG;
if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
if_input(sc->ifp, m);
rq->rx_stats.rx_pkts++;
rq->rx_stats.rx_bytes += cq_info.pkt_size;
rq->rx_stats.rx_frags += cq_info.num_frags;
rq->rx_stats.rx_ucast_pkts++;
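Note the fragment-count computation above: integer division of the coalesced packet size by the receive fragment size, rounded up, written exactly the way the driver writes it. As a self-contained restatement (not oce_if.c code):

#include <stdio.h>

static unsigned int
rx_num_frags(unsigned int pkt_size, unsigned int frag_size)
{
	unsigned int n = pkt_size / frag_size;

	if (pkt_size % frag_size)
		n++;
	return (n);
}

int
main(void)
{
	printf("%u frags\n", rx_num_frags(9000, 2048));	/* prints 5 */
	return (0);
}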
/* oce_rx() */
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
if(!cqe->u0.s.num_fragments)
len = cqe->u0.s.pkt_size;
oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
vtag = BSWAP_16(cqe->u0.s.vlan_tag);
vtag = cqe->u0.s.vlan_tag;
cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
cq_info.ipv6_frame = cqe->u0.s.ip_ver;
cq_info.num_frags = cqe->u0.s.num_fragments;
cq_info.pkt_size = cqe->u0.s.pkt_size;
m->m_pkthdr.rcvif = sc->ifp;
if (rq->queue_index)
	m->m_pkthdr.flowid = (rq->queue_index - 1);
else
	m->m_pkthdr.flowid = rq->queue_index;
if (sc->function_mode & FNM_FLEX10_MODE) {
	if (cqe->u0.s.qnq) {
		m->m_pkthdr.ether_vtag = vtag;
		m->m_flags |= M_VLANTAG;
} else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
	m->m_pkthdr.ether_vtag = vtag;
	m->m_flags |= M_VLANTAG;
if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
    (cqe->u0.s.ip_cksum_pass) &&
    (cqe->u0.s.l4_cksum_pass) &&
    (!cqe->u0.s.ip_ver) &&
    (rq->lro.lro_cnt != 0)) {
	if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
		rq->lro_pkts_queued++;
if_input(sc->ifp, m);
rq->rx_stats.rx_pkts++;
rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
	rq->rx_stats.rx_mcast_pkts++;
if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
	rq->rx_stats.rx_ucast_pkts++;
/* oce_discard_rx_comp() */
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
if (rq->ring->cidx == rq->ring->pidx) {
	device_printf(sc->dev,
	    "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
pd = &rq->pckts[rq->ring->cidx];
bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(rq->tag, pd->map);
if (pd->mbuf != NULL) {
	m_freem(pd->mbuf);
	pd->mbuf = NULL;
}
RING_GET(rq->ring, 1);
rq->pending--;
/* oce_cqe_vtp_valid() */
if (sc->be3_native) {
	vtp = cqe_v1->u0.s.vlan_tag_present;
vtp = cqe->u0.s.vlan_tag_present;

/* oce_cqe_portid_valid() */
if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
	port_id = cqe_v1->u0.s.port;
	if (sc->port_id != port_id)
/* oce_rx_flush_lro() */
struct lro_ctrl *lro = &rq->lro;
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
rq->lro_pkts_queued = 0;

/* oce_init_lro() */
for (i = 0; i < sc->nrqs; i++) {
	lro = &sc->rq[i]->lro;
	device_printf(sc->dev, "LRO init failed\n");
	lro->ifp = sc->ifp;

/* oce_free_lro() */
for (i = 0; i < sc->nrqs; i++) {
	lro = &sc->rq[i]->lro;
/* oce_alloc_rx_bufs() */
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
pd = &rq->pckts[rq->ring->pidx];
pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
if (pd->mbuf == NULL) {
	device_printf(sc->dev, "mbuf allocation failed, size = %d\n", oce_rq_buf_size);
pd->mbuf->m_nextpkt = NULL;
pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
rc = bus_dmamap_load_mbuf_sg(rq->tag,
    pd->map,
    pd->mbuf,
    segs, &nsegs, BUS_DMA_NOWAIT);
if (rc) {
	m_free(pd->mbuf);
	device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
i--;
bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
RING_PUT(rq->ring, 1);
rq->pending++;
oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
for (i = added / oce_max_rq_posts; i > 0; i--) {
	rxdb_reg.bits.qid = rq->rq_id;
	if(rq->islro) {
		val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
	added -= oce_max_rq_posts;
rxdb_reg.bits.qid = rq->rq_id;
if(rq->islro) {
	val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
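The tail of oce_alloc_rx_bufs() batches its doorbell writes: newly added buffers are announced to the chip in chunks of at most oce_max_rq_posts, then one final write covers the remainder. A userland model of that loop (not oce_if.c code; ring_doorbell() is a stand-in for the OCE_WRITE_REG32() doorbell write):

#include <stdio.h>

static void
ring_doorbell(int qid, int nposted)
{
	printf("qid %d: post %d buffers\n", qid, nposted);
}

static void
post_rx_buffers(int qid, int added, int max_posts)
{
	int i;

	/* full chunks first, as in: for (i = added / oce_max_rq_posts; ...) */
	for (i = added / max_posts; i > 0; i--) {
		ring_doorbell(qid, max_posts);
		added -= max_posts;
	}
	/* then whatever is left over */
	if (added > 0)
		ring_doorbell(qid, added);
}

int
main(void)
{
	post_rx_buffers(0, 700, 255);	/* posts 255, 255, then 190 */
	return (0);
}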
/* oce_check_rx_bufs() */
oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
if(!sc->enable_hwlro) {
	if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
		oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
} else {
	if ((OCE_RQ_PACKET_ARRAY_SIZE - 1 - rq->pending) > 64)
/* oce_rq_handler_lro() */
struct oce_cq *cq = rq->cq;
POCE_SOFTC sc = rq->parent;
LOCK(&rq->rx_lock);
bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
while (cqe->valid) {
	if(cqe->cqe_type == 0) { /* singleton cqe */
		if(rq->cqe_firstpart != NULL) {
			device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
		if(cqe->error != 0) {
			rq->rx_stats.rxcp_err++;
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
		rq->rx_stats.rx_compl++;
		cqe->valid = 0;
		RING_GET(cq->ring, 1);
	} else if(cqe->cqe_type == 0x1) { /* first part */
		if(rq->cqe_firstpart != NULL) {
			device_printf(sc->dev, "Got cqe1 after cqe1 \n");
		rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
		RING_GET(cq->ring, 1);
	} else if(cqe->cqe_type == 0x2) { /* second part */
		if(cqe2->error != 0) {
			rq->rx_stats.rxcp_err++;
			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
		if(rq->cqe_firstpart == NULL) {
			device_printf(sc->dev, "Got cqe2 without cqe1 \n");
		oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
		rq->rx_stats.rx_compl++;
		rq->cqe_firstpart->valid = 0;
		cqe2->valid = 0;
		rq->cqe_firstpart = NULL;
		RING_GET(cq->ring, 1);
	}
	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
}
UNLOCK(&rq->rx_lock);
/* oce_rq_handler() */
struct oce_cq *cq = rq->cq;
POCE_SOFTC sc = rq->parent;
if(rq->islro) {
LOCK(&rq->rx_lock);
bus_dmamap_sync(cq->ring->dma.tag,
    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
while (cqe->u0.dw[2]) {
	if (cqe->u0.s.error == 0) {
	rq->rx_stats.rxcp_err++;
	if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
	rq->rx_stats.rx_compl++;
	cqe->u0.dw[2] = 0;
	if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
	RING_GET(cq->ring, 1);
	bus_dmamap_sync(cq->ring->dma.tag,
	    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
UNLOCK(&rq->rx_lock);
/* oce_attach_ifp() */
sc->ifp = if_alloc(IFT_ETHER);
ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
if_setflags(sc->ifp, IFF_BROADCAST | IFF_MULTICAST);
if_setioctlfn(sc->ifp, oce_ioctl);
if_setstartfn(sc->ifp, oce_start);
if_setinitfn(sc->ifp, oce_init);
if_setmtu(sc->ifp, ETHERMTU);
if_setsoftc(sc->ifp, sc);
if_settransmitfn(sc->ifp, oce_multiq_start);
if_setqflushfn(sc->ifp, oce_multiq_flush);
if_initname(sc->ifp,
    device_get_name(sc->dev), device_get_unit(sc->dev));
if_setsendqlen(sc->ifp, OCE_MAX_TX_DESC - 1);
if_setsendqready(sc->ifp);
if_sethwassist(sc->ifp, OCE_IF_HWASSIST);
if_sethwassistbits(sc->ifp, CSUM_TSO, 0);
if_sethwassistbits(sc->ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP), 0);
if_setcapabilities(sc->ifp, OCE_IF_CAPABILITIES);
if_setcapabilitiesbit(sc->ifp, IFCAP_HWCSUM, 0);
if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWFILTER, 0);
if_setcapabilitiesbit(sc->ifp, IFCAP_TSO, 0);
if_setcapabilitiesbit(sc->ifp, IFCAP_LRO, 0);
if_setcapabilitiesbit(sc->ifp, IFCAP_VLAN_HWTSO, 0);
if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
if_setbaudrate(sc->ifp, IF_Gbps(10));
if_sethwtsomax(sc->ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
if_sethwtsomaxsegcount(sc->ifp, OCE_MAX_TX_ELEMENTS);
if_sethwtsomaxsegsize(sc->ifp, 4096);
ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
/* oce_add_vlan() */
sc->vlan_tag[vtag] = 1;
sc->vlans_added++;
if (sc->vlans_added <= (sc->max_vlans + 1))

/* oce_del_vlan() */
sc->vlan_tag[vtag] = 0;
sc->vlans_added--;

/* oce_vid_config() */
if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
    (if_getcapenable(sc->ifp) & IFCAP_VLAN_HWFILTER)) {
	if (sc->vlan_tag[i]) {
status = oce_config_vlan(sc, (uint8_t) sc->if_id,
status = oce_config_vlan(sc, (uint8_t) sc->if_id,
/* oce_mac_addr_set() */
uint32_t old_pmac_id = sc->pmac_id;
status = bcmp((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
    sc->macaddr.size_of_struct);
status = oce_mbox_macaddr_add(sc, (uint8_t *)(if_getlladdr(sc->ifp)),
    sc->if_id, &sc->pmac_id);
status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
bcopy((if_getlladdr(sc->ifp)), sc->macaddr.mac_addr,
    sc->macaddr.size_of_struct);
device_printf(sc->dev, "Failed to update MAC address\n");

/* oce_handle_passthrough() */
sizeof(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str),
fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
/* oce_eqd_set_periodic() */
for (i = 0; i < sc->neqs; i++) {
	eqo = sc->eq[i];
	aic = &sc->aic_obj[i];
	if (!aic->enable) {
		if (aic->ticks)
			aic->ticks = 0;
		eqd = aic->et_eqd;
	rq = sc->rq[0];
	rxpkts = rq->rx_stats.rx_pkts;
	if (i + 1 < sc->nrqs) {
		rq = sc->rq[i + 1];
		rxpkts += rq->rx_stats.rx_pkts;
	if (i < sc->nwqs) {
		wq = sc->wq[i];
		tx_reqs = wq->tx_stats.tx_reqs;
	if (!aic->ticks || now < aic->ticks ||
	    rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
		aic->prev_rxpkts = rxpkts;
		aic->prev_txreqs = tx_reqs;
		aic->ticks = now;
	delta = ticks_to_msecs(now - aic->ticks);
	pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
	    (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
	eqd = min(eqd, aic->max_eqd);
	eqd = max(eqd, aic->min_eqd);
	aic->prev_rxpkts = rxpkts;
	aic->prev_txreqs = tx_reqs;
	aic->ticks = now;
	if (eqd != aic->cur_eqd) {
		set_eqd[num].eq_id = eqo->eq_id;
		aic->cur_eqd = eqd;
if ((num - i) >= 8)
	oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
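This is adaptive interrupt coalescing: packets per second are computed from the rx/tx counter deltas over the elapsed milliseconds, and the resulting event-queue delay is clamped to [min_eqd, max_eqd]. A userland sketch of the rate math and clamp only; the pps-to-delay mapping below is a placeholder, since the driver's actual curve is not part of this excerpt:

#include <stdint.h>
#include <stdio.h>

static uint32_t
eqd_from_rates(uint32_t rx_delta, uint32_t tx_delta, uint32_t delta_msec,
    uint32_t min_eqd, uint32_t max_eqd)
{
	uint32_t pps, eqd;

	/* same shape as the driver's pps computation */
	pps = ((rx_delta * 1000) / delta_msec) +
	    ((tx_delta * 1000) / delta_msec);
	eqd = pps / 5000;		/* placeholder mapping, not the driver's */
	if (eqd > max_eqd)		/* eqd = min(eqd, aic->max_eqd) */
		eqd = max_eqd;
	if (eqd < min_eqd)		/* eqd = max(eqd, aic->min_eqd) */
		eqd = min_eqd;
	return (eqd);
}

int
main(void)
{
	printf("eqd = %u\n", eqd_from_rates(500000, 100000, 1000, 0, 128));
	return (0);
}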
/* oce_detect_hw_error() */
if (sc->hw_error)
sc->hw_error = TRUE;
device_printf(sc->dev, "Error detected in the card\n");
device_printf(sc->dev,
device_printf(sc->dev,
device_printf(sc->dev,
device_printf(sc->dev, "UE: %s bit set\n",
device_printf(sc->dev, "UE: %s bit set\n",

/* oce_local_timer() */
for (i = 0; i < sc->nwqs; i++)
	oce_tx_restart(sc, sc->wq[i]);
callout_reset(&sc->timer, hz, oce_local_timer, sc);
/* oce_tx_compl_clean() */
int pending_txqs = sc->nwqs;
pending_txqs = sc->nwqs;
if(!wq->ring->num_used)
	pending_txqs--;
if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
while(wq->ring->num_used) {
	LOCK(&wq->tx_compl_lock);
	UNLOCK(&wq->tx_compl_lock);
/* oce_if_deactivate() */
if_setdrvflagbits(sc->ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
UNLOCK(&sc->dev_lock);
for (i = 0; i < sc->intr_count; i++) {
	if (sc->intrs[i].tq != NULL) {
		taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
LOCK(&sc->dev_lock);
oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

/* oce_if_activate() */
if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
device_printf(sc->dev, "Unable to start RX\n");
device_printf(sc->dev, "Unable to start TX\n");
oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
/* process_link_state() */
if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
	sc->link_status = ASYNC_EVENT_LINK_UP;
	if_link_state_change(sc->ifp, LINK_STATE_UP);
	sc->link_status = ASYNC_EVENT_LINK_DOWN;
	if_link_state_change(sc->ifp, LINK_STATE_DOWN);

/* oce_async_grp5_osbmc_process() */
if (evt->u.s.mgmt_enable)
	sc->flags |= OCE_FLAGS_OS2BMC;
sc->bmc_filt_mask = evt->u.s.arp_filter;
sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);

/* oce_process_grp5_events() */
switch (cqe->u0.s.async_type) {
if (gcqe->enabled)
	sc->pvid = gcqe->tag & VLAN_VID_MASK;
	sc->pvid = 0;
/* oce_mq_handler() */
POCE_SOFTC sc = mq->parent;
struct oce_cq *cq = mq->cq;
bus_dmamap_sync(cq->ring->dma.tag,
    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
while (cqe->u0.dw[3]) {
	if (cqe->u0.s.async_event) {
		evt_type = cqe->u0.s.event_type;
		optype = cqe->u0.s.async_type;
		if(dbgcqe->valid)
			sc->qnqid = dbgcqe->vlan_tag;
		sc->qnq_debug_event = TRUE;
	cqe->u0.dw[3] = 0;
	RING_GET(cq->ring, 1);
	bus_dmamap_sync(cq->ring->dma.tag,
	    cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
}
oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
/* setup_max_queues_want() */
if ((sc->function_mode & FNM_FLEX10_MODE) ||
    (sc->function_mode & FNM_UMC_MODE) ||
    (sc->function_mode & FNM_VNIC_MODE) ||
	sc->nrqs = 1;
	sc->nwqs = 1;
} else {
	sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
	sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;

/* update_queues_got() */
sc->nrqs = sc->intr_count + 1;
sc->nwqs = sc->intr_count;
sc->nrqs = 1;
sc->nwqs = 1;
sc->nwqs = 1;
/* oce_check_ipv6_ext_hdr() */
caddr_t m_datatemp = m->m_data;
if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
	m->m_data += sizeof(struct ether_header);
	if((ip6->ip6_nxt != IPPROTO_TCP) &&
	   (ip6->ip6_nxt != IPPROTO_UDP)) {
		m->m_data += sizeof(struct ip6_hdr);
		if(ip6e->ip6e_len == 0xff) {
			m->m_data = m_datatemp;
	m->m_data = m_datatemp;
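The test above treats any IPv6 next-header value other than TCP or UDP as a possible extension-header chain that needs closer inspection. The same predicate as a self-contained userland function (not oce_if.c code):

#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
may_have_ext_hdr(uint8_t ip6_nxt)
{
	/* anything but TCP/UDP may start an extension-header chain */
	return (ip6_nxt != IPPROTO_TCP && ip6_nxt != IPPROTO_UDP);
}

int
main(void)
{
	printf("nxt=0 (hop-by-hop): %d\n", may_have_ext_hdr(0));
	return (0);
}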
/* is_be3_a1() */
if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) {

/* oce_insert_vlan_tag() */
if(m->m_flags & M_VLANTAG) {
	vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
	m->m_flags &= ~M_VLANTAG;
if(sc->pvid) {
	vlan_tag = sc->pvid;
if(sc->qnqid) {
	m = ether_vlanencap(m, sc->qnqid);
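The tag-selection order in oce_insert_vlan_tag() is: a tag carried by the packet wins, otherwise the port VLAN id (pvid) is used. A minimal sketch of that choice (not oce_if.c code; the 0x0fff mask mirrors what EVL_VLANOFTAG() keeps):

#include <stdio.h>

static unsigned short
choose_vlan_tag(int pkt_tagged, unsigned short pkt_tag, unsigned short pvid)
{
	if (pkt_tagged)
		return (pkt_tag & 0x0fff);	/* low 12 bits: the VLAN id */
	return (pvid);				/* 0 means nothing to insert */
}

int
main(void)
{
	printf("tag %u\n", choose_vlan_tag(1, 0x2005, 7));	/* prints 5 */
	return (0);
}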
/* oce_get_config() */
if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
sc->nwqs = OCE_MAX_WQ;
sc->nrssqs = max_rss;
sc->nrqs = sc->nrssqs + 1;
sc->nrssqs = max_rss;
sc->nrqs = sc->nrssqs + 1;
sc->nwqs = OCE_MAX_WQ;
sc->max_vlans = MAX_VLANFILTER_SIZE;

/* oce_get_mac_addr() */
memcpy(macaddr, sc->macaddr.mac_addr, 6);
/* oce_register_rdma() */
return -EINVAL;
if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
    (rdma_if->size != OCE_RDMA_IF_SIZE)) {
	return -ENXIO;
}
rdma_info->close = oce_rdma_close;
rdma_info->mbox_post = oce_mbox_post;
rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
rdma_info->get_mac_addr = oce_get_mac_addr;
if (oce_rdma_if->announce != NULL) {
	di.dev = sc->dev;
	di.ifp = sc->ifp;
	di.db_bhandle = sc->db_bhandle;
	di.db_btag = sc->db_btag;
	if (sc->flags & OCE_FLAGS_USING_MSIX) {
	} else if (sc->flags & OCE_FLAGS_USING_MSI) {
	di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
	di.msix.start_vector = sc->intr_count;
		di.msix.vector_list[i] = sc->intrs[i].vector;
	memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
	di.vendor_id = pci_get_vendor(sc->dev);
	di.dev_id = pci_get_device(sc->dev);
	if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
	rdma_if->announce(&di);
	sc = sc->next;
/* oce_read_env_variables() */
sc->enable_hwlro = strtol(value, NULL, 10);
if(sc->enable_hwlro) {
	device_printf(sc->dev, "no hardware lro support\n");
	device_printf(sc->dev, "software lro enabled\n");
	sc->enable_hwlro = 0;
	device_printf(sc->dev, "hardware lro enabled\n");
device_printf(sc->dev, "software lro enabled\n");
sc->enable_hwlro = 0;
device_printf(sc->dev, "Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K\n");