oce_if.c (cfe30d02adda7c3b5c76156ac52d50d8cab325d9) | oce_if.c (291a1934fa36be527bba60f5d24688687118b29a) |
---|---|
1/*- | 1/*- |
2 * Copyright (C) 2012 Emulex | 2 * Copyright (C) 2013 Emulex |
3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * --- 80 unchanged lines hidden (view full) --- 91static void oce_local_timer(void *arg); 92static void oce_if_deactivate(POCE_SOFTC sc); 93static void oce_if_activate(POCE_SOFTC sc); 94static void setup_max_queues_want(POCE_SOFTC sc); 95static void update_queues_got(POCE_SOFTC sc); 96static void process_link_state(POCE_SOFTC sc, 97 struct oce_async_cqe_link_state *acqe); 98static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m); | 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * --- 80 unchanged lines hidden (view full) --- 91static void oce_local_timer(void *arg); 92static void oce_if_deactivate(POCE_SOFTC sc); 93static void oce_if_activate(POCE_SOFTC sc); 94static void setup_max_queues_want(POCE_SOFTC sc); 95static void update_queues_got(POCE_SOFTC sc); 96static void process_link_state(POCE_SOFTC sc, 97 struct oce_async_cqe_link_state *acqe); 98static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m); |
99static void oce_get_config(POCE_SOFTC sc); |
|
99static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete); 100 101/* IP specific */ 102#if defined(INET6) || defined(INET) 103static int oce_init_lro(POCE_SOFTC sc); 104static void oce_rx_flush_lro(struct oce_rq *rq); 105static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp); 106#endif --- 35 unchanged lines hidden (view full) --- 142 143/* Supported devices table */ 144static uint32_t supportedDevices[] = { 145 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2, 146 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3, 147 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3, 148 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201, 149 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF, | 100static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete); 101 102/* IP specific */ 103#if defined(INET6) || defined(INET) 104static int oce_init_lro(POCE_SOFTC sc); 105static void oce_rx_flush_lro(struct oce_rq *rq); 106static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp); 107#endif --- 35 unchanged lines hidden (view full) --- 143 144/* Supported devices table */ 145static uint32_t supportedDevices[] = { 146 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2, 147 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3, 148 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3, 149 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201, 150 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF, |
151 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH |
|
150}; 151 152 153 154 155/***************************************************************************** 156 * Driver entry points functions * 157 *****************************************************************************/ --- 27 unchanged lines hidden (view full) --- 185 break; 186 case PCI_PRODUCT_BE3: 187 sc->flags |= OCE_FLAGS_BE3; 188 break; 189 case PCI_PRODUCT_XE201: 190 case PCI_PRODUCT_XE201_VF: 191 sc->flags |= OCE_FLAGS_XE201; 192 break; | 152}; 153 154 155 156 157/***************************************************************************** 158 * Driver entry points functions * 159 *****************************************************************************/ --- 27 unchanged lines hidden (view full) --- 187 break; 188 case PCI_PRODUCT_BE3: 189 sc->flags |= OCE_FLAGS_BE3; 190 break; 191 case PCI_PRODUCT_XE201: 192 case PCI_PRODUCT_XE201_VF: 193 sc->flags |= OCE_FLAGS_XE201; 194 break; |
195 case PCI_PRODUCT_SH: 196 sc->flags |= OCE_FLAGS_SH; 197 break; |
|
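The new column above extends device support: PCI_PRODUCT_SH is added to supportedDevices[] and the probe switch sets OCE_FLAGS_SH for it, alongside the existing BE2/BE3/XE201 cases. The table keys each entry as (vendor << 16) | device. A minimal userland sketch of that matching scheme follows; the ID values and helper names are placeholders, not the driver's PCI_VENDOR_* / PCI_PRODUCT_* constants.

        /*
         * Sketch of the (vendor << 16) | device keying used by supportedDevices[]
         * above.  The ID values and the main() harness are placeholders for
         * illustration, not the driver's real macros.
         */
        #include <stdint.h>
        #include <stdio.h>

        #define VENDOR_EMULEX  0x10dfu   /* assumed vendor ID, for illustration */
        #define PRODUCT_SH     0x0720u   /* hypothetical Skyhawk device ID */

        static const uint32_t supported[] = {
                (VENDOR_EMULEX << 16) | PRODUCT_SH,
        };

        static int
        is_supported(uint16_t vendor, uint16_t device)
        {
                uint32_t key = ((uint32_t)vendor << 16) | device;
                size_t i;

                for (i = 0; i < sizeof(supported) / sizeof(supported[0]); i++)
                        if (supported[i] == key)
                                return (1);
                return (0);
        }

        int
        main(void)
        {
                printf("match: %d\n", is_supported(VENDOR_EMULEX, PRODUCT_SH));
                return (0);
        }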
193 default: 194 return ENXIO; 195 } 196 return BUS_PROBE_DEFAULT; 197 } 198 } 199 } 200 --- 8 unchanged lines hidden (view full) --- 209 int rc = 0; 210 211 sc = device_get_softc(dev); 212 213 rc = oce_hw_pci_alloc(sc); 214 if (rc) 215 return rc; 216 | 198 default: 199 return ENXIO; 200 } 201 return BUS_PROBE_DEFAULT; 202 } 203 } 204 } 205 --- 8 unchanged lines hidden (view full) --- 214 int rc = 0; 215 216 sc = device_get_softc(dev); 217 218 rc = oce_hw_pci_alloc(sc); 219 if (rc) 220 return rc; 221 |
217 sc->rss_enable = oce_enable_rss; | |
218 sc->tx_ring_size = OCE_TX_RING_SIZE; 219 sc->rx_ring_size = OCE_RX_RING_SIZE; 220 sc->rq_frag_size = OCE_RQ_BUF_SIZE; 221 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL; 222 sc->promisc = OCE_DEFAULT_PROMISCUOUS; 223 224 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock"); 225 LOCK_CREATE(&sc->dev_lock, "Device_lock"); 226 227 /* initialise the hardware */ 228 rc = oce_hw_init(sc); 229 if (rc) 230 goto pci_res_free; 231 | 222 sc->tx_ring_size = OCE_TX_RING_SIZE; 223 sc->rx_ring_size = OCE_RX_RING_SIZE; 224 sc->rq_frag_size = OCE_RQ_BUF_SIZE; 225 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL; 226 sc->promisc = OCE_DEFAULT_PROMISCUOUS; 227 228 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock"); 229 LOCK_CREATE(&sc->dev_lock, "Device_lock"); 230 231 /* initialise the hardware */ 232 rc = oce_hw_init(sc); 233 if (rc) 234 goto pci_res_free; 235 |
236 oce_get_config(sc); 237 |
|
232 setup_max_queues_want(sc); 233 234 rc = oce_setup_intr(sc); 235 if (rc) 236 goto mbox_free; 237 238 rc = oce_queue_init_all(sc); 239 if (rc) --- 241 unchanged lines hidden (view full) --- 481static int 482oce_multiq_start(struct ifnet *ifp, struct mbuf *m) 483{ 484 POCE_SOFTC sc = ifp->if_softc; 485 struct oce_wq *wq = NULL; 486 int queue_index = 0; 487 int status = 0; 488 | 238 setup_max_queues_want(sc); 239 240 rc = oce_setup_intr(sc); 241 if (rc) 242 goto mbox_free; 243 244 rc = oce_queue_init_all(sc); 245 if (rc) --- 241 unchanged lines hidden (view full) --- 487static int 488oce_multiq_start(struct ifnet *ifp, struct mbuf *m) 489{ 490 POCE_SOFTC sc = ifp->if_softc; 491 struct oce_wq *wq = NULL; 492 int queue_index = 0; 493 int status = 0; 494 |
495 if (!sc->link_status) 496 return ENXIO; 497 |
|
489 if ((m->m_flags & M_FLOWID) != 0) 490 queue_index = m->m_pkthdr.flowid % sc->nwqs; | 498 if ((m->m_flags & M_FLOWID) != 0) 499 queue_index = m->m_pkthdr.flowid % sc->nwqs; |
491 | 500 |
492 wq = sc->wq[queue_index]; 493 | 501 wq = sc->wq[queue_index]; 502 |
494 if (TRY_LOCK(&wq->tx_lock)) { 495 status = oce_multiq_transmit(ifp, m, wq); 496 UNLOCK(&wq->tx_lock); 497 } else { 498 status = drbr_enqueue(ifp, wq->br, m); 499 } | 503 LOCK(&wq->tx_lock); 504 status = oce_multiq_transmit(ifp, m, wq); 505 UNLOCK(&wq->tx_lock); 506 |
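In oce_multiq_start() the new code returns ENXIO when the link is down and then takes wq->tx_lock unconditionally before oce_multiq_transmit(), dropping the old TRY_LOCK/drbr_enqueue fallback that deferred the mbuf on contention. LOCK()/TRY_LOCK() are assumed here to be the driver's wrappers around FreeBSD mutexes; the sketch below contrasts the two acquisition styles using POSIX mutexes purely for illustration, with transmit() and enqueue_for_later() as hypothetical helpers.

        /*
         * Illustration only: contrasts the try-lock-or-defer pattern removed on
         * the left with the unconditional blocking lock used on the right.
         */
        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

        static void transmit(int pkt)          { printf("sent %d\n", pkt); }
        static void enqueue_for_later(int pkt) { printf("deferred %d\n", pkt); }

        /* Old style: try the lock; on contention, defer to a software queue. */
        static void
        send_trylock(int pkt)
        {
                if (pthread_mutex_trylock(&tx_lock) == 0) {
                        transmit(pkt);
                        pthread_mutex_unlock(&tx_lock);
                } else {
                        enqueue_for_later(pkt);
                }
        }

        /* New style: block until the ring lock is free, then transmit. */
        static void
        send_blocking(int pkt)
        {
                pthread_mutex_lock(&tx_lock);
                transmit(pkt);
                pthread_mutex_unlock(&tx_lock);
        }

        int
        main(void)
        {
                send_trylock(1);
                send_blocking(2);
                return (0);
        }

The blocking form serializes every submitter on the per-WQ lock instead of diverting contended packets into the software buf_ring for a later restart.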
500 return status; 501 502} 503 504 505static void 506oce_multiq_flush(struct ifnet *ifp) 507{ --- 66 unchanged lines hidden (view full) --- 574 575 576static int 577oce_setup_intr(POCE_SOFTC sc) 578{ 579 int rc = 0, use_intx = 0; 580 int vector = 0, req_vectors = 0; 581 | 507 return status; 508 509} 510 511 512static void 513oce_multiq_flush(struct ifnet *ifp) 514{ --- 66 unchanged lines hidden (view full) --- 581 582 583static int 584oce_setup_intr(POCE_SOFTC sc) 585{ 586 int rc = 0, use_intx = 0; 587 int vector = 0, req_vectors = 0; 588 |
582 if (sc->rss_enable) | 589 if (is_rss_enabled(sc)) |
583 req_vectors = MAX((sc->nrqs - 1), sc->nwqs); 584 else 585 req_vectors = 1; 586 587 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) { 588 sc->intr_count = req_vectors; 589 rc = pci_alloc_msix(sc->dev, &sc->intr_count); 590 if (rc != 0) { --- 182 unchanged lines hidden (view full) --- 773static int 774oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index) 775{ 776 int rc = 0, i, retry_cnt = 0; 777 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS]; 778 struct mbuf *m, *m_temp; 779 struct oce_wq *wq = sc->wq[wq_index]; 780 struct oce_packet_desc *pd; | 590 req_vectors = MAX((sc->nrqs - 1), sc->nwqs); 591 else 592 req_vectors = 1; 593 594 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) { 595 sc->intr_count = req_vectors; 596 rc = pci_alloc_msix(sc->dev, &sc->intr_count); 597 if (rc != 0) { --- 182 unchanged lines hidden (view full) --- 780static int 781oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index) 782{ 783 int rc = 0, i, retry_cnt = 0; 784 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS]; 785 struct mbuf *m, *m_temp; 786 struct oce_wq *wq = sc->wq[wq_index]; 787 struct oce_packet_desc *pd; |
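oce_setup_intr() above now consults is_rss_enabled(sc) instead of the removed sc->rss_enable field when sizing the MSI-X request: with RSS it asks for max(nrqs - 1, nwqs) vectors (the "- 1" excludes the default, non-RSS RQ), otherwise one. A small worked example with made-up queue counts:

        /* Worked example of the vector request above; queue counts are made up. */
        #include <stdio.h>

        #define MAX(a, b) ((a) > (b) ? (a) : (b))

        int
        main(void)
        {
                int nrqs = 5, nwqs = 4;         /* e.g. 4 RSS RQs + 1 default RQ */
                int rss_enabled = 1;
                int req_vectors = rss_enabled ? MAX(nrqs - 1, nwqs) : 1;

                printf("req_vectors=%d\n", req_vectors);        /* prints 4 */
                return (0);
        }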
781 uint32_t out; | |
782 struct oce_nic_hdr_wqe *nichdr; 783 struct oce_nic_frag_wqe *nicfrag; 784 int num_wqes; 785 uint32_t reg_value; 786 boolean_t complete = TRUE; 787 788 m = *mpp; 789 if (!m) --- 21 unchanged lines hidden (view full) --- 811 m = NULL; 812#endif 813 if (m == NULL) { 814 rc = ENXIO; 815 goto free_ret; 816 } 817 } 818 | 788 struct oce_nic_hdr_wqe *nichdr; 789 struct oce_nic_frag_wqe *nicfrag; 790 int num_wqes; 791 uint32_t reg_value; 792 boolean_t complete = TRUE; 793 794 m = *mpp; 795 if (!m) --- 21 unchanged lines hidden (view full) --- 817 m = NULL; 818#endif 819 if (m == NULL) { 820 rc = ENXIO; 821 goto free_ret; 822 } 823 } 824 |
819 out = wq->packets_out + 1; 820 if (out == OCE_WQ_PACKET_ARRAY_SIZE) 821 out = 0; 822 if (out == wq->packets_in) 823 return EBUSY; 824 825 pd = &wq->pckts[wq->packets_out]; | 825 pd = &wq->pckts[wq->pkt_desc_head]; |
826retry: 827 rc = bus_dmamap_load_mbuf_sg(wq->tag, 828 pd->map, 829 m, segs, &pd->nsegs, BUS_DMA_NOWAIT); 830 if (rc == 0) { 831 num_wqes = pd->nsegs + 1; | 826retry: 827 rc = bus_dmamap_load_mbuf_sg(wq->tag, 828 pd->map, 829 m, segs, &pd->nsegs, BUS_DMA_NOWAIT); 830 if (rc == 0) { 831 num_wqes = pd->nsegs + 1; |
832 if (IS_BE(sc)) { | 832 if (IS_BE(sc) || IS_SH(sc)) { |
833 /*Dummy required only for BE3.*/ 834 if (num_wqes & 1) 835 num_wqes++; 836 } 837 if (num_wqes >= RING_NUM_FREE(wq->ring)) { 838 bus_dmamap_unload(wq->tag, pd->map); 839 return EBUSY; 840 } | 833 /*Dummy required only for BE3.*/ 834 if (num_wqes & 1) 835 num_wqes++; 836 } 837 if (num_wqes >= RING_NUM_FREE(wq->ring)) { 838 bus_dmamap_unload(wq->tag, pd->map); 839 return EBUSY; 840 } |
841 | 841 atomic_store_rel_int(&wq->pkt_desc_head, 842 (wq->pkt_desc_head + 1) % \ 843 OCE_WQ_PACKET_ARRAY_SIZE); |
842 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE); 843 pd->mbuf = m; | 844 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE); 845 pd->mbuf = m; |
844 wq->packets_out = out; | |
845 846 nichdr = 847 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe); 848 nichdr->u0.dw[0] = 0; 849 nichdr->u0.dw[1] = 0; 850 nichdr->u0.dw[2] = 0; 851 nichdr->u0.dw[3] = 0; 852 --- 12 unchanged lines hidden (view full) --- 865 nichdr->u0.s.vlan = 1; /*Vlan present*/ 866 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag; 867 } 868 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 869 if (m->m_pkthdr.tso_segsz) { 870 nichdr->u0.s.lso = 1; 871 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz; 872 } | 846 847 nichdr = 848 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe); 849 nichdr->u0.dw[0] = 0; 850 nichdr->u0.dw[1] = 0; 851 nichdr->u0.dw[2] = 0; 852 nichdr->u0.dw[3] = 0; 853 --- 12 unchanged lines hidden (view full) --- 866 nichdr->u0.s.vlan = 1; /*Vlan present*/ 867 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag; 868 } 869 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 870 if (m->m_pkthdr.tso_segsz) { 871 nichdr->u0.s.lso = 1; 872 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz; 873 } |
873 if (!IS_BE(sc)) | 874 if (!IS_BE(sc) || !IS_SH(sc)) |
874 nichdr->u0.s.ipcs = 1; 875 } 876 877 RING_PUT(wq->ring, 1); | 875 nichdr->u0.s.ipcs = 1; 876 } 877 878 RING_PUT(wq->ring, 1); |
878 wq->ring->num_used++; | 879 atomic_add_int(&wq->ring->num_used, 1); |
879 880 for (i = 0; i < pd->nsegs; i++) { 881 nicfrag = 882 RING_GET_PRODUCER_ITEM_VA(wq->ring, 883 struct oce_nic_frag_wqe); 884 nicfrag->u0.s.rsvd0 = 0; 885 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr); 886 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr); 887 nicfrag->u0.s.frag_len = segs[i].ds_len; 888 pd->wqe_idx = wq->ring->pidx; 889 RING_PUT(wq->ring, 1); | 880 881 for (i = 0; i < pd->nsegs; i++) { 882 nicfrag = 883 RING_GET_PRODUCER_ITEM_VA(wq->ring, 884 struct oce_nic_frag_wqe); 885 nicfrag->u0.s.rsvd0 = 0; 886 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr); 887 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr); 888 nicfrag->u0.s.frag_len = segs[i].ds_len; 889 pd->wqe_idx = wq->ring->pidx; 890 RING_PUT(wq->ring, 1); |
890 wq->ring->num_used++; | 891 atomic_add_int(&wq->ring->num_used, 1); |
891 } 892 if (num_wqes > (pd->nsegs + 1)) { 893 nicfrag = 894 RING_GET_PRODUCER_ITEM_VA(wq->ring, 895 struct oce_nic_frag_wqe); 896 nicfrag->u0.dw[0] = 0; 897 nicfrag->u0.dw[1] = 0; 898 nicfrag->u0.dw[2] = 0; 899 nicfrag->u0.dw[3] = 0; 900 pd->wqe_idx = wq->ring->pidx; 901 RING_PUT(wq->ring, 1); | 892 } 893 if (num_wqes > (pd->nsegs + 1)) { 894 nicfrag = 895 RING_GET_PRODUCER_ITEM_VA(wq->ring, 896 struct oce_nic_frag_wqe); 897 nicfrag->u0.dw[0] = 0; 898 nicfrag->u0.dw[1] = 0; 899 nicfrag->u0.dw[2] = 0; 900 nicfrag->u0.dw[3] = 0; 901 pd->wqe_idx = wq->ring->pidx; 902 RING_PUT(wq->ring, 1); |
902 wq->ring->num_used++; | 903 atomic_add_int(&wq->ring->num_used, 1); |
903 pd->nsegs++; 904 } 905 906 sc->ifp->if_opackets++; 907 wq->tx_stats.tx_reqs++; 908 wq->tx_stats.tx_wrbs += num_wqes; 909 wq->tx_stats.tx_bytes += m->m_pkthdr.len; 910 wq->tx_stats.tx_pkts++; 911 912 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map, 913 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 914 reg_value = (num_wqes << 16) | wq->wq_id; | 904 pd->nsegs++; 905 } 906 907 sc->ifp->if_opackets++; 908 wq->tx_stats.tx_reqs++; 909 wq->tx_stats.tx_wrbs += num_wqes; 910 wq->tx_stats.tx_bytes += m->m_pkthdr.len; 911 wq->tx_stats.tx_pkts++; 912 913 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map, 914 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 915 reg_value = (num_wqes << 16) | wq->wq_id; |
915 OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value); | 916 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value); |
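The transmit doorbell is now written to the per-WQ wq->db_offset rather than the fixed PD_TXULP_DB register, presumably so each chip family can supply its own offset; the value layout is unchanged, packing the number of posted WQEs above the WQ id. A tiny sketch of that word, with illustrative helper names:

        /*
         * Sketch of the transmit doorbell value written above: posted WQE count
         * in the upper 16 bits, WQ id in the lower 16.  Names are illustrative.
         */
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t
        tx_db_value(uint16_t num_wqes, uint16_t wq_id)
        {
                return (((uint32_t)num_wqes << 16) | wq_id);
        }

        int
        main(void)
        {
                uint32_t v = tx_db_value(3, 5);

                printf("wqes=%u wq_id=%u\n", v >> 16, v & 0xffff);
                return (0);
        }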
916 917 } else if (rc == EFBIG) { 918 if (retry_cnt == 0) { 919 m_temp = m_defrag(m, M_NOWAIT); 920 if (m_temp == NULL) 921 goto free_ret; 922 m = m_temp; 923 *mpp = m_temp; 924 retry_cnt = retry_cnt + 1; 925 goto retry; 926 } else 927 goto free_ret; 928 } else if (rc == ENOMEM) 929 return rc; 930 else 931 goto free_ret; | 917 918 } else if (rc == EFBIG) { 919 if (retry_cnt == 0) { 920 m_temp = m_defrag(m, M_NOWAIT); 921 if (m_temp == NULL) 922 goto free_ret; 923 m = m_temp; 924 *mpp = m_temp; 925 retry_cnt = retry_cnt + 1; 926 goto retry; 927 } else 928 goto free_ret; 929 } else if (rc == ENOMEM) 930 return rc; 931 else 932 goto free_ret; |
932 | 933 |
933 return 0; 934 935free_ret: 936 m_freem(*mpp); 937 *mpp = NULL; 938 return rc; 939} 940 941 942static void 943oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status) 944{ | 934 return 0; 935 936free_ret: 937 m_freem(*mpp); 938 *mpp = NULL; 939 return rc; 940} 941 942 943static void 944oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status) 945{ |
945 uint32_t in; | |
946 struct oce_packet_desc *pd; 947 POCE_SOFTC sc = (POCE_SOFTC) wq->parent; 948 struct mbuf *m; 949 | 946 struct oce_packet_desc *pd; 947 POCE_SOFTC sc = (POCE_SOFTC) wq->parent; 948 struct mbuf *m; 949 |
950 if (wq->packets_out == wq->packets_in) 951 device_printf(sc->dev, "WQ transmit descriptor missing\n"); 952 953 in = wq->packets_in + 1; 954 if (in == OCE_WQ_PACKET_ARRAY_SIZE) 955 in = 0; 956 957 pd = &wq->pckts[wq->packets_in]; 958 wq->packets_in = in; 959 wq->ring->num_used -= (pd->nsegs + 1); | 950 pd = &wq->pckts[wq->pkt_desc_tail]; 951 atomic_store_rel_int(&wq->pkt_desc_tail, 952 (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE); 953 atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1); |
960 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE); 961 bus_dmamap_unload(wq->tag, pd->map); 962 963 m = pd->mbuf; 964 m_freem(m); 965 pd->mbuf = NULL; 966 | 954 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE); 955 bus_dmamap_unload(wq->tag, pd->map); 956 957 m = pd->mbuf; 958 m_freem(m); 959 pd->mbuf = NULL; 960 |
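Across oce_tx() and oce_tx_complete() the new code drops the packets_out/packets_in pair (and the explicit full-check on it) in favor of pkt_desc_head/pkt_desc_tail indices advanced modulo OCE_WQ_PACKET_ARRAY_SIZE with FreeBSD atomics (atomic_store_rel_int, atomic_add_int, atomic_subtract_int); ring occupancy is instead bounded by the RING_NUM_FREE() check above. A minimal single-producer/single-consumer sketch of that head/tail scheme, using C11 <stdatomic.h> in place of the kernel atomics and placeholder sizes, follows.

        /*
         * Minimal SPSC sketch of the pkt_desc_head / pkt_desc_tail indexing
         * shown above.  C11 atomics stand in for the kernel atomics; ARRAY_SIZE
         * and the descriptor contents are placeholders.
         */
        #include <stdatomic.h>
        #include <stdio.h>

        #define ARRAY_SIZE 8            /* stands in for OCE_WQ_PACKET_ARRAY_SIZE */

        struct pkt_desc { int mbuf; };  /* placeholder payload */

        static struct pkt_desc pckts[ARRAY_SIZE];
        static atomic_uint pkt_desc_head;       /* advanced by the transmit path */
        static atomic_uint pkt_desc_tail;       /* advanced by the completion path */

        /* Transmit side: claim the descriptor at head, then publish the new head. */
        static struct pkt_desc *
        tx_get_desc(void)
        {
                unsigned int head = atomic_load_explicit(&pkt_desc_head,
                    memory_order_relaxed);
                struct pkt_desc *pd = &pckts[head];

                atomic_store_explicit(&pkt_desc_head, (head + 1) % ARRAY_SIZE,
                    memory_order_release);
                return (pd);
        }

        /* Completion side: retire the descriptor at tail, then publish the new tail. */
        static struct pkt_desc *
        tx_complete_desc(void)
        {
                unsigned int tail = atomic_load_explicit(&pkt_desc_tail,
                    memory_order_relaxed);
                struct pkt_desc *pd = &pckts[tail];

                atomic_store_explicit(&pkt_desc_tail, (tail + 1) % ARRAY_SIZE,
                    memory_order_release);
                return (pd);
        }

        int
        main(void)
        {
                tx_get_desc()->mbuf = 42;                 /* oce_tx() side */
                printf("%d\n", tx_complete_desc()->mbuf); /* oce_tx_complete() side */
                return (0);
        }

Like the driver, the sketch publishes the new head before the descriptor is filled in; that is tolerable there because the completion path only retires entries the hardware has already reported done.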
961 |
|
967 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) { 968 if (wq->ring->num_used < (wq->ring->num_items / 2)) { 969 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE); 970 oce_tx_restart(sc, wq); 971 } 972 } 973} 974 --- 86 unchanged lines hidden (view full) --- 1061 1062void 1063oce_tx_task(void *arg, int npending) 1064{ 1065 struct oce_wq *wq = arg; 1066 POCE_SOFTC sc = wq->parent; 1067 struct ifnet *ifp = sc->ifp; 1068 int rc = 0; | 962 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) { 963 if (wq->ring->num_used < (wq->ring->num_items / 2)) { 964 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE); 965 oce_tx_restart(sc, wq); 966 } 967 } 968} 969 --- 86 unchanged lines hidden (view full) --- 1056 1057void 1058oce_tx_task(void *arg, int npending) 1059{ 1060 struct oce_wq *wq = arg; 1061 POCE_SOFTC sc = wq->parent; 1062 struct ifnet *ifp = sc->ifp; 1063 int rc = 0; |
1069 | 1064 |
1070#if __FreeBSD_version >= 800000 | 1065#if __FreeBSD_version >= 800000 |
1071 if (TRY_LOCK(&wq->tx_lock)) { 1072 rc = oce_multiq_transmit(ifp, NULL, wq); 1073 if (rc) { 1074 device_printf(sc->dev, 1075 "TX[%d] restart failed\n", wq->queue_index); 1076 } 1077 UNLOCK(&wq->tx_lock); | 1066 LOCK(&wq->tx_lock); 1067 rc = oce_multiq_transmit(ifp, NULL, wq); 1068 if (rc) { 1069 device_printf(sc->dev, 1070 "TX[%d] restart failed\n", wq->queue_index); |
1078 } | 1071 } |
1072 UNLOCK(&wq->tx_lock); |
|
1079#else 1080 oce_start(ifp); 1081#endif 1082 1083} 1084 1085 1086void --- 42 unchanged lines hidden (view full) --- 1129oce_wq_handler(void *arg) 1130{ 1131 struct oce_wq *wq = (struct oce_wq *)arg; 1132 POCE_SOFTC sc = wq->parent; 1133 struct oce_cq *cq = wq->cq; 1134 struct oce_nic_tx_cqe *cqe; 1135 int num_cqes = 0; 1136 | 1073#else 1074 oce_start(ifp); 1075#endif 1076 1077} 1078 1079 1080void --- 42 unchanged lines hidden (view full) --- 1123oce_wq_handler(void *arg) 1124{ 1125 struct oce_wq *wq = (struct oce_wq *)arg; 1126 POCE_SOFTC sc = wq->parent; 1127 struct oce_cq *cq = wq->cq; 1128 struct oce_nic_tx_cqe *cqe; 1129 int num_cqes = 0; 1130 |
1137 LOCK(&wq->tx_lock); | |
1138 bus_dmamap_sync(cq->ring->dma.tag, 1139 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); 1140 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); 1141 while (cqe->u0.dw[3]) { 1142 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe)); 1143 1144 wq->ring->cidx = cqe->u0.s.wqe_index + 1; 1145 if (wq->ring->cidx >= wq->ring->num_items) --- 7 unchanged lines hidden (view full) --- 1153 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); 1154 cqe = 1155 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); 1156 num_cqes++; 1157 } 1158 1159 if (num_cqes) 1160 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); | 1131 bus_dmamap_sync(cq->ring->dma.tag, 1132 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); 1133 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); 1134 while (cqe->u0.dw[3]) { 1135 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe)); 1136 1137 wq->ring->cidx = cqe->u0.s.wqe_index + 1; 1138 if (wq->ring->cidx >= wq->ring->num_items) --- 7 unchanged lines hidden (view full) --- 1146 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); 1147 cqe = 1148 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); 1149 num_cqes++; 1150 } 1151 1152 if (num_cqes) 1153 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); |
1161 UNLOCK(&wq->tx_lock); | |
1162 1163 return 0; 1164} 1165 1166 1167static int 1168oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq) 1169{ --- 58 unchanged lines hidden (view full) --- 1228 len = cqe->u0.s.pkt_size; 1229 if (!len) { 1230 /*partial DMA workaround for Lancer*/ 1231 oce_discard_rx_comp(rq, cqe); 1232 goto exit; 1233 } 1234 1235 /* Get vlan_tag value */ | 1154 1155 return 0; 1156} 1157 1158 1159static int 1160oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq) 1161{ --- 58 unchanged lines hidden (view full) --- 1220 len = cqe->u0.s.pkt_size; 1221 if (!len) { 1222 /*partial DMA workaround for Lancer*/ 1223 oce_discard_rx_comp(rq, cqe); 1224 goto exit; 1225 } 1226 1227 /* Get vlan_tag value */ |
1236 if(IS_BE(sc)) | 1228 if(IS_BE(sc) || IS_SH(sc)) |
1237 vtag = BSWAP_16(cqe->u0.s.vlan_tag); 1238 else 1239 vtag = cqe->u0.s.vlan_tag; 1240 1241 1242 for (i = 0; i < cqe->u0.s.num_fragments; i++) { 1243 1244 if (rq->packets_out == rq->packets_in) { --- 44 unchanged lines hidden (view full) --- 1289 if (m) { 1290 if (!oce_cqe_portid_valid(sc, cqe)) { 1291 m_freem(m); 1292 goto exit; 1293 } 1294 1295 m->m_pkthdr.rcvif = sc->ifp; 1296#if __FreeBSD_version >= 800000 | 1229 vtag = BSWAP_16(cqe->u0.s.vlan_tag); 1230 else 1231 vtag = cqe->u0.s.vlan_tag; 1232 1233 1234 for (i = 0; i < cqe->u0.s.num_fragments; i++) { 1235 1236 if (rq->packets_out == rq->packets_in) { --- 44 unchanged lines hidden (view full) --- 1281 if (m) { 1282 if (!oce_cqe_portid_valid(sc, cqe)) { 1283 m_freem(m); 1284 goto exit; 1285 } 1286 1287 m->m_pkthdr.rcvif = sc->ifp; 1288#if __FreeBSD_version >= 800000 |
1297 m->m_pkthdr.flowid = rq->queue_index; | 1289 if (rq->queue_index) 1290 m->m_pkthdr.flowid = (rq->queue_index - 1); 1291 else 1292 m->m_pkthdr.flowid = rq->queue_index; |
1298 m->m_flags |= M_FLOWID; 1299#endif 1300 /* This determines if the vlan tag is valid */ 1301 if (oce_cqe_vtp_valid(sc, cqe)) { 1302 if (sc->function_mode & FNM_FLEX10_MODE) { 1303 /* FLEX10. If QnQ is not set, neglect VLAN */ 1304 if (cqe->u0.s.qnq) { 1305 m->m_pkthdr.ether_vtag = vtag; --- 90 unchanged lines hidden (view full) --- 1396 1397 1398static int 1399oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) 1400{ 1401 struct oce_nic_rx_cqe_v1 *cqe_v1; 1402 int port_id = 0; 1403 | 1293 m->m_flags |= M_FLOWID; 1294#endif 1295 /* This determines if the vlan tag is valid */ 1296 if (oce_cqe_vtp_valid(sc, cqe)) { 1297 if (sc->function_mode & FNM_FLEX10_MODE) { 1298 /* FLEX10. If QnQ is not set, neglect VLAN */ 1299 if (cqe->u0.s.qnq) { 1300 m->m_pkthdr.ether_vtag = vtag; --- 90 unchanged lines hidden (view full) --- 1391 1392 1393static int 1394oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) 1395{ 1396 struct oce_nic_rx_cqe_v1 *cqe_v1; 1397 int port_id = 0; 1398
1404 if (sc->be3_native && IS_BE(sc)) { | 1399 if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) { |
1405 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; 1406 port_id = cqe_v1->u0.s.port; 1407 if (sc->port_id != port_id) 1408 return 0; 1409 } else 1410 ;/* For BE3 legacy and Lancer this is dummy */ 1411 1412 return 1; --- 129 unchanged lines hidden (view full) --- 1542{ 1543 struct oce_rq *rq = (struct oce_rq *)arg; 1544 struct oce_cq *cq = rq->cq; 1545 POCE_SOFTC sc = rq->parent; 1546 struct oce_nic_rx_cqe *cqe; 1547 int num_cqes = 0, rq_buffers_used = 0; 1548 1549 | 1400 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; 1401 port_id = cqe_v1->u0.s.port; 1402 if (sc->port_id != port_id) 1403 return 0; 1404 } else 1405 ;/* For BE3 legacy and Lancer this is dummy */ 1406 1407 return 1; --- 129 unchanged lines hidden (view full) --- 1537{ 1538 struct oce_rq *rq = (struct oce_rq *)arg; 1539 struct oce_cq *cq = rq->cq; 1540 POCE_SOFTC sc = rq->parent; 1541 struct oce_nic_rx_cqe *cqe; 1542 int num_cqes = 0, rq_buffers_used = 0; 1543 1544 |
1550 LOCK(&rq->rx_lock); | |
1551 bus_dmamap_sync(cq->ring->dma.tag, 1552 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); 1553 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); 1554 while (cqe->u0.dw[2]) { 1555 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe)); 1556 1557 RING_GET(rq->ring, 1); 1558 if (cqe->u0.s.error == 0) { --- 30 unchanged lines hidden (view full) --- 1589 1590 if (num_cqes) { 1591 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); 1592 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending; 1593 if (rq_buffers_used > 1) 1594 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1)); 1595 } 1596 | 1545 bus_dmamap_sync(cq->ring->dma.tag, 1546 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); 1547 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); 1548 while (cqe->u0.dw[2]) { 1549 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe)); 1550 1551 RING_GET(rq->ring, 1); 1552 if (cqe->u0.s.error == 0) { --- 30 unchanged lines hidden (view full) --- 1583 1584 if (num_cqes) { 1585 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); 1586 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending; 1587 if (rq_buffers_used > 1) 1588 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1)); 1589 } 1590 |
1597 UNLOCK(&rq->rx_lock); 1598 | |
1599 return 0; 1600 1601} 1602 1603 1604 1605 1606/***************************************************************************** --- 277 unchanged lines hidden (view full) --- 1884 oce_refresh_queue_stats(sc); 1885 oce_mac_addr_set(sc); 1886 1887 /* TX Watch Dog*/ 1888 for (i = 0; i < sc->nwqs; i++) 1889 oce_tx_restart(sc, sc->wq[i]); 1890 1891 /* calculate and set the eq delay for optimal interrupt rate */ | 1591 return 0; 1592 1593} 1594 1595 1596 1597 1598/***************************************************************************** --- 277 unchanged lines hidden (view full) --- 1876 oce_refresh_queue_stats(sc); 1877 oce_mac_addr_set(sc); 1878 1879 /* TX Watch Dog*/ 1880 for (i = 0; i < sc->nwqs; i++) 1881 oce_tx_restart(sc, sc->wq[i]); 1882 1883 /* calculate and set the eq delay for optimal interrupt rate */ |
1892 if (IS_BE(sc)) | 1884 if (IS_BE(sc) || IS_SH(sc)) |
1893 oce_eqd_set_periodic(sc); 1894 1895 callout_reset(&sc->timer, hz, oce_local_timer, sc); 1896} 1897 1898 1899/* NOTE : This should only be called holding 1900 * DEVICE_LOCK. --- 174 unchanged lines hidden (view full) --- 2075 2076 return 0; 2077} 2078 2079 2080static void 2081setup_max_queues_want(POCE_SOFTC sc) 2082{ | 1885 oce_eqd_set_periodic(sc); 1886 1887 callout_reset(&sc->timer, hz, oce_local_timer, sc); 1888} 1889 1890 1891/* NOTE : This should only be called holding 1892 * DEVICE_LOCK. --- 174 unchanged lines hidden (view full) --- 2067 2068 return 0; 2069} 2070 2071 2072static void 2073setup_max_queues_want(POCE_SOFTC sc) 2074{ |
2083 int max_rss = 0; 2084 | |
2085 /* Check if it is FLEX machine. If so don't use RSS */ 2086 if ((sc->function_mode & FNM_FLEX10_MODE) || 2087 (sc->function_mode & FNM_UMC_MODE) || 2088 (sc->function_mode & FNM_VNIC_MODE) || | 2075 /* Check if it is FLEX machine. If so don't use RSS */ 2076 if ((sc->function_mode & FNM_FLEX10_MODE) || 2077 (sc->function_mode & FNM_UMC_MODE) || 2078 (sc->function_mode & FNM_VNIC_MODE) ||
2089 (!sc->rss_enable) || | 2079 (!is_rss_enabled(sc)) || |
2090 (sc->flags & OCE_FLAGS_BE2)) { 2091 sc->nrqs = 1; 2092 sc->nwqs = 1; | 2080 (sc->flags & OCE_FLAGS_BE2)) { 2081 sc->nrqs = 1; 2082 sc->nwqs = 1; |
2093 sc->rss_enable = 0; 2094 } else { 2095 /* For multiq, our deisgn is to have TX rings equal to 2096 RSS rings. So that we can pair up one RSS ring and TX 2097 to a single intr, which improves CPU cache efficiency. 2098 */ 2099 if (IS_BE(sc) && (!sc->be3_native)) 2100 max_rss = OCE_LEGACY_MODE_RSS; 2101 else 2102 max_rss = OCE_MAX_RSS; 2103 2104 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */ 2105 sc->nwqs = MIN(OCE_NCPUS, max_rss); | |
2106 } | 2083 } |
2107 | |
2108} 2109 2110 2111static void 2112update_queues_got(POCE_SOFTC sc) 2113{ | 2084} 2085 2086 2087static void 2088update_queues_got(POCE_SOFTC sc) 2089{ |
2114 if (sc->rss_enable) { | 2090 if (is_rss_enabled(sc)) { |
2115 sc->nrqs = sc->intr_count + 1; 2116 sc->nwqs = sc->intr_count; 2117 } else { 2118 sc->nrqs = 1; 2119 sc->nwqs = 1; 2120 } 2121} 2122 --- 68 unchanged lines hidden (view full) --- 2191oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m) 2192{ 2193 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \ 2194 oce_check_ipv6_ext_hdr(m)) { 2195 return TRUE; 2196 } 2197 return FALSE; 2198} | 2091 sc->nrqs = sc->intr_count + 1; 2092 sc->nwqs = sc->intr_count; 2093 } else { 2094 sc->nrqs = 1; 2095 sc->nwqs = 1; 2096 } 2097} 2098 --- 68 unchanged lines hidden (view full) --- 2167oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m) 2168{ 2169 if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \ 2170 oce_check_ipv6_ext_hdr(m)) { 2171 return TRUE; 2172 } 2173 return FALSE; 2174} |
2175 2176static void 2177oce_get_config(POCE_SOFTC sc) 2178{ 2179 int rc = 0; 2180 uint32_t max_rss = 0; 2181 2182 if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native)) 2183 max_rss = OCE_LEGACY_MODE_RSS; 2184 else 2185 max_rss = OCE_MAX_RSS; 2186 2187 if (!IS_BE(sc)) { 2188 rc = oce_get_func_config(sc); 2189 if (rc) { 2190 sc->nwqs = OCE_MAX_WQ; 2191 sc->nrssqs = max_rss; 2192 sc->nrqs = sc->nrssqs + 1; 2193 } 2194 } 2195 else { 2196 rc = oce_get_profile_config(sc); 2197 sc->nrssqs = max_rss; 2198 sc->nrqs = sc->nrssqs + 1; 2199 if (rc) 2200 sc->nwqs = OCE_MAX_WQ; 2201 } 2202} |
|
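The new oce_get_config() above queries the firmware for per-function limits — oce_get_func_config() on non-BE parts, oce_get_profile_config() on BE — and falls back to fixed maxima when the query fails; on the fallback path nrqs is the RSS queue count plus one default (non-RSS) RX queue. A short sketch of that fallback computation, using placeholder constants rather than the real OCE_MAX_WQ / OCE_MAX_RSS / OCE_LEGACY_MODE_RSS values:

        /*
         * Sketch of the fallback path in oce_get_config() above, when the
         * firmware query fails.  Constants and names are placeholders.
         */
        #include <stdio.h>

        #define MAX_WQ          8       /* placeholder for OCE_MAX_WQ */
        #define MAX_RSS         4       /* placeholder for OCE_MAX_RSS */
        #define LEGACY_MODE_RSS 4       /* placeholder for OCE_LEGACY_MODE_RSS */

        struct qcfg { int nwqs, nrssqs, nrqs; };

        static struct qcfg
        fallback_config(int be_or_sh, int be3_native)
        {
                struct qcfg c;
                int max_rss = (be_or_sh && !be3_native) ? LEGACY_MODE_RSS : MAX_RSS;

                c.nwqs = MAX_WQ;
                c.nrssqs = max_rss;
                c.nrqs = c.nrssqs + 1;          /* +1 for the default RX queue */
                return (c);
        }

        int
        main(void)
        {
                struct qcfg c = fallback_config(1, 1);

                printf("nwqs=%d nrssqs=%d nrqs=%d\n", c.nwqs, c.nrssqs, c.nrqs);
                return (0);
        }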