/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "lio_rxtx.h"

int
lio_xmit(struct lio *lio, struct lio_instr_queue *iq,
	 struct mbuf **m_headp)
{
	struct lio_data_pkt		ndata;
	union lio_cmd_setup		cmdsetup;
	struct lio_mbuf_free_info	*finfo = NULL;
	struct octeon_device		*oct = iq->oct_dev;
	struct lio_iq_stats		*stats;
	struct octeon_instr_irh		*irh;
	struct lio_request_list		*tx_buf;
	union lio_tx_info		*tx_info;
	struct mbuf			*m_head;
	bus_dma_segment_t		segs[LIO_MAX_SG];
	bus_dmamap_t			map;
	uint64_t			dptr = 0;
	uint32_t			tag = 0;
	int				iq_no = 0;
	int				nsegs;
	int				status = 0;

	iq_no = iq->txpciq.s.q_no;
	tag = iq_no;
	stats = &oct->instr_queue[iq_no]->stats;
	tx_buf = iq->request_list + iq->host_write_index;

	/*
	 * Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up)) {
		lio_dev_info(oct, "Transmit failed link_status : %d\n",
			     lio->linfo.link.s.link_up);
		status = ENETDOWN;
		goto drop_packet;
	}

	if (lio_iq_is_full(oct, iq_no)) {
		/* Defer sending if queue is full */
		lio_dev_dbg(oct, "Transmit failed iq:%d full\n", iq_no);
		stats->tx_iq_busy++;
		return (ENOBUFS);
	}

	map = tx_buf->map;
	status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs,
					 BUS_DMA_NOWAIT);
	if (status == EFBIG) {
		struct mbuf	*m;

		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			stats->mbuf_defrag_failed++;
			goto drop_packet;
		}

		*m_headp = m;
		status = bus_dmamap_load_mbuf_sg(iq->txtag, map,
						 *m_headp, segs, &nsegs,
						 BUS_DMA_NOWAIT);
	}

	if (status == ENOMEM) {
		goto retry;
	} else if (status) {
		stats->tx_dmamap_fail++;
		lio_dev_dbg(oct, "bus_dmamap_load_mbuf_sg failed with error %d. iq:%d",
			    status, iq_no);
		goto drop_packet;
	}

	m_head = *m_headp;

	/* Info used to unmap and free the buffers. */
	finfo = &tx_buf->finfo;
	finfo->map = map;
	finfo->mb = m_head;

	/* Prepare the attributes for the data to be passed to OSI. */
	bzero(&ndata, sizeof(struct lio_data_pkt));

	ndata.buf = (void *)finfo;
	ndata.q_no = iq_no;
	ndata.datasize = m_head->m_pkthdr.len;

	cmdsetup.cmd_setup64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		cmdsetup.s.ip_csum = 1;

	if ((m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) ||
	    (m_head->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)))
		cmdsetup.s.transport_csum = 1;

	if (nsegs == 1) {
		cmdsetup.s.u.datasize = segs[0].ds_len;
		lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		dptr = segs[0].ds_addr;
		ndata.cmd.cmd3.dptr = dptr;
		ndata.reqtype = LIO_REQTYPE_NORESP_NET;

	} else {
		struct lio_gather	*g;
		int			i;

		mtx_lock(&lio->glist_lock[iq_no]);
		g = (struct lio_gather *)
			lio_delete_first_node(&lio->ghead[iq_no]);
		mtx_unlock(&lio->glist_lock[iq_no]);

		if (g == NULL) {
			lio_dev_err(oct,
				    "Transmit scatter gather: glist null!\n");
			goto retry;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = nsegs;
		lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		bzero(g->sg, g->sg_size);

		i = 0;
		while (nsegs--) {
			g->sg[(i >> 2)].ptr[(i & 3)] = segs[i].ds_addr;
			lio_add_sg_size(&g->sg[(i >> 2)], segs[i].ds_len,
					(i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd3.dptr = dptr;
		finfo->g = g;

		ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
	tx_info = (union lio_tx_info *)&ndata.cmd.cmd3.ossp[0];

	if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
		tx_info->s.gso_size = m_head->m_pkthdr.tso_segsz;
		tx_info->s.gso_segs = howmany(m_head->m_pkthdr.len,
					      m_head->m_pkthdr.tso_segsz);
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		irh->priority = m_head->m_pkthdr.ether_vtag >> 13;
		irh->vlan = m_head->m_pkthdr.ether_vtag & 0xfff;
	}

	status = lio_send_data_pkt(oct, &ndata);
	if (status == LIO_IQ_SEND_FAILED)
		goto retry;

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;

	stats->tx_tot_bytes += ndata.datasize;

	return (0);

retry:
	return (ENOBUFS);

drop_packet:
	stats->tx_dropped++;
	lio_dev_err(oct, "IQ%d Transmit dropped: %llu\n", iq_no,
		    LIO_CAST64(stats->tx_dropped));

	m_freem(*m_headp);
	*m_headp = NULL;

	return (status);
}

int
lio_mq_start_locked(struct ifnet *ifp, struct lio_instr_queue *iq)
{
	struct lio	*lio = if_getsoftc(ifp);
	struct mbuf	*next;
	int		err = 0;

	if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
	    (!lio->linfo.link.s.link_up))
		return (-ENETDOWN);

	/* Process the queue */
	while ((next = drbr_peek(ifp, iq->br)) != NULL) {
		err = lio_xmit(lio, iq, &next);
		if (err) {
			if (next == NULL)
				drbr_advance(ifp, iq->br);
			else
				drbr_putback(ifp, iq->br, next);
			break;
		}
		drbr_advance(ifp, iq->br);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
		    (!lio->linfo.link.s.link_up))
			break;
	}

	return (err);
}

int
lio_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_instr_queue	*iq;
	int			err = 0, i;
#ifdef RSS
	uint32_t		bucket_id;
#endif

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
				    &bucket_id) == 0) {
			i = bucket_id % oct->num_iqs;
			if (bucket_id > oct->num_iqs)
				lio_dev_dbg(oct,
					    "bucket_id (%d) > num_iqs (%d)\n",
					    bucket_id, oct->num_iqs);
		} else
#endif
			i = m->m_pkthdr.flowid % oct->num_iqs;
	} else
		i = curcpu % oct->num_iqs;

	iq = oct->instr_queue[i];

	err = drbr_enqueue(ifp, iq->br, m);
	if (err)
		return (err);

	if (mtx_trylock(&iq->enq_lock)) {
		lio_mq_start_locked(ifp, iq);
		mtx_unlock(&iq->enq_lock);
	}

	return (err);
}

void
lio_qflush(struct ifnet *ifp)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_instr_queue	*iq;
	struct mbuf		*m;
	int			i;

	for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;

		iq = oct->instr_queue[i];

		mtx_lock(&iq->enq_lock);
		while ((m = buf_ring_dequeue_sc(iq->br)) != NULL)
			m_freem(m);

		mtx_unlock(&iq->enq_lock);
	}

	if_qflush(ifp);
}