/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include <machine/in_cksum.h>

#ifdef RSS
#include <net/rss_config.h>
#endif

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_rx.h"

#define	RX_REFILL_THRESHOLD(_entries)	(EFX_RXQ_LIMIT(_entries) * 9 / 10)

#ifdef SFXGE_LRO

SYSCTL_NODE(_hw_sfxge, OID_AUTO, lro, CTLFLAG_RD, NULL,
	    "Large receive offload (LRO) parameters");

#define	SFXGE_LRO_PARAM(_param)	SFXGE_PARAM(lro._param)

/* Size of the LRO hash table.  Must be a power of 2.  A larger table
 * means we can accelerate a larger number of streams.
 */
static unsigned lro_table_size = 128;
TUNABLE_INT(SFXGE_LRO_PARAM(table_size), &lro_table_size);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, table_size, CTLFLAG_RDTUN,
	    &lro_table_size, 0,
	    "Size of the LRO hash table (must be a power of 2)");

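/* The power-of-2 requirement allows buckets to be selected with a mask
 * rather than a modulo; e.g. with the default table size of 128:
 *
 *	bucket = conn_hash & (128 - 1);
 *
 * which is how sfxge_lro() and sfxge_lro_new_conn() index the table.
 */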

/* Maximum length of a hash chain.  If chains get too long then the lookup
 * time increases and may exceed the benefit of LRO.
 */
static unsigned lro_chain_max = 20;
TUNABLE_INT(SFXGE_LRO_PARAM(chain_max), &lro_chain_max);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, chain_max, CTLFLAG_RDTUN,
	    &lro_chain_max, 0,
	    "The maximum length of a hash chain");

/* Maximum time (in ticks) that a connection can be idle before its LRO
 * state is discarded.
 */
static unsigned lro_idle_ticks; /* initialised in sfxge_rx_init() */
TUNABLE_INT(SFXGE_LRO_PARAM(idle_ticks), &lro_idle_ticks);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, idle_ticks, CTLFLAG_RDTUN,
	    &lro_idle_ticks, 0,
	    "The maximum time (in ticks) that a connection can be idle "
	    "before its LRO state is discarded");

/* Number of packets with payload that must arrive in-order before a
 * connection is eligible for LRO.  The idea is we should avoid coalescing
 * segments when the sender is in slow-start because reducing the ACK rate
 * can damage performance.
 */
static int lro_slow_start_packets = 2000;
TUNABLE_INT(SFXGE_LRO_PARAM(slow_start_packets), &lro_slow_start_packets);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, slow_start_packets, CTLFLAG_RDTUN,
	    &lro_slow_start_packets, 0,
	    "Number of packets with payload that must arrive in-order before "
	    "a connection is eligible for LRO");

/* Number of packets with payload that must arrive in-order following loss
 * before a connection is eligible for LRO.  The idea is we should avoid
 * coalescing segments when the sender is recovering from loss, because
 * reducing the ACK rate can damage performance.
 */
static int lro_loss_packets = 20;
TUNABLE_INT(SFXGE_LRO_PARAM(loss_packets), &lro_loss_packets);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, loss_packets, CTLFLAG_RDTUN,
	    &lro_loss_packets, 0,
	    "Number of packets with payload that must arrive in-order "
	    "following loss before a connection is eligible for LRO");

/* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */
#define	SFXGE_LRO_L2_ID_VLAN	0x4000
#define	SFXGE_LRO_L2_ID_IPV6	0x8000
#define	SFXGE_LRO_CONN_IS_VLAN_ENCAP(c)	((c)->l2_id & SFXGE_LRO_L2_ID_VLAN)
#define	SFXGE_LRO_CONN_IS_TCPIPV4(c)	(!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6))

/* Compare IPv6 addresses, avoiding conditional branches */
static unsigned long ipv6_addr_cmp(const struct in6_addr *left,
				   const struct in6_addr *right)
{
#if LONG_BIT == 64
	const uint64_t *left64 = (const uint64_t *)left;
	const uint64_t *right64 = (const uint64_t *)right;
	return (left64[0] - right64[0]) | (left64[1] - right64[1]);
#else
	return (left->s6_addr32[0] - right->s6_addr32[0]) |
	       (left->s6_addr32[1] - right->s6_addr32[1]) |
	       (left->s6_addr32[2] - right->s6_addr32[2]) |
	       (left->s6_addr32[3] - right->s6_addr32[3]);
#endif
}

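/* NB: ipv6_addr_cmp() is an equality test rather than an ordering: it
 * returns zero if and only if the addresses match.  OR-ing the per-word
 * differences lets callers combine several such tests (see the connection
 * lookup in sfxge_lro()) without any conditional branches.
 */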

#endif	/* SFXGE_LRO */

void
sfxge_rx_qflush_done(struct sfxge_rxq *rxq)
{

	rxq->flush_state = SFXGE_FLUSH_DONE;
}

void
sfxge_rx_qflush_failed(struct sfxge_rxq *rxq)
{

	rxq->flush_state = SFXGE_FLUSH_FAILED;
}

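/* RSS hash key.  With the kernel RSS option the key is obtained from the
 * kernel so that all NICs hash identically; otherwise the built-in default
 * below is used.  (It appears to be the well-known example Toeplitz key
 * from Microsoft's RSS specification, so hash output can be checked
 * against the published test vectors.)
 */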
#ifdef RSS
static uint8_t toep_key[RSS_KEYSIZE];
#else
static uint8_t toep_key[] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
#endif

static void
sfxge_rx_post_refill(void *arg)
{
	struct sfxge_rxq *rxq = arg;
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	uint16_t magic;

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];
	magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq);

	/* This is guaranteed due to the start/stop order of rx and ev */
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not started"));
	KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
	    ("rxq not started"));
	efx_ev_qpost(evq->common, magic);
}

static void
sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying)
{
	/* Initially retry after 100 ms, but back off in case of
	 * repeated failures as we probably have to wait for the
	 * administrator to raise the pool limit. */
	if (retrying)
		rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz);
	else
		rxq->refill_delay = hz / 10;

	callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay,
	    sfxge_rx_post_refill, rxq);
}

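/* Refills are batched: buffer DMA addresses are accumulated in a small
 * on-stack array and efx_rx_qpost() is called once per batch of up to
 * SFXGE_REFILL_BATCH descriptors rather than once per buffer, amortising
 * the posting cost across the batch.
 */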
#define	SFXGE_REFILL_BATCH	64

static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
	    ("ntodo > EFX_RXQ_LIMIT(rxq->entries)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size - sc->rx_buffer_align;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    sc->rx_cluster_size);
		if (m == NULL)
			break;

		/* m_len specifies length of area to be mapped for DMA */
		m->m_len = mblksize;
		m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE);
		m->m_data += sc->rx_buffer_align;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
		addr[batch++] = seg.ds_addr;

		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
	    BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed);

	/* The queue could still be empty if no descriptors were actually
	 * pushed, in which case there will be no event to cause the next
	 * refill, so we must schedule a refill ourselves.
	 */
	if (rxq->pushed == rxq->completed) {
		sfxge_rx_schedule_refill(rxq, retrying);
	}
}

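/* Deferred-refill path: this runs when the SFXGE_SW_EV_RX_QREFILL
 * software event posted by sfxge_rx_post_refill() is serviced, so it
 * requests a full refill with retrying == B_TRUE to keep backing off
 * while the mbuf pool remains exhausted.
 */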
void
sfxge_rx_qrefill(struct sfxge_rxq *rxq)
{

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	/* Make sure the queue is full */
	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE);
}

static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m)
{
	struct ifnet *ifp = sc->ifnet;

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.csum_data = 0xffff;
	ifp->if_input(ifp, m);
}

static void
sfxge_rx_deliver(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_desc)
{
	struct sfxge_softc *sc = rxq->sc;
	struct mbuf *m = rx_desc->mbuf;
	int flags = rx_desc->flags;
	int csum_flags;

	/* Convert checksum flags */
	csum_flags = (flags & EFX_CKSUM_IPV4) ?
	    (CSUM_IP_CHECKED | CSUM_IP_VALID) : 0;
	if (flags & EFX_CKSUM_TCPUDP)
		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->m_pkthdr.flowid =
		    efx_pseudo_hdr_hash_get(rxq->common,
					    EFX_RX_HASHALG_TOEPLITZ,
					    mtod(m, uint8_t *));
		/* The hash covers a 4-tuple for TCP only */
		M_HASHTYPE_SET(m,
		    (flags & EFX_PKT_IPV4) ?
			((flags & EFX_PKT_TCP) ?
			    M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_IPV4) :
			((flags & EFX_PKT_TCP) ?
			    M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_IPV6));
	}
	m->m_data += sc->rx_prefix_size;
	m->m_len = rx_desc->size - sc->rx_prefix_size;
	m->m_pkthdr.len = m->m_len;
	m->m_pkthdr.csum_flags = csum_flags;
	__sfxge_rx_deliver(sc, rx_desc->mbuf);

	rx_desc->flags = EFX_DISCARD;
	rx_desc->mbuf = NULL;
}

#ifdef SFXGE_LRO

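/* LRO bookkeeping: sfxge_lro_start() converts the IP length field to
 * host order so that sfxge_lro_merge() can grow it with plain additions;
 * sfxge_lro_deliver() converts it back to network order and recomputes
 * the IPv4 header checksum just before the coalesced packet is handed to
 * the stack.
 */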
static void
sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c)
{
	struct sfxge_softc *sc = st->sc;
	struct mbuf *m = c->mbuf;
	struct tcphdr *c_th;
	int csum_flags;

	KASSERT(m, ("no mbuf to deliver"));

	++st->n_bursts;

	/* Finish off packet munging and recalculate IP header checksum. */
	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = c->nh;
		iph->ip_len = htons(iph->ip_len);
		iph->ip_sum = 0;
		iph->ip_sum = in_cksum_hdr(iph);
		c_th = (struct tcphdr *)(iph + 1);
		csum_flags = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
		    CSUM_IP_CHECKED | CSUM_IP_VALID);
	} else {
		struct ip6_hdr *iph = c->nh;
		iph->ip6_plen = htons(iph->ip6_plen);
		c_th = (struct tcphdr *)(iph + 1);
		csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	}

	c_th->th_win = c->th_last->th_win;
	c_th->th_ack = c->th_last->th_ack;
	if (c_th->th_off == c->th_last->th_off) {
		/* Copy TCP options (take care to avoid going negative). */
		int optlen = ((c_th->th_off - 5) & 0xf) << 2u;
		memcpy(c_th + 1, c->th_last + 1, optlen);
	}

	m->m_pkthdr.flowid = c->conn_hash;
	M_HASHTYPE_SET(m,
	    SFXGE_LRO_CONN_IS_TCPIPV4(c) ?
		M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_TCP_IPV6);

	m->m_pkthdr.csum_flags = csum_flags;
	__sfxge_rx_deliver(sc, m);

	c->mbuf = NULL;
	c->delivered = 1;
}

/* Drop the given connection, and add it to the free list. */
static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
{
	unsigned bucket;

	KASSERT(!c->mbuf, ("found orphaned mbuf"));

	if (c->next_buf.mbuf != NULL) {
		sfxge_rx_deliver(rxq, &c->next_buf);
		LIST_REMOVE(c, active_link);
	}

	bucket = c->conn_hash & rxq->lro.conns_mask;
	KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong"));
	--rxq->lro.conns_n[bucket];
	TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
	TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link);
}

/* Stop tracking connections that have gone idle in order to keep hash
 * chains short.
 */
static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now)
{
	struct sfxge_lro_conn *c;
	unsigned i;

	KASSERT(LIST_EMPTY(&rxq->lro.active_conns),
	    ("found active connections"));

	rxq->lro.last_purge_ticks = now;
	for (i = 0; i <= rxq->lro.conns_mask; ++i) {
		if (TAILQ_EMPTY(&rxq->lro.conns[i]))
			continue;

		c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq);
		if (now - c->last_pkt_ticks > lro_idle_ticks) {
			++rxq->lro.n_drop_idle;
			sfxge_lro_drop(rxq, c);
		}
	}
}

static void
sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
		struct mbuf *mbuf, struct tcphdr *th)
{
	struct tcphdr *c_th;

	/* Tack the new mbuf onto the chain. */
	KASSERT(!mbuf->m_next, ("mbuf already chained"));
	c->mbuf_tail->m_next = mbuf;
	c->mbuf_tail = mbuf;

	/* Increase length appropriately */
	c->mbuf->m_pkthdr.len += mbuf->m_len;

	/* Update the connection state flags */
	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = c->nh;
		iph->ip_len += mbuf->m_len;
		c_th = (struct tcphdr *)(iph + 1);
	} else {
		struct ip6_hdr *iph = c->nh;
		iph->ip6_plen += mbuf->m_len;
		c_th = (struct tcphdr *)(iph + 1);
	}
	c_th->th_flags |= (th->th_flags & TH_PUSH);
	c->th_last = th;
	++st->n_merges;

	/* Pass packet up now if another segment could overflow the IP
	 * length: the limit is 65536 bytes, less headroom for one more
	 * jumbo-sized (9200 byte) segment.
	 */
	if (c->mbuf->m_pkthdr.len > 65536 - 9200)
		sfxge_lro_deliver(st, c);
}

static void
sfxge_lro_start(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
		struct mbuf *mbuf, void *nh, struct tcphdr *th)
{
	/* Start the chain */
	c->mbuf = mbuf;
	c->mbuf_tail = c->mbuf;
	c->nh = nh;
	c->th_last = th;

	mbuf->m_pkthdr.len = mbuf->m_len;

	/* Mangle header fields for later processing */
	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = nh;
		iph->ip_len = ntohs(iph->ip_len);
	} else {
		struct ip6_hdr *iph = nh;
		iph->ip6_plen = ntohs(iph->ip6_plen);
	}
}

/* Try to merge or otherwise hold or deliver (as appropriate) the
 * packet buffered for this connection (c->next_buf).  Return a flag
 * indicating whether the connection is still active for LRO purposes.
 */
static int
sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
{
	struct sfxge_rx_sw_desc *rx_buf = &c->next_buf;
	char *eh = c->next_eh;
	int data_length, hdr_length, dont_merge;
	unsigned th_seq, pkt_length;
	struct tcphdr *th;
	unsigned now;

	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = c->next_nh;
		th = (struct tcphdr *)(iph + 1);
		pkt_length = ntohs(iph->ip_len) + (char *)iph - eh;
	} else {
		struct ip6_hdr *iph = c->next_nh;
		th = (struct tcphdr *)(iph + 1);
		pkt_length = ntohs(iph->ip6_plen) + (char *)th - eh;
	}

	hdr_length = (char *)th + th->th_off * 4 - eh;
	data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) -
	    hdr_length);
	th_seq = ntohl(th->th_seq);
	dont_merge = ((data_length <= 0)
	    | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN)));

	/* Check for options other than aligned timestamp. */
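	/* The only option block accepted is the common 12-byte layout
	 * NOP, NOP, TIMESTAMP (kind 8, length 10), which implies
	 * th_off == 8, i.e. a 32-byte TCP header.  Anything else
	 * inhibits merging.
	 */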
	if (th->th_off != 5) {
		const uint32_t *opt_ptr = (const uint32_t *)(th + 1);
		if (th->th_off == 8 &&
		    opt_ptr[0] == ntohl((TCPOPT_NOP << 24) |
					(TCPOPT_NOP << 16) |
					(TCPOPT_TIMESTAMP << 8) |
					TCPOLEN_TIMESTAMP)) {
			/* timestamp option -- okay */
		} else {
			dont_merge = 1;
		}
	}

	if (__predict_false(th_seq != c->next_seq)) {
		/* Out-of-order, so start counting again. */
		if (c->mbuf != NULL)
			sfxge_lro_deliver(&rxq->lro, c);
		c->n_in_order_pkts -= lro_loss_packets;
		c->next_seq = th_seq + data_length;
		++rxq->lro.n_misorder;
		goto deliver_buf_out;
	}
	c->next_seq = th_seq + data_length;

	now = ticks;
	if (now - c->last_pkt_ticks > lro_idle_ticks) {
		++rxq->lro.n_drop_idle;
		if (c->mbuf != NULL)
			sfxge_lro_deliver(&rxq->lro, c);
		sfxge_lro_drop(rxq, c);
		return (0);
	}
	c->last_pkt_ticks = ticks;

	if (c->n_in_order_pkts < lro_slow_start_packets) {
		/* May be in slow-start, so don't merge. */
		++rxq->lro.n_slow_start;
		++c->n_in_order_pkts;
		goto deliver_buf_out;
	}

	if (__predict_false(dont_merge)) {
		if (c->mbuf != NULL)
			sfxge_lro_deliver(&rxq->lro, c);
		if (th->th_flags & (TH_FIN | TH_RST)) {
			++rxq->lro.n_drop_closed;
			sfxge_lro_drop(rxq, c);
			return (0);
		}
		goto deliver_buf_out;
	}

	rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size;

	if (__predict_true(c->mbuf != NULL)) {
		/* Remove headers and any padding */
		rx_buf->mbuf->m_data += hdr_length;
		rx_buf->mbuf->m_len = data_length;

		sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th);
	} else {
		/* Remove any padding */
		rx_buf->mbuf->m_len = pkt_length;

		sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th);
	}

	rx_buf->mbuf = NULL;
	return (1);

deliver_buf_out:
	sfxge_rx_deliver(rxq, rx_buf);
	return (1);
}

static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash,
			       uint16_t l2_id, void *nh, struct tcphdr *th)
{
	unsigned bucket = conn_hash & st->conns_mask;
	struct sfxge_lro_conn *c;

	if (st->conns_n[bucket] >= lro_chain_max) {
		++st->n_too_many;
		return;
	}

	if (!TAILQ_EMPTY(&st->free_conns)) {
		c = TAILQ_FIRST(&st->free_conns);
		TAILQ_REMOVE(&st->free_conns, c, link);
	} else {
		c = malloc(sizeof(*c), M_SFXGE, M_NOWAIT);
		if (c == NULL)
			return;
		c->mbuf = NULL;
		c->next_buf.mbuf = NULL;
	}

	/* Create the connection tracking data */
	++st->conns_n[bucket];
	TAILQ_INSERT_HEAD(&st->conns[bucket], c, link);
	c->l2_id = l2_id;
	c->conn_hash = conn_hash;
	c->source = th->th_sport;
	c->dest = th->th_dport;
	c->n_in_order_pkts = 0;
	c->last_pkt_ticks = *(volatile int *)&ticks;
	c->delivered = 0;
	++st->n_new_stream;
	/* NB. We don't initialise c->next_seq, and it doesn't matter what
	 * value it has.  Most likely the next packet received for this
	 * connection will not match -- no harm done.
	 */
}

/* Process mbuf and decide whether to dispatch it to the stack now or
 * later.
 */
static void
sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
{
	struct sfxge_softc *sc = rxq->sc;
	struct mbuf *m = rx_buf->mbuf;
	struct ether_header *eh;
	struct sfxge_lro_conn *c;
	uint16_t l2_id;
	uint16_t l3_proto;
	void *nh;
	struct tcphdr *th;
	uint32_t conn_hash;
	unsigned bucket;

	/* Get the hardware hash */
	conn_hash = efx_pseudo_hdr_hash_get(rxq->common,
					    EFX_RX_HASHALG_TOEPLITZ,
					    mtod(m, uint8_t *));

	eh = (struct ether_header *)(m->m_data + sc->rx_prefix_size);
	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh = (struct ether_vlan_header *)eh;
		l2_id = EVL_VLANOFTAG(ntohs(veh->evl_tag)) |
		    SFXGE_LRO_L2_ID_VLAN;
		l3_proto = veh->evl_proto;
		nh = veh + 1;
	} else {
		l2_id = 0;
		l3_proto = eh->ether_type;
		nh = eh + 1;
	}

	/* Check whether this is a suitable packet (unfragmented
	 * TCP/IPv4 or TCP/IPv6).  If so, find the TCP header and
	 * length, and compute a hash if necessary.  If not, return.
	 */
	if (l3_proto == htons(ETHERTYPE_IP)) {
		struct ip *iph = nh;

		KASSERT(iph->ip_p == IPPROTO_TCP,
		    ("IPv4 protocol is not TCP, but packet marker is set"));
		if ((iph->ip_hl - (sizeof(*iph) >> 2u)) |
		    (iph->ip_off & htons(IP_MF | IP_OFFMASK)))
			goto deliver_now;
		th = (struct tcphdr *)(iph + 1);
	} else if (l3_proto == htons(ETHERTYPE_IPV6)) {
		struct ip6_hdr *iph = nh;

		KASSERT(iph->ip6_nxt == IPPROTO_TCP,
		    ("IPv6 next header is not TCP, but packet marker is set"));
		l2_id |= SFXGE_LRO_L2_ID_IPV6;
		th = (struct tcphdr *)(iph + 1);
	} else {
		goto deliver_now;
	}

	bucket = conn_hash & rxq->lro.conns_mask;

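	/* The hardware hash selected the bucket, but hashes can collide,
	 * so verify the L2 id, ports and (below) IP addresses before
	 * accepting a match; the differences are OR-ed to stay branchless.
	 */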
	TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) {
		if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash))
			continue;
		if ((c->source - th->th_sport) | (c->dest - th->th_dport))
			continue;
		if (c->mbuf != NULL) {
			if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
				struct ip *c_iph, *iph = nh;
				c_iph = c->nh;
				if ((c_iph->ip_src.s_addr - iph->ip_src.s_addr) |
				    (c_iph->ip_dst.s_addr - iph->ip_dst.s_addr))
					continue;
			} else {
				struct ip6_hdr *c_iph, *iph = nh;
				c_iph = c->nh;
				if (ipv6_addr_cmp(&c_iph->ip6_src, &iph->ip6_src) |
				    ipv6_addr_cmp(&c_iph->ip6_dst, &iph->ip6_dst))
					continue;
			}
		}

		/* Re-insert at head of list to reduce lookup time. */
		TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
		TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link);

		if (c->next_buf.mbuf != NULL) {
			if (!sfxge_lro_try_merge(rxq, c))
				goto deliver_now;
		} else {
			LIST_INSERT_HEAD(&rxq->lro.active_conns, c,
			    active_link);
		}
		c->next_buf = *rx_buf;
		c->next_eh = eh;
		c->next_nh = nh;

		rx_buf->mbuf = NULL;
		rx_buf->flags = EFX_DISCARD;
		return;
	}

	sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th);
deliver_now:
	sfxge_rx_deliver(rxq, rx_buf);
}

static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq)
{
	struct sfxge_lro_state *st = &rxq->lro;
	struct sfxge_lro_conn *c;
	unsigned t;

	while (!LIST_EMPTY(&st->active_conns)) {
		c = LIST_FIRST(&st->active_conns);
		if (!c->delivered && c->mbuf != NULL)
			sfxge_lro_deliver(st, c);
		if (sfxge_lro_try_merge(rxq, c)) {
			if (c->mbuf != NULL)
				sfxge_lro_deliver(st, c);
			LIST_REMOVE(c, active_link);
		}
		c->delivered = 0;
	}

	t = *(volatile int *)&ticks;
	if (__predict_false(t != st->last_purge_ticks))
		sfxge_lro_purge_idle(rxq, t);
}

#else	/* !SFXGE_LRO */

static void
sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
{
}

static void
sfxge_lro_end_of_burst(struct sfxge_rxq *rxq)
{
}

#endif	/* SFXGE_LRO */

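/* Completion handling is pipelined: each descriptor is held in 'prev'
 * for one loop iteration so that the next packet's payload can be
 * prefetched before 'prev' is passed into LRO or delivered to the stack.
 */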
void
sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop)
{
	struct sfxge_softc *sc = rxq->sc;
	int if_capenable = sc->ifnet->if_capenable;
	int lro_enabled = if_capenable & IFCAP_LRO;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int completed;
	unsigned int level;
	struct mbuf *m;
	struct sfxge_rx_sw_desc *prev = NULL;

	index = rxq->index;
	evq = sc->evq[index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = rxq->completed;
	while (completed != rxq->pending) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;

		id = completed++ & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		m = rx_desc->mbuf;

		if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
			goto discard;

		if (rx_desc->flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		/* Read the length from the pseudo header if required */
		if (rx_desc->flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
							   mtod(m, uint8_t *),
							   &tmp_size);
			KASSERT(rc == 0, ("cannot get packet length: %d", rc));
			rx_desc->size = (int)tmp_size + sc->rx_prefix_size;
		}

		prefetch_read_many(mtod(m, caddr_t));

		switch (rx_desc->flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		case EFX_PKT_IPV4:
			if (~if_capenable & IFCAP_RXCSUM)
				rx_desc->flags &=
				    ~(EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP);
			break;
		case EFX_PKT_IPV6:
			if (~if_capenable & IFCAP_RXCSUM_IPV6)
				rx_desc->flags &= ~EFX_CKSUM_TCPUDP;
			break;
		case 0:
			/* Check for loopback packets */
			{
				struct ether_header *etherhp;

				/*LINTED*/
				etherhp = mtod(m, struct ether_header *);

				if (etherhp->ether_type ==
				    htons(SFXGE_ETHERTYPE_LOOPBACK)) {
					EFSYS_PROBE(loopback);

					rxq->loopback++;
					goto discard;
				}
			}
			break;
		default:
			KASSERT(B_FALSE,
			    ("Rx descriptor with both IPv4 and IPv6 flags"));
			goto discard;
		}

		/* Pass packet up the stack or into LRO (pipelined) */
		if (prev != NULL) {
			if (lro_enabled &&
			    ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) ==
			     (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)))
				sfxge_lro(rxq, prev);
			else
				sfxge_rx_deliver(rxq, prev);
		}
		prev = rx_desc;
		continue;

discard:
		/* Return the packet to the pool */
		m_free(m);
		rx_desc->mbuf = NULL;
	}
	rxq->completed = completed;

	level = rxq->added - rxq->completed;

	/* Pass last packet up the stack or into LRO */
	if (prev != NULL) {
		if (lro_enabled &&
		    ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) ==
		     (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)))
			sfxge_lro(rxq, prev);
		else
			sfxge_rx_deliver(rxq, prev);
	}

	/*
	 * If there are any pending flows and this is the end of the
	 * poll then they must be completed.
	 */
	if (eop)
		sfxge_lro_end_of_burst(rxq);

	/* Top up the queue if necessary */
	if (level < rxq->refill_threshold)
		sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE);
}

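/* Queue flush handshake: sfxge_rx_qstop() moves flush_state from
 * SFXGE_FLUSH_REQUIRED to SFXGE_FLUSH_PENDING and then waits for the
 * event path to report SFXGE_FLUSH_DONE or SFXGE_FLUSH_FAILED (via
 * sfxge_rx_qflush_done()/sfxge_rx_qflush_failed() above), retrying the
 * flush up to three times before giving up.
 */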
static void
sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;
	struct sfxge_evq *evq;
	unsigned int count;
	unsigned int retry = 3;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	rxq = sc->rxq[index];
	evq = sc->evq[index];

	SFXGE_EVQ_LOCK(evq);

	KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
	    ("rxq not started"));

	rxq->init_state = SFXGE_RXQ_INITIALIZED;

	callout_stop(&rxq->refill_callout);

	while (rxq->flush_state != SFXGE_FLUSH_DONE && retry != 0) {
		rxq->flush_state = SFXGE_FLUSH_PENDING;

		SFXGE_EVQ_UNLOCK(evq);

		/* Flush the receive queue */
		if (efx_rx_qflush(rxq->common) != 0) {
			SFXGE_EVQ_LOCK(evq);
			rxq->flush_state = SFXGE_FLUSH_FAILED;
			break;
		}

		count = 0;
		do {
			/* Spin for 100 ms */
			DELAY(100000);

			if (rxq->flush_state != SFXGE_FLUSH_PENDING)
				break;

		} while (++count < 20);

		SFXGE_EVQ_LOCK(evq);

		if (rxq->flush_state == SFXGE_FLUSH_PENDING) {
			/* Flush timeout - neither done nor failed */
			log(LOG_ERR, "%s: Cannot flush Rx queue %u\n",
			    device_get_nameunit(sc->dev), index);
			rxq->flush_state = SFXGE_FLUSH_DONE;
		}
		retry--;
	}
	if (rxq->flush_state == SFXGE_FLUSH_FAILED) {
		log(LOG_ERR, "%s: Flushing Rx queue %u failed\n",
		    device_get_nameunit(sc->dev), index);
		rxq->flush_state = SFXGE_FLUSH_DONE;
	}

	rxq->pending = rxq->added;
	sfxge_rx_qcomplete(rxq, B_TRUE);

	KASSERT(rxq->completed == rxq->pending,
	    ("rxq->completed != rxq->pending"));

	rxq->added = 0;
	rxq->pushed = 0;
	rxq->pending = 0;
	rxq->completed = 0;
	rxq->loopback = 0;

	/* Destroy the common code receive queue. */
	efx_rx_qdestroy(rxq->common);

	efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
	    EFX_RXQ_NBUFS(sc->rxq_entries));

	SFXGE_EVQ_UNLOCK(evq);
}

static int
sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;
	efsys_mem_t *esmp;
	struct sfxge_evq *evq;
	int rc;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	rxq = sc->rxq[index];
	esmp = &rxq->mem;
	evq = sc->evq[index];

	KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED,
	    ("rxq->init_state != SFXGE_RXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp,
	    EFX_RXQ_NBUFS(sc->rxq_entries))) != 0)
		return (rc);

	/* Create the common code receive queue. */
	if ((rc = efx_rx_qcreate(sc->enp, index, 0, EFX_RXQ_TYPE_DEFAULT,
	    esmp, sc->rxq_entries, rxq->buf_base_id, EFX_RXQ_FLAG_NONE,
	    evq->common, &rxq->common)) != 0)
		goto fail;

	SFXGE_EVQ_LOCK(evq);

	/* Enable the receive queue. */
	efx_rx_qenable(rxq->common);

	rxq->init_state = SFXGE_RXQ_STARTED;
	rxq->flush_state = SFXGE_FLUSH_REQUIRED;

	/* Try to fill the queue from the pool. */
	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE);

	SFXGE_EVQ_UNLOCK(evq);

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
	    EFX_RXQ_NBUFS(sc->rxq_entries));
	return (rc);
}

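/* Note the start/stop ordering: the default MAC filter RXQ is pointed at
 * rxq[0] only after all queues have started (sfxge_rx_start()) and is
 * cleared before any queue is stopped (sfxge_rx_stop()), so the filter
 * never targets a stopped queue.
 */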
void
sfxge_rx_stop(struct sfxge_softc *sc)
{
	int index;

	efx_mac_filter_default_rxq_clear(sc->enp);

	/* Stop the receive queue(s) */
	index = sc->rxq_count;
	while (--index >= 0)
		sfxge_rx_qstop(sc, index);

	sc->rx_prefix_size = 0;
	sc->rx_buffer_size = 0;

	efx_rx_fini(sc->enp);
}

int
sfxge_rx_start(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	const efx_nic_cfg_t *encp;
	size_t hdrlen, align, reserved;
	int index;
	int rc;

	intr = &sc->intr;

	/* Initialize the common code receive module. */
	if ((rc = efx_rx_init(sc->enp)) != 0)
		return (rc);

	encp = efx_nic_cfg_get(sc->enp);
	sc->rx_buffer_size = EFX_MAC_PDU(sc->ifnet->if_mtu);

	/* Calculate the receive packet buffer size. */
	sc->rx_prefix_size = encp->enc_rx_prefix_size;

	/* Ensure IP headers are 32bit aligned */
	hdrlen = sc->rx_prefix_size + sizeof (struct ether_header);
	sc->rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen;

	sc->rx_buffer_size += sc->rx_buffer_align;

	/* Align end of packet buffer for RX DMA end padding */
	align = MAX(1, encp->enc_rx_buf_align_end);
	EFSYS_ASSERT(ISP2(align));
	sc->rx_buffer_size = P2ROUNDUP(sc->rx_buffer_size, align);

	/*
	 * Standard mbuf zones only guarantee pointer-size alignment;
	 * we need extra space to align to the cache line
	 */
	reserved = sc->rx_buffer_size + CACHE_LINE_SIZE;

	/* Select zone for packet buffers */
	if (reserved <= MCLBYTES)
		sc->rx_cluster_size = MCLBYTES;
	else if (reserved <= MJUMPAGESIZE)
		sc->rx_cluster_size = MJUMPAGESIZE;
	else if (reserved <= MJUM9BYTES)
		sc->rx_cluster_size = MJUM9BYTES;
	else
		sc->rx_cluster_size = MJUM16BYTES;
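
	/* The indirection table spreads the low bits of the RSS hash over
	 * the receive queues.  Without kernel RSS this is plain round-robin
	 * (with 4 queues the entries cycle 0,1,2,3,0,...); with RSS the
	 * kernel's bucket-to-queue mapping is used instead.
	 */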
	/*
	 * Set up the scale table.  Enable all hash types and hash insertion.
	 */
	for (index = 0; index < nitems(sc->rx_indir_table); index++)
#ifdef RSS
		sc->rx_indir_table[index] =
		    rss_get_indirection_to_bucket(index) % sc->rxq_count;
#else
		sc->rx_indir_table[index] = index % sc->rxq_count;
#endif
	if ((rc = efx_rx_scale_tbl_set(sc->enp, EFX_RSS_CONTEXT_DEFAULT,
	    sc->rx_indir_table, nitems(sc->rx_indir_table))) != 0)
		goto fail;
	(void)efx_rx_scale_mode_set(sc->enp, EFX_RSS_CONTEXT_DEFAULT,
	    EFX_RX_HASHALG_TOEPLITZ,
	    EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4 |
	    EFX_RX_HASH_IPV6 | EFX_RX_HASH_TCPIPV6, B_TRUE);

#ifdef RSS
	rss_getkey(toep_key);
#endif
	if ((rc = efx_rx_scale_key_set(sc->enp, EFX_RSS_CONTEXT_DEFAULT,
	    toep_key, sizeof(toep_key))) != 0)
		goto fail;

	/* Start the receive queue(s). */
	for (index = 0; index < sc->rxq_count; index++) {
		if ((rc = sfxge_rx_qstart(sc, index)) != 0)
			goto fail2;
	}

	rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common,
	    sc->intr.n_alloc > 1);
	if (rc != 0)
		goto fail3;

	return (0);

fail3:
fail2:
	while (--index >= 0)
		sfxge_rx_qstop(sc, index);

fail:
	efx_rx_fini(sc->enp);

	return (rc);
}
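/*
 * How the scale table above spreads traffic, roughly: the controller
 * Toeplitz-hashes each packet's address/port tuple and uses the hash
 * to pick an entry in rx_indir_table[], which names the receiving
 * queue.  Without the RSS option and with rxq_count == 4, the table
 * simply cycles 0, 1, 2, 3, 0, ... so flows spread evenly; with RSS
 * it mirrors the kernel's bucket-to-CPU mapping instead.
 */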
#ifdef SFXGE_LRO

static void sfxge_lro_init(struct sfxge_rxq *rxq)
{
	struct sfxge_lro_state *st = &rxq->lro;
	unsigned i;

	st->conns_mask = lro_table_size - 1;
	KASSERT(!((st->conns_mask + 1) & st->conns_mask),
	    ("lro_table_size must be a power of 2"));
	st->sc = rxq->sc;
	st->conns = malloc((st->conns_mask + 1) * sizeof(st->conns[0]),
	    M_SFXGE, M_WAITOK);
	st->conns_n = malloc((st->conns_mask + 1) * sizeof(st->conns_n[0]),
	    M_SFXGE, M_WAITOK);
	for (i = 0; i <= st->conns_mask; ++i) {
		TAILQ_INIT(&st->conns[i]);
		st->conns_n[i] = 0;
	}
	LIST_INIT(&st->active_conns);
	TAILQ_INIT(&st->free_conns);
}

static void sfxge_lro_fini(struct sfxge_rxq *rxq)
{
	struct sfxge_lro_state *st = &rxq->lro;
	struct sfxge_lro_conn *c;
	unsigned i;

	/* Return cleanly if sfxge_lro_init() has not been called. */
	if (st->conns == NULL)
		return;

	KASSERT(LIST_EMPTY(&st->active_conns), ("found active connections"));

	for (i = 0; i <= st->conns_mask; ++i) {
		while (!TAILQ_EMPTY(&st->conns[i])) {
			c = TAILQ_LAST(&st->conns[i], sfxge_lro_tailq);
			sfxge_lro_drop(rxq, c);
		}
	}

	while (!TAILQ_EMPTY(&st->free_conns)) {
		c = TAILQ_FIRST(&st->free_conns);
		TAILQ_REMOVE(&st->free_conns, c, link);
		KASSERT(!c->mbuf, ("found orphaned mbuf"));
		free(c, M_SFXGE);
	}

	free(st->conns_n, M_SFXGE);
	free(st->conns, M_SFXGE);
	st->conns = NULL;
}

#else

static void
sfxge_lro_init(struct sfxge_rxq *rxq)
{
}

static void
sfxge_lro_fini(struct sfxge_rxq *rxq)
{
}

#endif	/* SFXGE_LRO */
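/*
 * The LRO state above is a chained hash table: conns[] holds
 * lro_table_size TAILQ buckets keyed by connection hash, conns_n[]
 * tracks each chain's occupancy (checked against lro_chain_max
 * elsewhere in the driver), active_conns links connections that
 * currently hold a deferred mbuf, and free_conns recycles connection
 * objects so steady-state operation avoids malloc/free churn.
 */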
static void
sfxge_rx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;

	rxq = sc->rxq[index];

	KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED,
	    ("rxq->init_state != SFXGE_RXQ_INITIALIZED"));

	/* Free the context array and the flow table. */
	free(rxq->queue, M_SFXGE);
	sfxge_lro_fini(rxq);

	/* Release DMA memory. */
	sfxge_dma_free(&rxq->mem);

	sc->rxq[index] = NULL;

	free(rxq, M_SFXGE);
}
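/*
 * Ring sizing notes: rxq_entries is expected to be a power of two,
 * so ptr_mask (entries - 1) lets ring indices wrap with a single
 * AND.  RX_REFILL_THRESHOLD(entries) evaluates to 90% of
 * EFX_RXQ_LIMIT(entries) and is used by the refill logic as its
 * fill threshold.
 */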
static int
sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int rc;

	KASSERT(index < sc->rxq_count, ("index >= %d", sc->rxq_count));

	rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK);
	rxq->sc = sc;
	rxq->index = index;
	rxq->entries = sc->rxq_entries;
	rxq->ptr_mask = rxq->entries - 1;
	rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries);

	sc->rxq[index] = rxq;
	esmp = &rxq->mem;

	evq = sc->evq[index];

	/* Allocate and zero DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries),
	    &rxq->buf_base_id);

	/* Allocate the context array and the flow table. */
	rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries,
	    M_SFXGE, M_WAITOK | M_ZERO);
	sfxge_lro_init(rxq);

	callout_init(&rxq->refill_callout, 1);

	rxq->init_state = SFXGE_RXQ_INITIALIZED;

	return (0);
}

static const struct {
	const char *name;
	size_t offset;
} sfxge_rx_stats[] = {
#define	SFXGE_RX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_rxq, member) }
#ifdef SFXGE_LRO
	SFXGE_RX_STAT(lro_merges, lro.n_merges),
	SFXGE_RX_STAT(lro_bursts, lro.n_bursts),
	SFXGE_RX_STAT(lro_slow_start, lro.n_slow_start),
	SFXGE_RX_STAT(lro_misorder, lro.n_misorder),
	SFXGE_RX_STAT(lro_too_many, lro.n_too_many),
	SFXGE_RX_STAT(lro_new_stream, lro.n_new_stream),
	SFXGE_RX_STAT(lro_drop_idle, lro.n_drop_idle),
	SFXGE_RX_STAT(lro_drop_closed, lro.n_drop_closed)
#endif
};

static int
sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned int sum, index;

	/* Sum across all RX queues. */
	sum = 0;
	for (index = 0; index < sc->rxq_count; index++)
		sum += *(unsigned int *)((caddr_t)sc->rxq[index] +
		    sfxge_rx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}
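/*
 * Each statistic above is exposed as one read-only unsigned sysctl;
 * the handler recovers the counter from every rxq by byte offset
 * (offsetof into struct sfxge_rxq) and reports the sum across all
 * queues rather than per-queue values.
 */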
static void
sfxge_rx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0; id < nitems(sfxge_rx_stats); id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_rx_stats[id].name,
			CTLTYPE_UINT|CTLFLAG_RD,
			sc, id, sfxge_rx_stat_handler, "IU",
			"");
	}
}

void
sfxge_rx_fini(struct sfxge_softc *sc)
{
	int index;

	index = sc->rxq_count;
	while (--index >= 0)
		sfxge_rx_qfini(sc, index);

	sc->rxq_count = 0;
}
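/*
 * sfxge_rx_init() creates one receive queue per allocated interrupt,
 * so each queue's events can be steered at its own vector.  The
 * lro_table_size tunable is validated here so that an invalid value
 * fails initialization with EINVAL instead of being silently
 * accepted.
 */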
int
sfxge_rx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

#ifdef SFXGE_LRO
	if (!ISP2(lro_table_size)) {
		log(LOG_ERR, "%s=%u must be a power of 2",
		    SFXGE_LRO_PARAM(table_size), lro_table_size);
		rc = EINVAL;
		goto fail_lro_table_size;
	}

	if (lro_idle_ticks == 0)
		lro_idle_ticks = hz / 10 + 1;	/* 100 ms */
#endif

	intr = &sc->intr;

	sc->rxq_count = intr->n_alloc;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Initialize the receive queue(s) - one per interrupt. */
	for (index = 0; index < sc->rxq_count; index++) {
		if ((rc = sfxge_rx_qinit(sc, index)) != 0)
			goto fail;
	}

	sfxge_rx_stat_init(sc);

	return (0);

fail:
	/* Tear down the receive queue(s). */
	while (--index >= 0)
		sfxge_rx_qfini(sc, index);

	sc->rxq_count = 0;

#ifdef SFXGE_LRO
fail_lro_table_size:
#endif
	return (rc);
}