/*-
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include <machine/in_cksum.h>

#ifdef RSS
#include <net/rss_config.h>
#endif

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_rx.h"

#define	RX_REFILL_THRESHOLD(_entries)	(EFX_RXQ_LIMIT(_entries) * 9 / 10)

#ifdef SFXGE_LRO

SYSCTL_NODE(_hw_sfxge, OID_AUTO, lro, CTLFLAG_RD, NULL,
	    "Large receive offload (LRO) parameters");

#define	SFXGE_LRO_PARAM(_param)	SFXGE_PARAM(lro._param)

/* Size of the LRO hash table.  Must be a power of 2.  A larger table
 * means we can accelerate a larger number of streams.
 */
static unsigned lro_table_size = 128;
TUNABLE_INT(SFXGE_LRO_PARAM(table_size), &lro_table_size);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, table_size, CTLFLAG_RDTUN,
	    &lro_table_size, 0,
	    "Size of the LRO hash table (must be a power of 2)");

/* Maximum length of a hash chain.  If chains get too long then the lookup
 * time increases and may exceed the benefit of LRO.
 */
static unsigned lro_chain_max = 20;
TUNABLE_INT(SFXGE_LRO_PARAM(chain_max), &lro_chain_max);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, chain_max, CTLFLAG_RDTUN,
	    &lro_chain_max, 0,
	    "The maximum length of a hash chain");

/* Maximum time (in ticks) that a connection can be idle before its LRO
 * state is discarded.
 */
static unsigned lro_idle_ticks;	/* initialised in sfxge_rx_init() */
TUNABLE_INT(SFXGE_LRO_PARAM(idle_ticks), &lro_idle_ticks);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, idle_ticks, CTLFLAG_RDTUN,
	    &lro_idle_ticks, 0,
	    "The maximum time (in ticks) that a connection can be idle "
	    "before its LRO state is discarded");

/* Number of packets with payload that must arrive in-order before a
 * connection is eligible for LRO.  The idea is we should avoid coalescing
 * segments when the sender is in slow-start because reducing the ACK rate
 * can damage performance.
 */
static int lro_slow_start_packets = 2000;
TUNABLE_INT(SFXGE_LRO_PARAM(slow_start_packets), &lro_slow_start_packets);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, slow_start_packets, CTLFLAG_RDTUN,
	    &lro_slow_start_packets, 0,
	    "Number of packets with payload that must arrive in-order before "
	    "a connection is eligible for LRO");

/* Number of packets with payload that must arrive in-order following loss
 * before a connection is eligible for LRO.  The idea is we should avoid
 * coalescing segments when the sender is recovering from loss, because
 * reducing the ACK rate can damage performance.
 */
static int lro_loss_packets = 20;
TUNABLE_INT(SFXGE_LRO_PARAM(loss_packets), &lro_loss_packets);
SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, loss_packets, CTLFLAG_RDTUN,
	    &lro_loss_packets, 0,
	    "Number of packets with payload that must arrive in-order "
	    "following loss before a connection is eligible for LRO");
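
/*
 * Example (a sketch, assuming SFXGE_PARAM() expands to the usual
 * "hw.sfxge." prefix): all of the above are boot-time tunables
 * (CTLFLAG_RDTUN), so they would be set from /boot/loader.conf, e.g.:
 *
 *	hw.sfxge.lro.table_size="256"
 *	hw.sfxge.lro.chain_max="20"
 *	hw.sfxge.lro.idle_ticks="100"
 */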

/* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */
#define	SFXGE_LRO_L2_ID_VLAN 0x4000
#define	SFXGE_LRO_L2_ID_IPV6 0x8000
#define	SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN)
#define	SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6))

/* Compare IPv6 addresses, avoiding conditional branches */
static unsigned long ipv6_addr_cmp(const struct in6_addr *left,
				   const struct in6_addr *right)
{
#if LONG_BIT == 64
	const uint64_t *left64 = (const uint64_t *)left;
	const uint64_t *right64 = (const uint64_t *)right;
	return (left64[0] - right64[0]) | (left64[1] - right64[1]);
#else
	return (left->s6_addr32[0] - right->s6_addr32[0]) |
	       (left->s6_addr32[1] - right->s6_addr32[1]) |
	       (left->s6_addr32[2] - right->s6_addr32[2]) |
	       (left->s6_addr32[3] - right->s6_addr32[3]);
#endif
}
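
/*
 * Note that ipv6_addr_cmp() is an equality test rather than an ordering:
 * each word-wise difference is zero iff the corresponding words are equal,
 * so the OR of the differences is non-zero iff the addresses differ.  This
 * keeps the LRO connection-lookup hot path free of data-dependent branches.
 */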

#endif	/* SFXGE_LRO */

void
sfxge_rx_qflush_done(struct sfxge_rxq *rxq)
{

	rxq->flush_state = SFXGE_FLUSH_DONE;
}

void
sfxge_rx_qflush_failed(struct sfxge_rxq *rxq)
{

	rxq->flush_state = SFXGE_FLUSH_FAILED;
}

#ifdef RSS
static uint8_t toep_key[RSS_KEYSIZE];
#else
static uint8_t toep_key[] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
#endif
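
/*
 * The static 40-byte key above appears to be the well-known sample Toeplitz
 * key from Microsoft's RSS documentation.  When the kernel is built with
 * RSS, the array is instead filled in from the system-wide key by
 * rss_getkey() in sfxge_rx_start() below, so the hash the NIC computes
 * agrees with the hash the rest of the stack expects.
 */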

static void
sfxge_rx_post_refill(void *arg)
{
	struct sfxge_rxq *rxq = arg;
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	uint16_t magic;

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];
	magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq);

	/* This is guaranteed due to the start/stop order of rx and ev */
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not started"));
	KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
	    ("rxq not started"));
	efx_ev_qpost(evq->common, magic);
}
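
/*
 * Rather than refilling from the callout directly, the callout posts a
 * software ("magic") event to the queue's event queue; the event handler
 * in the sfxge event-processing code then performs the refill in
 * event-queue context, where the EVQ lock that sfxge_rx_qfill() asserts
 * is already held.
 */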

static void
sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying)
{
	/* Initially retry after 100 ms, but back off in case of
	 * repeated failures as we probably have to wait for the
	 * administrator to raise the pool limit. */
	if (retrying)
		rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz);
	else
		rxq->refill_delay = hz / 10;

	callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay,
			     sfxge_rx_post_refill, rxq);
}

#define	SFXGE_REFILL_BATCH  64

static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
	    ("ntodo > EFX_RXQ_LIMIT(rxq->entries)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size - sc->rx_buffer_align;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    sc->rx_cluster_size);
		if (m == NULL)
			break;

		/* m_len specifies length of area to be mapped for DMA */
		m->m_len = mblksize;
		m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data,
					       CACHE_LINE_SIZE);
		m->m_data += sc->rx_buffer_align;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
		addr[batch++] = seg.ds_addr;

		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
			BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed);

	/* The queue could still be empty if no descriptors were actually
	 * pushed, in which case there will be no event to cause the next
	 * refill, so we must schedule a refill ourselves.
	 */
	if (rxq->pushed == rxq->completed) {
		sfxge_rx_schedule_refill(rxq, retrying);
	}
}
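
/*
 * Buffer addresses are accumulated in an on-stack array and handed to
 * efx_rx_qpost() in batches of SFXGE_REFILL_BATCH, which bounds the stack
 * usage while amortising the cost of posting descriptors.  The doorbell
 * (efx_rx_qpush()) is written only once per refill, after the final batch.
 */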

void
sfxge_rx_qrefill(struct sfxge_rxq *rxq)
{

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	/* Make sure the queue is full */
	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE);
}

static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m)
{
	struct ifnet *ifp = sc->ifnet;

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.csum_data = 0xffff;
	ifp->if_input(ifp, m);
}

static void
sfxge_rx_deliver(struct sfxge_softc *sc, struct sfxge_rx_sw_desc *rx_desc)
{
	struct mbuf *m = rx_desc->mbuf;
	int flags = rx_desc->flags;
	int csum_flags;

	/* Convert checksum flags */
	csum_flags = (flags & EFX_CKSUM_IPV4) ?
	    (CSUM_IP_CHECKED | CSUM_IP_VALID) : 0;
	if (flags & EFX_CKSUM_TCPUDP)
		csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->m_pkthdr.flowid =
		    efx_psuedo_hdr_hash_get(sc->enp,
					    EFX_RX_HASHALG_TOEPLITZ,
					    mtod(m, uint8_t *));
		/* The hash covers a 4-tuple for TCP only */
		M_HASHTYPE_SET(m,
		    (flags & EFX_PKT_IPV4) ?
			((flags & EFX_PKT_TCP) ?
			    M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_IPV4) :
			((flags & EFX_PKT_TCP) ?
			    M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_IPV6));
	}
	m->m_data += sc->rx_prefix_size;
	m->m_len = rx_desc->size - sc->rx_prefix_size;
	m->m_pkthdr.len = m->m_len;
	m->m_pkthdr.csum_flags = csum_flags;
	__sfxge_rx_deliver(sc, rx_desc->mbuf);

	rx_desc->flags = EFX_DISCARD;
	rx_desc->mbuf = NULL;
}
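
/*
 * The NIC writes its Toeplitz hash into the packet prefix that precedes
 * the frame in the receive buffer; efx_psuedo_hdr_hash_get() (note the
 * historical spelling of this common-code API) extracts it before the
 * prefix is trimmed, so the hardware hash can be reused as the mbuf
 * flowid instead of being recomputed in software.
 */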

#ifdef SFXGE_LRO

static void
sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c)
{
	struct sfxge_softc *sc = st->sc;
	struct mbuf *m = c->mbuf;
	struct tcphdr *c_th;
	int csum_flags;

	KASSERT(m, ("no mbuf to deliver"));

	++st->n_bursts;

	/* Finish off packet munging and recalculate IP header checksum. */
	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = c->nh;
		iph->ip_len = htons(iph->ip_len);
		iph->ip_sum = 0;
		iph->ip_sum = in_cksum_hdr(iph);
		c_th = (struct tcphdr *)(iph + 1);
		csum_flags = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
			      CSUM_IP_CHECKED | CSUM_IP_VALID);
	} else {
		struct ip6_hdr *iph = c->nh;
		iph->ip6_plen = htons(iph->ip6_plen);
		c_th = (struct tcphdr *)(iph + 1);
		csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	}

	c_th->th_win = c->th_last->th_win;
	c_th->th_ack = c->th_last->th_ack;
	if (c_th->th_off == c->th_last->th_off) {
		/* Copy TCP options (take care to avoid going negative). */
		int optlen = ((c_th->th_off - 5) & 0xf) << 2u;
		memcpy(c_th + 1, c->th_last + 1, optlen);
	}

	m->m_pkthdr.flowid = c->conn_hash;
	M_HASHTYPE_SET(m,
	    SFXGE_LRO_CONN_IS_TCPIPV4(c) ?
		M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_TCP_IPV6);

	m->m_pkthdr.csum_flags = csum_flags;
	__sfxge_rx_deliver(sc, m);

	c->mbuf = NULL;
	c->delivered = 1;
}
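
/*
 * Byte-order note: sfxge_lro_start() below converts the IP length field
 * to host order when a chain begins, so sfxge_lro_merge() can add segment
 * lengths to it directly; the field is converted back to network order
 * (and the IPv4 header checksum recomputed) here, once per delivered
 * burst, rather than once per merged segment.
 */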

/* Drop the given connection, and add it to the free list. */
static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
{
	unsigned bucket;

	KASSERT(!c->mbuf, ("found orphaned mbuf"));

	if (c->next_buf.mbuf != NULL) {
		sfxge_rx_deliver(rxq->sc, &c->next_buf);
		LIST_REMOVE(c, active_link);
	}

	bucket = c->conn_hash & rxq->lro.conns_mask;
	KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong"));
	--rxq->lro.conns_n[bucket];
	TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
	TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link);
}

/* Stop tracking connections that have gone idle in order to keep hash
 * chains short.
 */
static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now)
{
	struct sfxge_lro_conn *c;
	unsigned i;

	KASSERT(LIST_EMPTY(&rxq->lro.active_conns),
	    ("found active connections"));

	rxq->lro.last_purge_ticks = now;
	for (i = 0; i <= rxq->lro.conns_mask; ++i) {
		if (TAILQ_EMPTY(&rxq->lro.conns[i]))
			continue;

		c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq);
		if (now - c->last_pkt_ticks > lro_idle_ticks) {
			++rxq->lro.n_drop_idle;
			sfxge_lro_drop(rxq, c);
		}
	}
}

static void
sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
		struct mbuf *mbuf, struct tcphdr *th)
{
	struct tcphdr *c_th;

	/* Tack the new mbuf onto the chain. */
	KASSERT(!mbuf->m_next, ("mbuf already chained"));
	c->mbuf_tail->m_next = mbuf;
	c->mbuf_tail = mbuf;

	/* Increase length appropriately */
	c->mbuf->m_pkthdr.len += mbuf->m_len;

	/* Update the connection state flags */
	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = c->nh;
		iph->ip_len += mbuf->m_len;
		c_th = (struct tcphdr *)(iph + 1);
	} else {
		struct ip6_hdr *iph = c->nh;
		iph->ip6_plen += mbuf->m_len;
		c_th = (struct tcphdr *)(iph + 1);
	}
	c_th->th_flags |= (th->th_flags & TH_PUSH);
	c->th_last = th;
	++st->n_merges;

	/* Pass packet up now if another segment could overflow the IP
	 * length.
	 */
	if (c->mbuf->m_pkthdr.len > 65536 - 9200)
		sfxge_lro_deliver(st, c);
}
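
/*
 * The 65536 - 9200 threshold above keeps the 16-bit IP total-length field
 * from overflowing: 65536 is that field's limit, and 9200 bytes is
 * presumably sized to cover the largest segment a ~9000-byte jumbo MTU
 * could contribute on the next merge.
 */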

static void
sfxge_lro_start(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
		struct mbuf *mbuf, void *nh, struct tcphdr *th)
{
	/* Start the chain */
	c->mbuf = mbuf;
	c->mbuf_tail = c->mbuf;
	c->nh = nh;
	c->th_last = th;

	mbuf->m_pkthdr.len = mbuf->m_len;

	/* Mangle header fields for later processing */
	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = nh;
		iph->ip_len = ntohs(iph->ip_len);
	} else {
		struct ip6_hdr *iph = nh;
		iph->ip6_plen = ntohs(iph->ip6_plen);
	}
}

/* Try to merge or otherwise hold or deliver (as appropriate) the
 * packet buffered for this connection (c->next_buf).  Return a flag
 * indicating whether the connection is still active for LRO purposes.
 */
static int
sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
{
	struct sfxge_rx_sw_desc *rx_buf = &c->next_buf;
	char *eh = c->next_eh;
	int data_length, hdr_length, dont_merge;
	unsigned th_seq, pkt_length;
	struct tcphdr *th;
	unsigned now;

	if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
		struct ip *iph = c->next_nh;
		th = (struct tcphdr *)(iph + 1);
		pkt_length = ntohs(iph->ip_len) + (char *)iph - eh;
	} else {
		struct ip6_hdr *iph = c->next_nh;
		th = (struct tcphdr *)(iph + 1);
		pkt_length = ntohs(iph->ip6_plen) + (char *)th - eh;
	}

	hdr_length = (char *)th + th->th_off * 4 - eh;
	data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) -
		       hdr_length);
	th_seq = ntohl(th->th_seq);
	dont_merge = ((data_length <= 0)
		      | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN)));

	/* Check for options other than aligned timestamp. */
	if (th->th_off != 5) {
		const uint32_t *opt_ptr = (const uint32_t *)(th + 1);
		if (th->th_off == 8 &&
		    opt_ptr[0] == ntohl((TCPOPT_NOP << 24) |
					(TCPOPT_NOP << 16) |
					(TCPOPT_TIMESTAMP << 8) |
					TCPOLEN_TIMESTAMP)) {
			/* timestamp option -- okay */
		} else {
			dont_merge = 1;
		}
	}

	if (__predict_false(th_seq != c->next_seq)) {
		/* Out-of-order, so start counting again. */
		if (c->mbuf != NULL)
			sfxge_lro_deliver(&rxq->lro, c);
		c->n_in_order_pkts -= lro_loss_packets;
		c->next_seq = th_seq + data_length;
		++rxq->lro.n_misorder;
		goto deliver_buf_out;
	}
	c->next_seq = th_seq + data_length;

	now = ticks;
	if (now - c->last_pkt_ticks > lro_idle_ticks) {
		++rxq->lro.n_drop_idle;
		if (c->mbuf != NULL)
			sfxge_lro_deliver(&rxq->lro, c);
		sfxge_lro_drop(rxq, c);
		return (0);
	}
	c->last_pkt_ticks = ticks;

	if (c->n_in_order_pkts < lro_slow_start_packets) {
		/* May be in slow-start, so don't merge. */
		++rxq->lro.n_slow_start;
		++c->n_in_order_pkts;
		goto deliver_buf_out;
	}

	if (__predict_false(dont_merge)) {
		if (c->mbuf != NULL)
			sfxge_lro_deliver(&rxq->lro, c);
		if (th->th_flags & (TH_FIN | TH_RST)) {
			++rxq->lro.n_drop_closed;
			sfxge_lro_drop(rxq, c);
			return (0);
		}
		goto deliver_buf_out;
	}

	rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size;

	if (__predict_true(c->mbuf != NULL)) {
		/* Remove headers and any padding */
		rx_buf->mbuf->m_data += hdr_length;
		rx_buf->mbuf->m_len = data_length;

		sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th);
	} else {
		/* Remove any padding */
		rx_buf->mbuf->m_len = pkt_length;

		sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th);
	}

	rx_buf->mbuf = NULL;
	return (1);

deliver_buf_out:
	sfxge_rx_deliver(rxq->sc, rx_buf);
	return (1);
}

static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash,
			       uint16_t l2_id, void *nh, struct tcphdr *th)
{
	unsigned bucket = conn_hash & st->conns_mask;
	struct sfxge_lro_conn *c;

	if (st->conns_n[bucket] >= lro_chain_max) {
		++st->n_too_many;
		return;
	}

	if (!TAILQ_EMPTY(&st->free_conns)) {
		c = TAILQ_FIRST(&st->free_conns);
		TAILQ_REMOVE(&st->free_conns, c, link);
	} else {
		c = malloc(sizeof(*c), M_SFXGE, M_NOWAIT);
		if (c == NULL)
			return;
		c->mbuf = NULL;
		c->next_buf.mbuf = NULL;
	}

	/* Create the connection tracking data */
	++st->conns_n[bucket];
	TAILQ_INSERT_HEAD(&st->conns[bucket], c, link);
	c->l2_id = l2_id;
	c->conn_hash = conn_hash;
	c->source = th->th_sport;
	c->dest = th->th_dport;
	c->n_in_order_pkts = 0;
	c->last_pkt_ticks = *(volatile int *)&ticks;
	c->delivered = 0;
	++st->n_new_stream;
	/* NB. We don't initialise c->next_seq, and it doesn't matter what
	 * value it has.  Most likely the next packet received for this
	 * connection will not match -- no harm done.
	 */
}

/* Process mbuf and decide whether to dispatch it to the stack now or
 * later.
 */
static void
sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
{
	struct sfxge_softc *sc = rxq->sc;
	struct mbuf *m = rx_buf->mbuf;
	struct ether_header *eh;
	struct sfxge_lro_conn *c;
	uint16_t l2_id;
	uint16_t l3_proto;
	void *nh;
	struct tcphdr *th;
	uint32_t conn_hash;
	unsigned bucket;

	/* Get the hardware hash */
	conn_hash = efx_psuedo_hdr_hash_get(sc->enp,
					    EFX_RX_HASHALG_TOEPLITZ,
					    mtod(m, uint8_t *));

	eh = (struct ether_header *)(m->m_data + sc->rx_prefix_size);
	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh = (struct ether_vlan_header *)eh;
		l2_id = EVL_VLANOFTAG(ntohs(veh->evl_tag)) |
			SFXGE_LRO_L2_ID_VLAN;
		l3_proto = veh->evl_proto;
		nh = veh + 1;
	} else {
		l2_id = 0;
		l3_proto = eh->ether_type;
		nh = eh + 1;
	}

	/* Check whether this is a suitable packet (unfragmented
	 * TCP/IPv4 or TCP/IPv6).  If so, find the TCP header and
	 * length, and compute a hash if necessary.  If not, return.
	 */
	if (l3_proto == htons(ETHERTYPE_IP)) {
		struct ip *iph = nh;

		KASSERT(iph->ip_p == IPPROTO_TCP,
		    ("IPv4 protocol is not TCP, but packet marker is set"));
		if ((iph->ip_hl - (sizeof(*iph) >> 2u)) |
		    (iph->ip_off & htons(IP_MF | IP_OFFMASK)))
			goto deliver_now;
		th = (struct tcphdr *)(iph + 1);
	} else if (l3_proto == htons(ETHERTYPE_IPV6)) {
		struct ip6_hdr *iph = nh;

		KASSERT(iph->ip6_nxt == IPPROTO_TCP,
		    ("IPv6 next header is not TCP, but packet marker is set"));
		l2_id |= SFXGE_LRO_L2_ID_IPV6;
		th = (struct tcphdr *)(iph + 1);
	} else {
		goto deliver_now;
	}

	bucket = conn_hash & rxq->lro.conns_mask;

	TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) {
		if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash))
			continue;
		if ((c->source - th->th_sport) | (c->dest - th->th_dport))
			continue;
		if (c->mbuf != NULL) {
			if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) {
				struct ip *c_iph, *iph = nh;
				c_iph = c->nh;
				if ((c_iph->ip_src.s_addr - iph->ip_src.s_addr) |
				    (c_iph->ip_dst.s_addr - iph->ip_dst.s_addr))
					continue;
			} else {
				struct ip6_hdr *c_iph, *iph = nh;
				c_iph = c->nh;
				if (ipv6_addr_cmp(&c_iph->ip6_src, &iph->ip6_src) |
				    ipv6_addr_cmp(&c_iph->ip6_dst, &iph->ip6_dst))
					continue;
			}
		}

		/* Re-insert at head of list to reduce lookup time. */
		TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link);
		TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link);

		if (c->next_buf.mbuf != NULL) {
			if (!sfxge_lro_try_merge(rxq, c))
				goto deliver_now;
		} else {
			LIST_INSERT_HEAD(&rxq->lro.active_conns, c,
			    active_link);
		}
		c->next_buf = *rx_buf;
		c->next_eh = eh;
		c->next_nh = nh;

		rx_buf->mbuf = NULL;
		rx_buf->flags = EFX_DISCARD;
		return;
	}

	sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th);
deliver_now:
	sfxge_rx_deliver(sc, rx_buf);
}
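
/*
 * The connection-match tests above use the same branch-free idiom as
 * ipv6_addr_cmp(): subtracting fields and OR-ing the differences yields
 * zero only when every field matches, so a chain entry is rejected with
 * one conditional branch per pair of fields rather than one per
 * comparison.
 */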

static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq)
{
	struct sfxge_lro_state *st = &rxq->lro;
	struct sfxge_lro_conn *c;
	unsigned t;

	while (!LIST_EMPTY(&st->active_conns)) {
		c = LIST_FIRST(&st->active_conns);
		if (!c->delivered && c->mbuf != NULL)
			sfxge_lro_deliver(st, c);
		if (sfxge_lro_try_merge(rxq, c)) {
			if (c->mbuf != NULL)
				sfxge_lro_deliver(st, c);
			LIST_REMOVE(c, active_link);
		}
		c->delivered = 0;
	}

	t = *(volatile int *)&ticks;
	if (__predict_false(t != st->last_purge_ticks))
		sfxge_lro_purge_idle(rxq, t);
}

#else	/* !SFXGE_LRO */

static void
sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf)
{
}

static void
sfxge_lro_end_of_burst(struct sfxge_rxq *rxq)
{
}

#endif	/* SFXGE_LRO */

void
sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop)
{
	struct sfxge_softc *sc = rxq->sc;
	int if_capenable = sc->ifnet->if_capenable;
	int lro_enabled = if_capenable & IFCAP_LRO;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int completed;
	unsigned int level;
	struct mbuf *m;
	struct sfxge_rx_sw_desc *prev = NULL;

	index = rxq->index;
	evq = sc->evq[index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = rxq->completed;
	while (completed != rxq->pending) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;

		id = completed++ & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		m = rx_desc->mbuf;

		if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
			goto discard;

		if (rx_desc->flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		/* Read the length from the pseudo header if required */
		if (rx_desc->flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc;
			rc = efx_psuedo_hdr_pkt_length_get(sc->enp,
							   mtod(m, uint8_t *),
							   &tmp_size);
			KASSERT(rc == 0, ("cannot get packet length: %d", rc));
			rx_desc->size = (int)tmp_size + sc->rx_prefix_size;
		}

		prefetch_read_many(mtod(m, caddr_t));

		switch (rx_desc->flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		case EFX_PKT_IPV4:
			if (~if_capenable & IFCAP_RXCSUM)
				rx_desc->flags &=
				    ~(EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP);
			break;
		case EFX_PKT_IPV6:
			if (~if_capenable & IFCAP_RXCSUM_IPV6)
				rx_desc->flags &= ~EFX_CKSUM_TCPUDP;
			break;
		case 0:
			/* Check for loopback packets */
			{
				struct ether_header *etherhp;

				/*LINTED*/
				etherhp = mtod(m, struct ether_header *);

				if (etherhp->ether_type ==
				    htons(SFXGE_ETHERTYPE_LOOPBACK)) {
					EFSYS_PROBE(loopback);

					rxq->loopback++;
					goto discard;
				}
			}
			break;
		default:
			KASSERT(B_FALSE,
			    ("Rx descriptor with both IPv4 and IPv6 flags"));
			goto discard;
		}

		/* Pass packet up the stack or into LRO (pipelined) */
		if (prev != NULL) {
			if (lro_enabled &&
			    ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) ==
			     (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)))
				sfxge_lro(rxq, prev);
			else
				sfxge_rx_deliver(sc, prev);
		}
		prev = rx_desc;
		continue;

discard:
		/* Return the packet to the pool */
		m_free(m);
		rx_desc->mbuf = NULL;
	}
	rxq->completed = completed;

	level = rxq->added - rxq->completed;

	/* Pass last packet up the stack or into LRO */
	if (prev != NULL) {
		if (lro_enabled &&
		    ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) ==
		     (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)))
			sfxge_lro(rxq, prev);
		else
			sfxge_rx_deliver(sc, prev);
	}

	/*
	 * If there are any pending flows and this is the end of the
	 * poll then they must be completed.
	 */
	if (eop)
		sfxge_lro_end_of_burst(rxq);

	/* Top up the queue if necessary */
	if (level < rxq->refill_threshold)
		sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE);
}
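
/*
 * Delivery in sfxge_rx_qcomplete() is deliberately pipelined: each loop
 * iteration prefetches the current descriptor's packet data but delivers
 * the previous descriptor, so the cache miss on the fresh mbuf overlaps
 * with the processing of its predecessor; the final descriptor is handed
 * up after the loop.
 */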

static void
sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;
	struct sfxge_evq *evq;
	unsigned int count;
	unsigned int retry = 3;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	rxq = sc->rxq[index];
	evq = sc->evq[index];

	SFXGE_EVQ_LOCK(evq);

	KASSERT(rxq->init_state == SFXGE_RXQ_STARTED,
	    ("rxq not started"));

	rxq->init_state = SFXGE_RXQ_INITIALIZED;

	callout_stop(&rxq->refill_callout);

	while (rxq->flush_state != SFXGE_FLUSH_DONE && retry != 0) {
		rxq->flush_state = SFXGE_FLUSH_PENDING;

		SFXGE_EVQ_UNLOCK(evq);

		/* Flush the receive queue */
		if (efx_rx_qflush(rxq->common) != 0) {
			SFXGE_EVQ_LOCK(evq);
			rxq->flush_state = SFXGE_FLUSH_FAILED;
			break;
		}

		count = 0;
		do {
			/* Spin for 100 ms */
			DELAY(100000);

			if (rxq->flush_state != SFXGE_FLUSH_PENDING)
				break;

		} while (++count < 20);

		SFXGE_EVQ_LOCK(evq);

		if (rxq->flush_state == SFXGE_FLUSH_PENDING) {
			/* Flush timeout - neither done nor failed */
			log(LOG_ERR, "%s: Cannot flush Rx queue %u\n",
			    device_get_nameunit(sc->dev), index);
			rxq->flush_state = SFXGE_FLUSH_DONE;
		}
		retry--;
	}
	if (rxq->flush_state == SFXGE_FLUSH_FAILED) {
		log(LOG_ERR, "%s: Flushing Rx queue %u failed\n",
		    device_get_nameunit(sc->dev), index);
		rxq->flush_state = SFXGE_FLUSH_DONE;
	}

	rxq->pending = rxq->added;
	sfxge_rx_qcomplete(rxq, B_TRUE);

	KASSERT(rxq->completed == rxq->pending,
	    ("rxq->completed != rxq->pending"));

	rxq->added = 0;
	rxq->pushed = 0;
	rxq->pending = 0;
	rxq->completed = 0;
	rxq->loopback = 0;

	/* Destroy the common code receive queue. */
	efx_rx_qdestroy(rxq->common);

	efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
	    EFX_RXQ_NBUFS(sc->rxq_entries));

	SFXGE_EVQ_UNLOCK(evq);
}
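
/*
 * Each flush attempt above polls for up to 2 seconds (20 spins of 100 ms)
 * and is retried up to 3 times; a persistent timeout is logged and then
 * treated as done, since at that point the queue is torn down regardless.
 */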

static int
sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;
	efsys_mem_t *esmp;
	struct sfxge_evq *evq;
	int rc;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	rxq = sc->rxq[index];
	esmp = &rxq->mem;
	evq = sc->evq[index];

	KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED,
	    ("rxq->init_state != SFXGE_RXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp,
	    EFX_RXQ_NBUFS(sc->rxq_entries))) != 0)
		return (rc);

	/* Create the common code receive queue. */
	if ((rc = efx_rx_qcreate(sc->enp, index, 0, EFX_RXQ_TYPE_DEFAULT,
	    esmp, sc->rxq_entries, rxq->buf_base_id, evq->common,
	    &rxq->common)) != 0)
		goto fail;

	SFXGE_EVQ_LOCK(evq);

	/* Enable the receive queue. */
	efx_rx_qenable(rxq->common);

	rxq->init_state = SFXGE_RXQ_STARTED;
	rxq->flush_state = SFXGE_FLUSH_REQUIRED;

	/* Try to fill the queue from the pool. */
	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE);

	SFXGE_EVQ_UNLOCK(evq);

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id,
	    EFX_RXQ_NBUFS(sc->rxq_entries));
	return (rc);
}

void
sfxge_rx_stop(struct sfxge_softc *sc)
{
	int index;

	efx_mac_filter_default_rxq_clear(sc->enp);

	/* Stop the receive queue(s) */
	index = sc->rxq_count;
	while (--index >= 0)
		sfxge_rx_qstop(sc, index);

	sc->rx_prefix_size = 0;
	sc->rx_buffer_size = 0;

	efx_rx_fini(sc->enp);
}

int
sfxge_rx_start(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	const efx_nic_cfg_t *encp;
	size_t hdrlen, align, reserved;
	int index;
	int rc;

	intr = &sc->intr;

	/* Initialize the common code receive module. */
	if ((rc = efx_rx_init(sc->enp)) != 0)
		return (rc);

	encp = efx_nic_cfg_get(sc->enp);
	sc->rx_buffer_size = EFX_MAC_PDU(sc->ifnet->if_mtu);

	/* Calculate the receive packet buffer size. */
	sc->rx_prefix_size = encp->enc_rx_prefix_size;

	/* Ensure IP headers are 32bit aligned */
	hdrlen = sc->rx_prefix_size + sizeof (struct ether_header);
	sc->rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen;

	sc->rx_buffer_size += sc->rx_buffer_align;

	/* Align end of packet buffer for RX DMA end padding */
	align = MAX(1, encp->enc_rx_buf_align_end);
	EFSYS_ASSERT(ISP2(align));
	sc->rx_buffer_size = P2ROUNDUP(sc->rx_buffer_size, align);

	/*
	 * Standard mbuf zones only guarantee pointer-size alignment;
	 * we need extra space to align to the cache line
	 */
	reserved = sc->rx_buffer_size + CACHE_LINE_SIZE;

	/* Select zone for packet buffers */
	if (reserved <= MCLBYTES)
		sc->rx_cluster_size = MCLBYTES;
	else if (reserved <= MJUMPAGESIZE)
		sc->rx_cluster_size = MJUMPAGESIZE;
	else if (reserved <= MJUM9BYTES)
		sc->rx_cluster_size = MJUM9BYTES;
	else
		sc->rx_cluster_size = MJUM16BYTES;

	/*
	 * Set up the scale table.  Enable all hash types and hash insertion.
	 */
	for (index = 0; index < nitems(sc->rx_indir_table); index++)
#ifdef RSS
		sc->rx_indir_table[index] =
			rss_get_indirection_to_bucket(index) % sc->rxq_count;
#else
		sc->rx_indir_table[index] = index % sc->rxq_count;
#endif
	if ((rc = efx_rx_scale_tbl_set(sc->enp, sc->rx_indir_table,
				       nitems(sc->rx_indir_table))) != 0)
		goto fail;
	(void)efx_rx_scale_mode_set(sc->enp, EFX_RX_HASHALG_TOEPLITZ,
	    (1 << EFX_RX_HASH_IPV4) | (1 << EFX_RX_HASH_TCPIPV4) |
	    (1 << EFX_RX_HASH_IPV6) | (1 << EFX_RX_HASH_TCPIPV6), B_TRUE);

#ifdef RSS
	rss_getkey(toep_key);
#endif
	if ((rc = efx_rx_scale_key_set(sc->enp, toep_key,
				       sizeof(toep_key))) != 0)
		goto fail;

	/* Start the receive queue(s). */
	for (index = 0; index < sc->rxq_count; index++) {
		if ((rc = sfxge_rx_qstart(sc, index)) != 0)
			goto fail2;
	}

	rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common,
					    sc->intr.n_alloc > 1);
	if (rc != 0)
		goto fail3;

	return (0);

fail3:
fail2:
	while (--index >= 0)
		sfxge_rx_qstop(sc, index);

fail:
	efx_rx_fini(sc->enp);

	return (rc);
}
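
/*
 * The buffer sizing in sfxge_rx_start() works in three steps: pad the
 * front so the IP header lands on a 4-byte boundary behind the hardware
 * prefix, round the total up to the NIC's end-of-buffer alignment, then
 * reserve one extra cache line so sfxge_rx_qfill() can realign m_data to
 * CACHE_LINE_SIZE within whichever standard cluster zone is selected.
 */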
	/* Select zone for packet buffers */
	if (reserved <= MCLBYTES)
		sc->rx_cluster_size = MCLBYTES;
	else if (reserved <= MJUMPAGESIZE)
		sc->rx_cluster_size = MJUMPAGESIZE;
	else if (reserved <= MJUM9BYTES)
		sc->rx_cluster_size = MJUM9BYTES;
	else
		sc->rx_cluster_size = MJUM16BYTES;

	/*
	 * Set up the scale table.  Enable all hash types and hash insertion.
	 */
	for (index = 0; index < nitems(sc->rx_indir_table); index++)
#ifdef RSS
		sc->rx_indir_table[index] =
			rss_get_indirection_to_bucket(index) % sc->rxq_count;
#else
		sc->rx_indir_table[index] = index % sc->rxq_count;
#endif
	if ((rc = efx_rx_scale_tbl_set(sc->enp, sc->rx_indir_table,
	    nitems(sc->rx_indir_table))) != 0)
		goto fail;
	(void)efx_rx_scale_mode_set(sc->enp, EFX_RX_HASHALG_TOEPLITZ,
	    (1 << EFX_RX_HASH_IPV4) | (1 << EFX_RX_HASH_TCPIPV4) |
	    (1 << EFX_RX_HASH_IPV6) | (1 << EFX_RX_HASH_TCPIPV6), B_TRUE);

#ifdef RSS
	rss_getkey(toep_key);
#endif
	if ((rc = efx_rx_scale_key_set(sc->enp, toep_key,
	    sizeof(toep_key))) != 0)
		goto fail;

	/* Start the receive queue(s). */
	for (index = 0; index < sc->rxq_count; index++) {
		if ((rc = sfxge_rx_qstart(sc, index)) != 0)
			goto fail2;
	}

	rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common,
	    sc->intr.n_alloc > 1);
	if (rc != 0)
		goto fail3;

	return (0);

fail3:
fail2:
	while (--index >= 0)
		sfxge_rx_qstop(sc, index);

fail:
	efx_rx_fini(sc->enp);

	return (rc);
}

#ifdef SFXGE_LRO

static void sfxge_lro_init(struct sfxge_rxq *rxq)
{
	struct sfxge_lro_state *st = &rxq->lro;
	unsigned i;

	st->conns_mask = lro_table_size - 1;
	KASSERT(!((st->conns_mask + 1) & st->conns_mask),
	    ("lro_table_size must be a power of 2"));
	st->sc = rxq->sc;
	st->conns = malloc((st->conns_mask + 1) * sizeof(st->conns[0]),
	    M_SFXGE, M_WAITOK);
	st->conns_n = malloc((st->conns_mask + 1) * sizeof(st->conns_n[0]),
	    M_SFXGE, M_WAITOK);
	for (i = 0; i <= st->conns_mask; ++i) {
		TAILQ_INIT(&st->conns[i]);
		st->conns_n[i] = 0;
	}
	LIST_INIT(&st->active_conns);
	TAILQ_INIT(&st->free_conns);
}
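
/*
 * Tear down the LRO state for a queue: any connections still linked
 * into the hash table are dropped (together with their held mbufs),
 * the free-connection list is emptied, and the tables are freed.
 */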
static void
sfxge_lro_fini(struct sfxge_rxq *rxq)
{
	struct sfxge_lro_state *st = &rxq->lro;
	struct sfxge_lro_conn *c;
	unsigned i;

	/* Return cleanly if sfxge_lro_init() has not been called. */
	if (st->conns == NULL)
		return;

	KASSERT(LIST_EMPTY(&st->active_conns), ("found active connections"));

	for (i = 0; i <= st->conns_mask; ++i) {
		while (!TAILQ_EMPTY(&st->conns[i])) {
			c = TAILQ_LAST(&st->conns[i], sfxge_lro_tailq);
			sfxge_lro_drop(rxq, c);
		}
	}

	while (!TAILQ_EMPTY(&st->free_conns)) {
		c = TAILQ_FIRST(&st->free_conns);
		TAILQ_REMOVE(&st->free_conns, c, link);
		KASSERT(!c->mbuf, ("found orphaned mbuf"));
		free(c, M_SFXGE);
	}

	free(st->conns_n, M_SFXGE);
	free(st->conns, M_SFXGE);
	st->conns = NULL;
}

#else

static void
sfxge_lro_init(struct sfxge_rxq *rxq)
{
}

static void
sfxge_lro_fini(struct sfxge_rxq *rxq)
{
}

#endif	/* SFXGE_LRO */
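
/*
 * Release the per-queue software state allocated by sfxge_rx_qinit():
 * the software descriptor array, the LRO state and the DMA ring.
 */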
static void
sfxge_rx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;

	rxq = sc->rxq[index];

	KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED,
	    ("rxq->init_state != SFXGE_RXQ_INITIALIZED"));

	/* Free the context array and the flow table. */
	free(rxq->queue, M_SFXGE);
	sfxge_lro_fini(rxq);

	/* Release DMA memory. */
	sfxge_dma_free(&rxq->mem);

	sc->rxq[index] = NULL;

	free(rxq, M_SFXGE);
}
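
/*
 * Per-queue initialization.  The descriptor count is a power of two,
 * so ring indices can wrap cheaply by masking with ptr_mask
 * (entries - 1) instead of taking a modulo.
 */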
static int
sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_rxq *rxq;
	struct sfxge_evq *evq;
	efsys_mem_t *esmp;
	int rc;

	KASSERT(index < sc->rxq_count, ("index >= %d", sc->rxq_count));

	rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK);
	rxq->sc = sc;
	rxq->index = index;
	rxq->entries = sc->rxq_entries;
	rxq->ptr_mask = rxq->entries - 1;
	rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries);

	sc->rxq[index] = rxq;
	esmp = &rxq->mem;

	evq = sc->evq[index];

	/* Allocate and zero DMA space. */
	if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries),
	    &rxq->buf_base_id);

	/* Allocate the context array and the flow table. */
	rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries,
	    M_SFXGE, M_WAITOK | M_ZERO);
	sfxge_lro_init(rxq);

	callout_init(&rxq->refill_callout, 1);

	rxq->init_state = SFXGE_RXQ_INITIALIZED;

	return (0);
}

static const struct {
	const char *name;
	size_t offset;
} sfxge_rx_stats[] = {
#define	SFXGE_RX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_rxq, member) }
#ifdef SFXGE_LRO
	SFXGE_RX_STAT(lro_merges, lro.n_merges),
	SFXGE_RX_STAT(lro_bursts, lro.n_bursts),
	SFXGE_RX_STAT(lro_slow_start, lro.n_slow_start),
	SFXGE_RX_STAT(lro_misorder, lro.n_misorder),
	SFXGE_RX_STAT(lro_too_many, lro.n_too_many),
	SFXGE_RX_STAT(lro_new_stream, lro.n_new_stream),
	SFXGE_RX_STAT(lro_drop_idle, lro.n_drop_idle),
	SFXGE_RX_STAT(lro_drop_closed, lro.n_drop_closed)
#endif
};
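
/*
 * Each entry in sfxge_rx_stats[] is exposed as a read-only sysctl.
 * The handler sums the per-queue counter found at the recorded
 * structure offset across all receive queues, so the value reported
 * is the adapter-wide total.
 */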
static int
sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned int sum, index;

	/* Sum across all RX queues */
	sum = 0;
	for (index = 0; index < sc->rxq_count; index++)
		sum += *(unsigned int *)((caddr_t)sc->rxq[index] +
		    sfxge_rx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}

static void
sfxge_rx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0; id < nitems(sfxge_rx_stats); id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_rx_stats[id].name,
			CTLTYPE_UINT|CTLFLAG_RD,
			sc, id, sfxge_rx_stat_handler, "IU",
			"");
	}
}

void
sfxge_rx_fini(struct sfxge_softc *sc)
{
	int index;

	index = sc->rxq_count;
	while (--index >= 0)
		sfxge_rx_qfini(sc, index);

	sc->rxq_count = 0;
}
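
/*
 * Validate the LRO table-size tunable, then create one receive queue
 * per allocated interrupt and register the statistics sysctls.  On
 * failure, any queues initialized so far are torn down again.
 */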
int
sfxge_rx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

#ifdef SFXGE_LRO
	if (!ISP2(lro_table_size)) {
		log(LOG_ERR, "%s=%u must be a power of 2",
		    SFXGE_LRO_PARAM(table_size), lro_table_size);
		rc = EINVAL;
		goto fail_lro_table_size;
	}

	if (lro_idle_ticks == 0)
		lro_idle_ticks = hz / 10 + 1;	/* 100 ms */
#endif

	intr = &sc->intr;

	sc->rxq_count = intr->n_alloc;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Initialize the receive queue(s) - one per interrupt. */
	for (index = 0; index < sc->rxq_count; index++) {
		if ((rc = sfxge_rx_qinit(sc, index)) != 0)
			goto fail;
	}

	sfxge_rx_stat_init(sc);

	return (0);

fail:
	/* Tear down the receive queue(s). */
	while (--index >= 0)
		sfxge_rx_qfini(sc, index);

	sc->rxq_count = 0;

#ifdef SFXGE_LRO
fail_lro_table_size:
#endif
	return (rc);
}