/*-
 * Copyright (c) 2015-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_MEDFORD

static	__checkReturn	efx_rc_t
medford_nic_get_required_pcie_bandwidth(
	__in		efx_nic_t *enp,
	__out		uint32_t *bandwidth_mbpsp)
{
	uint32_t port_modes;
	uint32_t current_mode;
	uint32_t bandwidth;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
	    &current_mode)) != 0) {
		/* No port mode info available. */
		bandwidth = 0;
		goto out;
	}

	if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
	    &bandwidth)) != 0)
		goto fail1;

out:
	*bandwidth_mbpsp = bandwidth;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
medford_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t mask;
	uint32_t sysclk, dpcpu_clk;
	uint32_t base, nvec;
	uint32_t end_padding;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * FIXME: Likely to be incomplete and incorrect.
	 * Parts of this should be shared with Huntington.
	 */

	/* Medford has a fixed 8Kbyte VI window size */
	EFX_STATIC_ASSERT(ER_DZ_EVQ_RPTR_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_EVQ_TMR_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_RX_DESC_UPD_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_TX_DESC_UPD_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_TX_PIOBUF_STEP == 8192);

	EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192);
	encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */

	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
		/*
		 * Interrupt testing does not work for VFs. See bug50084 and
		 * bug71432 comment 21.
		 */
		encp->enc_bug41750_workaround = B_TRUE;
	}

	/* Chained multicast is always enabled on Medford */
	encp->enc_bug26807_workaround = B_TRUE;

	/*
	 * If the bug61265 workaround is enabled, then interrupt holdoff timers
	 * cannot be controlled by timer table writes, so MCDI must be used
	 * (timer table writes can still be used for wakeup timers).
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
	    NULL);
	if ((rc == 0) || (rc == EACCES))
		encp->enc_bug61265_workaround = B_TRUE;
	else if ((rc == ENOTSUP) || (rc == ENOENT))
		encp->enc_bug61265_workaround = B_FALSE;
	else
		goto fail1;

	/* Get clock frequencies (in MHz). */
	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
		goto fail2;

	/*
	 * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
	 */
	encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
	encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
	    FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;

	/* Alignment for receive packet DMA buffers */
	encp->enc_rx_buf_align_start = 1;

	/* Get the RX DMA end padding alignment configuration */
	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
		if (rc != EACCES)
			goto fail3;

		/* Assume largest tail padding size supported by hardware */
		end_padding = 256;
	}
	encp->enc_rx_buf_align_end = end_padding;

	/* Alignment for WPTR updates */
	encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;

	/*
	 * Maximum number of exclusive RSS contexts which can be allocated. The
	 * hardware supports 64, but 6 are reserved for shared contexts. They
	 * are a global resource so not all may be available.
	 */
	encp->enc_rx_scale_max_exclusive_contexts = 58;

	encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
	/* No boundary crossing limits */
	encp->enc_tx_dma_desc_boundary = 0;

	/*
	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
	 * resources (allocated to this PCIe function), which is zero until
	 * after we have allocated VIs.
	 */
	encp->enc_evq_limit = 1024;
	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;

	/*
	 * The maximum supported transmit queue size is 2048. TXQs with 4096
	 * descriptors are not supported as the top bit is used for vfifo
	 * stuffing.
	 */
	encp->enc_txq_max_ndescs = 2048;

	encp->enc_buftbl_limit = 0xFFFFFFFF;

	EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
	encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
	encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
	encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;

	/*
	 * Get the current privilege mask. Note that this may be modified
	 * dynamically, so this value is informational only. DO NOT use
	 * the privilege mask to check for sufficient privileges, as that
	 * can result in time-of-check/time-of-use bugs.
	 */
	if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
		goto fail4;
	encp->enc_privilege_mask = mask;

	/* Get interrupt vector limits */
	if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
		if (EFX_PCI_FUNCTION_IS_PF(encp))
			goto fail5;

		/* Ignore error (cannot query vector limits from a VF). */
		base = 0;
		nvec = 1024;
	}
	encp->enc_intr_vec_base = base;
	encp->enc_intr_limit = nvec;

	/*
	 * Maximum number of bytes into the frame the TCP header can start for
	 * firmware assisted TSO to work.
	 */
	encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;

	/*
	 * Medford stores a single global copy of VPD, not per-PF as on
	 * Huntington.
	 */
	encp->enc_vpd_is_global = B_TRUE;

	rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
	if (rc != 0)
		goto fail6;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	return (0);

fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_MEDFORD */