/*-
 * Copyright (c) 2015-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_MEDFORD

static	__checkReturn	efx_rc_t
medford_nic_get_required_pcie_bandwidth(
	__in		efx_nic_t *enp,
	__out		uint32_t *bandwidth_mbpsp)
{
	uint32_t port_modes;
	uint32_t current_mode;
	uint32_t bandwidth;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
		    &current_mode)) != 0) {
		/* No port mode info available. */
		bandwidth = 0;
		goto out;
	}

	if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
		    &bandwidth)) != 0)
		goto fail1;

out:
	*bandwidth_mbpsp = bandwidth;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
medford_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t sysclk, dpcpu_clk;
	uint32_t end_padding;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * FIXME: Likely to be incomplete and incorrect.
	 * Parts of this should be shared with Huntington.
	 */

	/* Medford has a fixed 8Kbyte VI window size */
	EFX_STATIC_ASSERT(ER_DZ_EVQ_RPTR_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_EVQ_TMR_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_RX_DESC_UPD_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_TX_DESC_UPD_REG_STEP == 8192);
	EFX_STATIC_ASSERT(ER_DZ_TX_PIOBUF_STEP == 8192);

	EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192);
	encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */


	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
		/*
		 * Interrupt testing does not work for VFs. See bug50084 and
		 * bug71432 comment 21.
		 */
		encp->enc_bug41750_workaround = B_TRUE;
	}

	/* Chained multicast is always enabled on Medford */
	encp->enc_bug26807_workaround = B_TRUE;

	/*
	 * If the bug61265 workaround is enabled, then interrupt holdoff timers
	 * cannot be controlled by timer table writes, so MCDI must be used
	 * (timer table writes can still be used for wakeup timers).
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
	    NULL);
	if ((rc == 0) || (rc == EACCES))
		encp->enc_bug61265_workaround = B_TRUE;
	else if ((rc == ENOTSUP) || (rc == ENOENT))
		encp->enc_bug61265_workaround = B_FALSE;
	else
		goto fail1;

	/* Get clock frequencies (in MHz). */
	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
		goto fail2;

	/*
	 * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
	 */
	encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
	encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
		    FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;

	/* Alignment for receive packet DMA buffers */
	encp->enc_rx_buf_align_start = 1;

	/* Get the RX DMA end padding alignment configuration */
	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
		if (rc != EACCES)
			goto fail3;

		/* Assume largest tail padding size supported by hardware */
		end_padding = 256;
	}
	encp->enc_rx_buf_align_end = end_padding;

	/*
	 * The maximum supported transmit queue size is 2048. TXQs with 4096
	 * descriptors are not supported as the top bit is used for vfifo
	 * stuffing.
	 */
	encp->enc_txq_max_ndescs = 2048;

	EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
	encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
	encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
	encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;

	/*
	 * Medford stores a single global copy of VPD, not per-PF as on
	 * Huntington.
	 */
	encp->enc_vpd_is_global = B_TRUE;

	rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
	if (rc != 0)
		goto fail4;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_MEDFORD */