/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1

/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS	0
#define RSM_INS_FECN	1
#define RSM_INS_VNIC	2

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE		2ull
#define QW_SHIFT		6ull
/* QPN[7..1] */
#define QPN_WIDTH		7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW		0ull
#define LRH_BTH_BIT_OFFSET	48ull
#define LRH_BTH_OFFSET(off)	((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET	LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK		3ull
#define LRH_BTH_VALUE		2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW		0ull
#define LRH_SC_BIT_OFFSET	56ull
#define LRH_SC_OFFSET(off)	((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET	LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK		128ull
#define LRH_SC_VALUE		0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET	((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET	((1ull << QW_SHIFT) | (1ull))
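/*
 * Worked example of the match/select offset encoding above (comment
 * only, not used by the code): an RSM offset packs the quad-word index
 * above bit QW_SHIFT (6) and the bit offset within that quad word in
 * the low bits, so
 *	LRH_BTH_MATCH_OFFSET = (0ull << 6) | 48 = 48
 *	QPN_SELECT_OFFSET    = (1ull << 6) |  1 = 65
 */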
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW		0ull
#define L2_TYPE_BIT_OFFSET	61ull
#define L2_TYPE_OFFSET(off)	((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET	L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK		3ull
#define L2_16B_VALUE		2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW		1ull
#define L4_TYPE_BIT_OFFSET	0ull
#define L4_TYPE_OFFSET(off)	((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET	L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK	0xFFull
#define L4_16B_ETH_VALUE	0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET	((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET		((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)
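/*
 * Illustrative use of SC2VL_VAL (comment only, not an actual table
 * write from this section): an identity SC0..SC7 -> VL0..VL7 mapping
 * for the SendSC2VLt0 CSR would be built as
 *	SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)
 * which ORs each VL value into that SC's shift position in the 64-bit
 * register image.  DC_SC_VL_VAL builds the DCC-side table the same way.
 */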
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		  | CCE_STATUS_RXE_FROZE_SMASK \
		  | CCE_STATUS_TXE_FROZE_SMASK \
		  | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		      | CCE_STATUS_TXE_PAUSED_SMASK \
		      | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
FLAG_ENTRY0("CceRcplAsyncFifoParityErr", 347 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK), 348 /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr", 349 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK), 350 /*36*/ FLAG_ENTRY0("CceMsixTableCorErr", 351 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK), 352 /*37*/ FLAG_ENTRY0("CceMsixTableUncErr", 353 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK), 354 /*38*/ FLAG_ENTRY0("CceIntMapCorErr", 355 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK), 356 /*39*/ FLAG_ENTRY0("CceIntMapUncErr", 357 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK), 358 /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr", 359 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK), 360 /*41-63 reserved*/ 361 }; 362 363 /* 364 * Misc Error flags 365 */ 366 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK 367 static struct flag_table misc_err_status_flags[] = { 368 /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)), 369 /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)), 370 /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)), 371 /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)), 372 /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)), 373 /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)), 374 /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)), 375 /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)), 376 /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)), 377 /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)), 378 /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)), 379 /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)), 380 /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL)) 381 }; 382 383 /* 384 * TXE PIO Error flags and consequences 385 */ 386 static struct flag_table pio_err_status_flags[] = { 387 /* 0*/ FLAG_ENTRY("PioWriteBadCtxt", 388 SEC_WRITE_DROPPED, 389 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK), 390 /* 1*/ FLAG_ENTRY("PioWriteAddrParity", 391 SEC_SPC_FREEZE, 392 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK), 393 /* 2*/ FLAG_ENTRY("PioCsrParity", 394 SEC_SPC_FREEZE, 395 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK), 396 /* 3*/ FLAG_ENTRY("PioSbMemFifo0", 397 SEC_SPC_FREEZE, 398 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK), 399 /* 4*/ FLAG_ENTRY("PioSbMemFifo1", 400 SEC_SPC_FREEZE, 401 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK), 402 /* 5*/ FLAG_ENTRY("PioPccFifoParity", 403 SEC_SPC_FREEZE, 404 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK), 405 /* 6*/ FLAG_ENTRY("PioPecFifoParity", 406 SEC_SPC_FREEZE, 407 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK), 408 /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity", 409 SEC_SPC_FREEZE, 410 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK), 411 /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity", 412 SEC_SPC_FREEZE, 413 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK), 414 /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr", 415 SEC_SPC_FREEZE, 416 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK), 417 /*10*/ FLAG_ENTRY("PioSmPktResetParity", 418 SEC_SPC_FREEZE, 419 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK), 420 /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc", 421 SEC_SPC_FREEZE, 422 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK), 423 /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc", 424 SEC_SPC_FREEZE, 425 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK), 426 /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor", 427 0, 428 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK), 429 /*14*/ 
FLAG_ENTRY("PioVlLenMemBank1Cor", 430 0, 431 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK), 432 /*15*/ FLAG_ENTRY("PioCreditRetFifoParity", 433 SEC_SPC_FREEZE, 434 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK), 435 /*16*/ FLAG_ENTRY("PioPpmcPblFifo", 436 SEC_SPC_FREEZE, 437 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK), 438 /*17*/ FLAG_ENTRY("PioInitSmIn", 439 0, 440 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK), 441 /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm", 442 SEC_SPC_FREEZE, 443 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK), 444 /*19*/ FLAG_ENTRY("PioHostAddrMemUnc", 445 SEC_SPC_FREEZE, 446 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK), 447 /*20*/ FLAG_ENTRY("PioHostAddrMemCor", 448 0, 449 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK), 450 /*21*/ FLAG_ENTRY("PioWriteDataParity", 451 SEC_SPC_FREEZE, 452 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK), 453 /*22*/ FLAG_ENTRY("PioStateMachine", 454 SEC_SPC_FREEZE, 455 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK), 456 /*23*/ FLAG_ENTRY("PioWriteQwValidParity", 457 SEC_WRITE_DROPPED | SEC_SPC_FREEZE, 458 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK), 459 /*24*/ FLAG_ENTRY("PioBlockQwCountParity", 460 SEC_WRITE_DROPPED | SEC_SPC_FREEZE, 461 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK), 462 /*25*/ FLAG_ENTRY("PioVlfVlLenParity", 463 SEC_SPC_FREEZE, 464 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK), 465 /*26*/ FLAG_ENTRY("PioVlfSopParity", 466 SEC_SPC_FREEZE, 467 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK), 468 /*27*/ FLAG_ENTRY("PioVlFifoParity", 469 SEC_SPC_FREEZE, 470 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK), 471 /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity", 472 SEC_SPC_FREEZE, 473 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK), 474 /*29*/ FLAG_ENTRY("PioPpmcSopLen", 475 SEC_SPC_FREEZE, 476 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK), 477 /*30-31 reserved*/ 478 /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity", 479 SEC_SPC_FREEZE, 480 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK), 481 /*33*/ FLAG_ENTRY("PioLastReturnedCntParity", 482 SEC_SPC_FREEZE, 483 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK), 484 /*34*/ FLAG_ENTRY("PioPccSopHeadParity", 485 SEC_SPC_FREEZE, 486 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK), 487 /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr", 488 SEC_SPC_FREEZE, 489 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK), 490 /*36-63 reserved*/ 491 }; 492 493 /* TXE PIO errors that cause an SPC freeze */ 494 #define ALL_PIO_FREEZE_ERR \ 495 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \ 496 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \ 497 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \ 498 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \ 499 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \ 500 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \ 501 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \ 502 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \ 503 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \ 504 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \ 505 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \ 506 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \ 507 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \ 508 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \ 509 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \ 510 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \ 
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
FLAG_ENTRY0("TxSdma5DisallowedPacketErr", 587 SEES(TX_SDMA5_DISALLOWED_PACKET)), 588 /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr", 589 SEES(TX_SDMA6_DISALLOWED_PACKET)), 590 /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr", 591 SEES(TX_SDMA7_DISALLOWED_PACKET)), 592 /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr", 593 SEES(TX_SDMA8_DISALLOWED_PACKET)), 594 /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr", 595 SEES(TX_SDMA9_DISALLOWED_PACKET)), 596 /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr", 597 SEES(TX_SDMA10_DISALLOWED_PACKET)), 598 /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr", 599 SEES(TX_SDMA11_DISALLOWED_PACKET)), 600 /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr", 601 SEES(TX_SDMA12_DISALLOWED_PACKET)), 602 /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr", 603 SEES(TX_SDMA13_DISALLOWED_PACKET)), 604 /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr", 605 SEES(TX_SDMA14_DISALLOWED_PACKET)), 606 /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr", 607 SEES(TX_SDMA15_DISALLOWED_PACKET)), 608 /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr", 609 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)), 610 /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr", 611 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)), 612 /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr", 613 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)), 614 /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr", 615 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)), 616 /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr", 617 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)), 618 /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr", 619 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)), 620 /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr", 621 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)), 622 /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr", 623 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)), 624 /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr", 625 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)), 626 /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)), 627 /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)), 628 /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)), 629 /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)), 630 /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)), 631 /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)), 632 /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)), 633 /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)), 634 /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)), 635 /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)), 636 /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)), 637 /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)), 638 /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)), 639 /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)), 640 /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)), 641 /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)), 642 /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)), 643 /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)), 644 /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)), 645 /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)), 646 /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)), 647 /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr", 648 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)), 649 /*63*/ 
FLAG_ENTRY0("TxReadPioMemoryCsrUncErr", 650 SEES(TX_READ_PIO_MEMORY_CSR_UNC)), 651 }; 652 653 /* 654 * TXE Egress Error Info flags 655 */ 656 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK 657 static struct flag_table egress_err_info_flags[] = { 658 /* 0*/ FLAG_ENTRY0("Reserved", 0ull), 659 /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)), 660 /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), 661 /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), 662 /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)), 663 /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)), 664 /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)), 665 /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)), 666 /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)), 667 /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)), 668 /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)), 669 /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)), 670 /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)), 671 /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)), 672 /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)), 673 /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)), 674 /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)), 675 /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)), 676 /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)), 677 /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)), 678 /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)), 679 /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)), 680 }; 681 682 /* TXE Egress errors that cause an SPC freeze */ 683 #define ALL_TXE_EGRESS_FREEZE_ERR \ 684 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \ 685 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \ 686 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \ 687 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \ 688 | SEES(TX_LAUNCH_CSR_PARITY) \ 689 | SEES(TX_SBRD_CTL_CSR_PARITY) \ 690 | SEES(TX_CONFIG_PARITY) \ 691 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \ 692 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \ 693 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \ 694 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \ 695 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \ 696 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \ 697 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \ 698 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \ 699 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \ 700 | SEES(TX_CREDIT_RETURN_PARITY)) 701 702 /* 703 * TXE Send error flags 704 */ 705 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK 706 static struct flag_table send_err_status_flags[] = { 707 /* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)), 708 /* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)), 709 /* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR)) 710 }; 711 712 /* 713 * TXE Send Context Error flags and consequences 714 */ 715 static struct flag_table sc_err_status_flags[] = { 716 /* 0*/ FLAG_ENTRY("InconsistentSop", 717 SEC_PACKET_DROPPED | SEC_SC_HALTED, 718 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK), 719 /* 1*/ FLAG_ENTRY("DisallowedPacket", 720 SEC_PACKET_DROPPED | SEC_SC_HALTED, 721 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK), 722 /* 2*/ FLAG_ENTRY("WriteCrossesBoundary", 723 SEC_WRITE_DROPPED | SEC_SC_HALTED, 724 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK), 725 /* 3*/ FLAG_ENTRY("WriteOverflow", 726 SEC_WRITE_DROPPED | SEC_SC_HALTED, 727 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK), 728 /* 4*/ FLAG_ENTRY("WriteOutOfBounds", 729 SEC_WRITE_DROPPED | SEC_SC_HALTED, 730 
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};
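/*
 * Worked example (comment only): the HOST_MSG field is a bitmask over
 * the table above, so a value of 0x0084 decodes to "BC SMA message"
 * (0x0004) plus "LinkUp achieved" (0x0080).
 */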
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers. Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
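/*
 * For reference (illustrative expansion, not additional code): the first
 * entry of misc_errs below, EE(CCE_ERR, handle_cce_err, "CceErr"),
 * expands to
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 */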
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)
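/*
 * Worked example of the addressing in the macros above (comment only):
 * each counter in an array is an 8-byte CSR, so entry N lives at
 * (array base + 8 * N); e.g. counter 3 of the 32-bit RXE array is read
 * at RCV_COUNTER_ARRAY32 + 0x18.
 */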
#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}

/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}

/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
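/*
 * Illustrative read-modify-write using the accessors above (hypothetical
 * offset/mask names, comment only):
 *
 *	u64 reg = read_csr(dd, SOME_CSR);
 *
 *	write_csr(dd, SOME_CSR, reg | SOME_FIELD_SMASK);
 *
 * read_csr() returns all 1's when the device is not present, so callers
 * probing optional hardware should treat ~0ull as "no mapping".
 */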
1361 */ 1362 void __iomem *get_csr_addr( 1363 const struct hfi1_devdata *dd, 1364 u32 offset) 1365 { 1366 if (dd->flags & HFI1_PRESENT) 1367 return hfi1_addr_from_offset(dd, offset); 1368 return NULL; 1369 } 1370 1371 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr, 1372 int mode, u64 value) 1373 { 1374 u64 ret; 1375 1376 if (mode == CNTR_MODE_R) { 1377 ret = read_csr(dd, csr); 1378 } else if (mode == CNTR_MODE_W) { 1379 write_csr(dd, csr, value); 1380 ret = value; 1381 } else { 1382 dd_dev_err(dd, "Invalid cntr register access mode"); 1383 return 0; 1384 } 1385 1386 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode); 1387 return ret; 1388 } 1389 1390 /* Dev Access */ 1391 static u64 dev_access_u32_csr(const struct cntr_entry *entry, 1392 void *context, int vl, int mode, u64 data) 1393 { 1394 struct hfi1_devdata *dd = context; 1395 u64 csr = entry->csr; 1396 1397 if (entry->flags & CNTR_SDMA) { 1398 if (vl == CNTR_INVALID_VL) 1399 return 0; 1400 csr += 0x100 * vl; 1401 } else { 1402 if (vl != CNTR_INVALID_VL) 1403 return 0; 1404 } 1405 return read_write_csr(dd, csr, mode, data); 1406 } 1407 1408 static u64 access_sde_err_cnt(const struct cntr_entry *entry, 1409 void *context, int idx, int mode, u64 data) 1410 { 1411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1412 1413 if (dd->per_sdma && idx < dd->num_sdma) 1414 return dd->per_sdma[idx].err_cnt; 1415 return 0; 1416 } 1417 1418 static u64 access_sde_int_cnt(const struct cntr_entry *entry, 1419 void *context, int idx, int mode, u64 data) 1420 { 1421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1422 1423 if (dd->per_sdma && idx < dd->num_sdma) 1424 return dd->per_sdma[idx].sdma_int_cnt; 1425 return 0; 1426 } 1427 1428 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry, 1429 void *context, int idx, int mode, u64 data) 1430 { 1431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1432 1433 if (dd->per_sdma && idx < dd->num_sdma) 1434 return dd->per_sdma[idx].idle_int_cnt; 1435 return 0; 1436 } 1437 1438 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry, 1439 void *context, int idx, int mode, 1440 u64 data) 1441 { 1442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1443 1444 if (dd->per_sdma && idx < dd->num_sdma) 1445 return dd->per_sdma[idx].progress_int_cnt; 1446 return 0; 1447 } 1448 1449 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context, 1450 int vl, int mode, u64 data) 1451 { 1452 struct hfi1_devdata *dd = context; 1453 1454 u64 val = 0; 1455 u64 csr = entry->csr; 1456 1457 if (entry->flags & CNTR_VL) { 1458 if (vl == CNTR_INVALID_VL) 1459 return 0; 1460 csr += 8 * vl; 1461 } else { 1462 if (vl != CNTR_INVALID_VL) 1463 return 0; 1464 } 1465 1466 val = read_write_csr(dd, csr, mode, data); 1467 return val; 1468 } 1469 1470 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context, 1471 int vl, int mode, u64 data) 1472 { 1473 struct hfi1_devdata *dd = context; 1474 u32 csr = entry->csr; 1475 int ret = 0; 1476 1477 if (vl != CNTR_INVALID_VL) 1478 return 0; 1479 if (mode == CNTR_MODE_R) 1480 ret = read_lcb_csr(dd, csr, &data); 1481 else if (mode == CNTR_MODE_W) 1482 ret = write_lcb_csr(dd, csr, data); 1483 1484 if (ret) { 1485 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr); 1486 return 0; 1487 } 1488 1489 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode); 1490 return data; 1491 } 1492 1493 /* Port Access */ 1494 static u64 port_access_u32_csr(const struct 
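/*
 * Port accessors such as this one receive a struct hfi1_pportdata as
 * the opaque context and reach the device via ppd->dd, while the
 * dev_* accessors above are handed the struct hfi1_devdata directly;
 * both paths funnel into read_write_csr().
 */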
cntr_entry *entry, void *context, 1495 int vl, int mode, u64 data) 1496 { 1497 struct hfi1_pportdata *ppd = context; 1498 1499 if (vl != CNTR_INVALID_VL) 1500 return 0; 1501 return read_write_csr(ppd->dd, entry->csr, mode, data); 1502 } 1503 1504 static u64 port_access_u64_csr(const struct cntr_entry *entry, 1505 void *context, int vl, int mode, u64 data) 1506 { 1507 struct hfi1_pportdata *ppd = context; 1508 u64 val; 1509 u64 csr = entry->csr; 1510 1511 if (entry->flags & CNTR_VL) { 1512 if (vl == CNTR_INVALID_VL) 1513 return 0; 1514 csr += 8 * vl; 1515 } else { 1516 if (vl != CNTR_INVALID_VL) 1517 return 0; 1518 } 1519 val = read_write_csr(ppd->dd, csr, mode, data); 1520 return val; 1521 } 1522 1523 /* Software defined */ 1524 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode, 1525 u64 data) 1526 { 1527 u64 ret; 1528 1529 if (mode == CNTR_MODE_R) { 1530 ret = *cntr; 1531 } else if (mode == CNTR_MODE_W) { 1532 *cntr = data; 1533 ret = data; 1534 } else { 1535 dd_dev_err(dd, "Invalid cntr sw access mode"); 1536 return 0; 1537 } 1538 1539 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode); 1540 1541 return ret; 1542 } 1543 1544 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context, 1545 int vl, int mode, u64 data) 1546 { 1547 struct hfi1_pportdata *ppd = context; 1548 1549 if (vl != CNTR_INVALID_VL) 1550 return 0; 1551 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); 1552 } 1553 1554 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context, 1555 int vl, int mode, u64 data) 1556 { 1557 struct hfi1_pportdata *ppd = context; 1558 1559 if (vl != CNTR_INVALID_VL) 1560 return 0; 1561 return read_write_sw(ppd->dd, &ppd->link_up, mode, data); 1562 } 1563 1564 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry, 1565 void *context, int vl, int mode, 1566 u64 data) 1567 { 1568 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; 1569 1570 if (vl != CNTR_INVALID_VL) 1571 return 0; 1572 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); 1573 } 1574 1575 static u64 access_sw_xmit_discards(const struct cntr_entry *entry, 1576 void *context, int vl, int mode, u64 data) 1577 { 1578 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; 1579 u64 zero = 0; 1580 u64 *counter; 1581 1582 if (vl == CNTR_INVALID_VL) 1583 counter = &ppd->port_xmit_discards; 1584 else if (vl >= 0 && vl < C_VL_COUNT) 1585 counter = &ppd->port_xmit_discards_vl[vl]; 1586 else 1587 counter = &zero; 1588 1589 return read_write_sw(ppd->dd, counter, mode, data); 1590 } 1591 1592 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry, 1593 void *context, int vl, int mode, 1594 u64 data) 1595 { 1596 struct hfi1_pportdata *ppd = context; 1597 1598 if (vl != CNTR_INVALID_VL) 1599 return 0; 1600 1601 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, 1602 mode, data); 1603 } 1604 1605 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry, 1606 void *context, int vl, int mode, u64 data) 1607 { 1608 struct hfi1_pportdata *ppd = context; 1609 1610 if (vl != CNTR_INVALID_VL) 1611 return 0; 1612 1613 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, 1614 mode, data); 1615 } 1616 1617 u64 get_all_cpu_total(u64 __percpu *cntr) 1618 { 1619 int cpu; 1620 u64 counter = 0; 1621 1622 for_each_possible_cpu(cpu) 1623 counter += *per_cpu_ptr(cntr, cpu); 1624 return counter; 1625 } 1626 1627 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val, 1628 u64 
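/*
 * The per-CPU counters are never cleared; "zeroing" records the
 * current aggregate in *z_val and subsequent reads report the delta.
 * A minimal sketch of the same baseline idiom (names hypothetical):
 *
 *	read: total = get_all_cpu_total(foo) - z_foo;
 *	zero: z_foo = get_all_cpu_total(foo);
 */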
__percpu *cntr, 1629 int vl, int mode, u64 data) 1630 { 1631 u64 ret = 0; 1632 1633 if (vl != CNTR_INVALID_VL) 1634 return 0; 1635 1636 if (mode == CNTR_MODE_R) { 1637 ret = get_all_cpu_total(cntr) - *z_val; 1638 } else if (mode == CNTR_MODE_W) { 1639 /* A write can only zero the counter */ 1640 if (data == 0) 1641 *z_val = get_all_cpu_total(cntr); 1642 else 1643 dd_dev_err(dd, "Per CPU cntrs can only be zeroed"); 1644 } else { 1645 dd_dev_err(dd, "Invalid cntr sw cpu access mode"); 1646 return 0; 1647 } 1648 1649 return ret; 1650 } 1651 1652 static u64 access_sw_cpu_intr(const struct cntr_entry *entry, 1653 void *context, int vl, int mode, u64 data) 1654 { 1655 struct hfi1_devdata *dd = context; 1656 1657 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, 1658 mode, data); 1659 } 1660 1661 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry, 1662 void *context, int vl, int mode, u64 data) 1663 { 1664 struct hfi1_devdata *dd = context; 1665 1666 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, 1667 mode, data); 1668 } 1669 1670 static u64 access_sw_pio_wait(const struct cntr_entry *entry, 1671 void *context, int vl, int mode, u64 data) 1672 { 1673 struct hfi1_devdata *dd = context; 1674 1675 return dd->verbs_dev.n_piowait; 1676 } 1677 1678 static u64 access_sw_pio_drain(const struct cntr_entry *entry, 1679 void *context, int vl, int mode, u64 data) 1680 { 1681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1682 1683 return dd->verbs_dev.n_piodrain; 1684 } 1685 1686 static u64 access_sw_vtx_wait(const struct cntr_entry *entry, 1687 void *context, int vl, int mode, u64 data) 1688 { 1689 struct hfi1_devdata *dd = context; 1690 1691 return dd->verbs_dev.n_txwait; 1692 } 1693 1694 static u64 access_sw_kmem_wait(const struct cntr_entry *entry, 1695 void *context, int vl, int mode, u64 data) 1696 { 1697 struct hfi1_devdata *dd = context; 1698 1699 return dd->verbs_dev.n_kmem_wait; 1700 } 1701 1702 static u64 access_sw_send_schedule(const struct cntr_entry *entry, 1703 void *context, int vl, int mode, u64 data) 1704 { 1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1706 1707 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, 1708 mode, data); 1709 } 1710 1711 /* Software counters for the error status bits within MISC_ERR_STATUS */ 1712 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry, 1713 void *context, int vl, int mode, 1714 u64 data) 1715 { 1716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1717 1718 return dd->misc_err_status_cnt[12]; 1719 } 1720 1721 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry, 1722 void *context, int vl, int mode, 1723 u64 data) 1724 { 1725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1726 1727 return dd->misc_err_status_cnt[11]; 1728 } 1729 1730 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry, 1731 void *context, int vl, int mode, 1732 u64 data) 1733 { 1734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1735 1736 return dd->misc_err_status_cnt[10]; 1737 } 1738 1739 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry, 1740 void *context, int vl, 1741 int mode, u64 data) 1742 { 1743 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1744 1745 return dd->misc_err_status_cnt[9]; 1746 } 1747 1748 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry, 1749 void *context, int vl, int mode, 1750 u64 data) 1751 { 1752 
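	/*
	 * As in the neighboring accessors, the array index is the bit
	 * position within MISC_ERR_STATUS that the software counter
	 * shadows; this one reports bit 8.
	 */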
struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1753 1754 return dd->misc_err_status_cnt[8]; 1755 } 1756 1757 static u64 access_misc_efuse_read_bad_addr_err_cnt( 1758 const struct cntr_entry *entry, 1759 void *context, int vl, int mode, u64 data) 1760 { 1761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1762 1763 return dd->misc_err_status_cnt[7]; 1764 } 1765 1766 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry, 1767 void *context, int vl, 1768 int mode, u64 data) 1769 { 1770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1771 1772 return dd->misc_err_status_cnt[6]; 1773 } 1774 1775 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry, 1776 void *context, int vl, int mode, 1777 u64 data) 1778 { 1779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1780 1781 return dd->misc_err_status_cnt[5]; 1782 } 1783 1784 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry, 1785 void *context, int vl, int mode, 1786 u64 data) 1787 { 1788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1789 1790 return dd->misc_err_status_cnt[4]; 1791 } 1792 1793 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry, 1794 void *context, int vl, 1795 int mode, u64 data) 1796 { 1797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1798 1799 return dd->misc_err_status_cnt[3]; 1800 } 1801 1802 static u64 access_misc_csr_write_bad_addr_err_cnt( 1803 const struct cntr_entry *entry, 1804 void *context, int vl, int mode, u64 data) 1805 { 1806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1807 1808 return dd->misc_err_status_cnt[2]; 1809 } 1810 1811 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 1812 void *context, int vl, 1813 int mode, u64 data) 1814 { 1815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1816 1817 return dd->misc_err_status_cnt[1]; 1818 } 1819 1820 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry, 1821 void *context, int vl, int mode, 1822 u64 data) 1823 { 1824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1825 1826 return dd->misc_err_status_cnt[0]; 1827 } 1828 1829 /* 1830 * Software counter for the aggregate of 1831 * individual CceErrStatus counters 1832 */ 1833 static u64 access_sw_cce_err_status_aggregated_cnt( 1834 const struct cntr_entry *entry, 1835 void *context, int vl, int mode, u64 data) 1836 { 1837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1838 1839 return dd->sw_cce_err_status_aggregate; 1840 } 1841 1842 /* 1843 * Software counters corresponding to each of the 1844 * error status bits within CceErrStatus 1845 */ 1846 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry, 1847 void *context, int vl, int mode, 1848 u64 data) 1849 { 1850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1851 1852 return dd->cce_err_status_cnt[40]; 1853 } 1854 1855 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry, 1856 void *context, int vl, int mode, 1857 u64 data) 1858 { 1859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1860 1861 return dd->cce_err_status_cnt[39]; 1862 } 1863 1864 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry, 1865 void *context, int vl, int mode, 1866 u64 data) 1867 { 1868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1869 1870 return dd->cce_err_status_cnt[38]; 1871 } 1872 1873 static u64 
access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry, 1874 void *context, int vl, int mode, 1875 u64 data) 1876 { 1877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1878 1879 return dd->cce_err_status_cnt[37]; 1880 } 1881 1882 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry, 1883 void *context, int vl, int mode, 1884 u64 data) 1885 { 1886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1887 1888 return dd->cce_err_status_cnt[36]; 1889 } 1890 1891 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt( 1892 const struct cntr_entry *entry, 1893 void *context, int vl, int mode, u64 data) 1894 { 1895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1896 1897 return dd->cce_err_status_cnt[35]; 1898 } 1899 1900 static u64 access_cce_rcpl_async_fifo_parity_err_cnt( 1901 const struct cntr_entry *entry, 1902 void *context, int vl, int mode, u64 data) 1903 { 1904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1905 1906 return dd->cce_err_status_cnt[34]; 1907 } 1908 1909 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry, 1910 void *context, int vl, 1911 int mode, u64 data) 1912 { 1913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1914 1915 return dd->cce_err_status_cnt[33]; 1916 } 1917 1918 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry, 1919 void *context, int vl, int mode, 1920 u64 data) 1921 { 1922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1923 1924 return dd->cce_err_status_cnt[32]; 1925 } 1926 1927 static u64 access_la_triggered_cnt(const struct cntr_entry *entry, 1928 void *context, int vl, int mode, u64 data) 1929 { 1930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1931 1932 return dd->cce_err_status_cnt[31]; 1933 } 1934 1935 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry, 1936 void *context, int vl, int mode, 1937 u64 data) 1938 { 1939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1940 1941 return dd->cce_err_status_cnt[30]; 1942 } 1943 1944 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry, 1945 void *context, int vl, int mode, 1946 u64 data) 1947 { 1948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1949 1950 return dd->cce_err_status_cnt[29]; 1951 } 1952 1953 static u64 access_pcic_transmit_back_parity_err_cnt( 1954 const struct cntr_entry *entry, 1955 void *context, int vl, int mode, u64 data) 1956 { 1957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1958 1959 return dd->cce_err_status_cnt[28]; 1960 } 1961 1962 static u64 access_pcic_transmit_front_parity_err_cnt( 1963 const struct cntr_entry *entry, 1964 void *context, int vl, int mode, u64 data) 1965 { 1966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1967 1968 return dd->cce_err_status_cnt[27]; 1969 } 1970 1971 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry, 1972 void *context, int vl, int mode, 1973 u64 data) 1974 { 1975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1976 1977 return dd->cce_err_status_cnt[26]; 1978 } 1979 1980 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry, 1981 void *context, int vl, int mode, 1982 u64 data) 1983 { 1984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1985 1986 return dd->cce_err_status_cnt[25]; 1987 } 1988 1989 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry, 1990 void *context, int vl, int mode, 1991 u64 data) 
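/*
 * The PCIC (PCIe interface) error bits share the CceErrStatus
 * register with the CCE-proper bits, so these pcic accessors index
 * the same cce_err_status_cnt[] array.
 */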
1992 { 1993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1994 1995 return dd->cce_err_status_cnt[24]; 1996 } 1997 1998 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry, 1999 void *context, int vl, int mode, 2000 u64 data) 2001 { 2002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2003 2004 return dd->cce_err_status_cnt[23]; 2005 } 2006 2007 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry, 2008 void *context, int vl, 2009 int mode, u64 data) 2010 { 2011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2012 2013 return dd->cce_err_status_cnt[22]; 2014 } 2015 2016 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry, 2017 void *context, int vl, int mode, 2018 u64 data) 2019 { 2020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2021 2022 return dd->cce_err_status_cnt[21]; 2023 } 2024 2025 static u64 access_pcic_n_post_dat_q_parity_err_cnt( 2026 const struct cntr_entry *entry, 2027 void *context, int vl, int mode, u64 data) 2028 { 2029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2030 2031 return dd->cce_err_status_cnt[20]; 2032 } 2033 2034 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry, 2035 void *context, int vl, 2036 int mode, u64 data) 2037 { 2038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2039 2040 return dd->cce_err_status_cnt[19]; 2041 } 2042 2043 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry, 2044 void *context, int vl, int mode, 2045 u64 data) 2046 { 2047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2048 2049 return dd->cce_err_status_cnt[18]; 2050 } 2051 2052 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry, 2053 void *context, int vl, int mode, 2054 u64 data) 2055 { 2056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2057 2058 return dd->cce_err_status_cnt[17]; 2059 } 2060 2061 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry, 2062 void *context, int vl, int mode, 2063 u64 data) 2064 { 2065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2066 2067 return dd->cce_err_status_cnt[16]; 2068 } 2069 2070 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry, 2071 void *context, int vl, int mode, 2072 u64 data) 2073 { 2074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2075 2076 return dd->cce_err_status_cnt[15]; 2077 } 2078 2079 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry, 2080 void *context, int vl, 2081 int mode, u64 data) 2082 { 2083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2084 2085 return dd->cce_err_status_cnt[14]; 2086 } 2087 2088 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry, 2089 void *context, int vl, int mode, 2090 u64 data) 2091 { 2092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2093 2094 return dd->cce_err_status_cnt[13]; 2095 } 2096 2097 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt( 2098 const struct cntr_entry *entry, 2099 void *context, int vl, int mode, u64 data) 2100 { 2101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2102 2103 return dd->cce_err_status_cnt[12]; 2104 } 2105 2106 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt( 2107 const struct cntr_entry *entry, 2108 void *context, int vl, int mode, u64 data) 2109 { 2110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2111 2112 return 
dd->cce_err_status_cnt[11]; 2113 } 2114 2115 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt( 2116 const struct cntr_entry *entry, 2117 void *context, int vl, int mode, u64 data) 2118 { 2119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2120 2121 return dd->cce_err_status_cnt[10]; 2122 } 2123 2124 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt( 2125 const struct cntr_entry *entry, 2126 void *context, int vl, int mode, u64 data) 2127 { 2128 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2129 2130 return dd->cce_err_status_cnt[9]; 2131 } 2132 2133 static u64 access_cce_cli2_async_fifo_parity_err_cnt( 2134 const struct cntr_entry *entry, 2135 void *context, int vl, int mode, u64 data) 2136 { 2137 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2138 2139 return dd->cce_err_status_cnt[8]; 2140 } 2141 2142 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry, 2143 void *context, int vl, 2144 int mode, u64 data) 2145 { 2146 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2147 2148 return dd->cce_err_status_cnt[7]; 2149 } 2150 2151 static u64 access_cce_cli0_async_fifo_parity_err_cnt( 2152 const struct cntr_entry *entry, 2153 void *context, int vl, int mode, u64 data) 2154 { 2155 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2156 2157 return dd->cce_err_status_cnt[6]; 2158 } 2159 2160 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry, 2161 void *context, int vl, int mode, 2162 u64 data) 2163 { 2164 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2165 2166 return dd->cce_err_status_cnt[5]; 2167 } 2168 2169 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry, 2170 void *context, int vl, int mode, 2171 u64 data) 2172 { 2173 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2174 2175 return dd->cce_err_status_cnt[4]; 2176 } 2177 2178 static u64 access_cce_trgt_async_fifo_parity_err_cnt( 2179 const struct cntr_entry *entry, 2180 void *context, int vl, int mode, u64 data) 2181 { 2182 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2183 2184 return dd->cce_err_status_cnt[3]; 2185 } 2186 2187 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, 2188 void *context, int vl, 2189 int mode, u64 data) 2190 { 2191 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2192 2193 return dd->cce_err_status_cnt[2]; 2194 } 2195 2196 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 2197 void *context, int vl, 2198 int mode, u64 data) 2199 { 2200 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2201 2202 return dd->cce_err_status_cnt[1]; 2203 } 2204 2205 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry, 2206 void *context, int vl, int mode, 2207 u64 data) 2208 { 2209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2210 2211 return dd->cce_err_status_cnt[0]; 2212 } 2213 2214 /* 2215 * Software counters corresponding to each of the 2216 * error status bits within RcvErrStatus 2217 */ 2218 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry, 2219 void *context, int vl, int mode, 2220 u64 data) 2221 { 2222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2223 2224 return dd->rcv_err_status_cnt[63]; 2225 } 2226 2227 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry, 2228 void *context, int vl, 2229 int mode, u64 data) 2230 { 2231 struct hfi1_devdata *dd = (struct hfi1_devdata 
*)context; 2232 2233 return dd->rcv_err_status_cnt[62]; 2234 } 2235 2236 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 2237 void *context, int vl, int mode, 2238 u64 data) 2239 { 2240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2241 2242 return dd->rcv_err_status_cnt[61]; 2243 } 2244 2245 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry, 2246 void *context, int vl, int mode, 2247 u64 data) 2248 { 2249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2250 2251 return dd->rcv_err_status_cnt[60]; 2252 } 2253 2254 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2255 void *context, int vl, 2256 int mode, u64 data) 2257 { 2258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2259 2260 return dd->rcv_err_status_cnt[59]; 2261 } 2262 2263 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2264 void *context, int vl, 2265 int mode, u64 data) 2266 { 2267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2268 2269 return dd->rcv_err_status_cnt[58]; 2270 } 2271 2272 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry, 2273 void *context, int vl, int mode, 2274 u64 data) 2275 { 2276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2277 2278 return dd->rcv_err_status_cnt[57]; 2279 } 2280 2281 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry, 2282 void *context, int vl, int mode, 2283 u64 data) 2284 { 2285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2286 2287 return dd->rcv_err_status_cnt[56]; 2288 } 2289 2290 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry, 2291 void *context, int vl, int mode, 2292 u64 data) 2293 { 2294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2295 2296 return dd->rcv_err_status_cnt[55]; 2297 } 2298 2299 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt( 2300 const struct cntr_entry *entry, 2301 void *context, int vl, int mode, u64 data) 2302 { 2303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2304 2305 return dd->rcv_err_status_cnt[54]; 2306 } 2307 2308 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt( 2309 const struct cntr_entry *entry, 2310 void *context, int vl, int mode, u64 data) 2311 { 2312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2313 2314 return dd->rcv_err_status_cnt[53]; 2315 } 2316 2317 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry, 2318 void *context, int vl, 2319 int mode, u64 data) 2320 { 2321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2322 2323 return dd->rcv_err_status_cnt[52]; 2324 } 2325 2326 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry, 2327 void *context, int vl, 2328 int mode, u64 data) 2329 { 2330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2331 2332 return dd->rcv_err_status_cnt[51]; 2333 } 2334 2335 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry, 2336 void *context, int vl, 2337 int mode, u64 data) 2338 { 2339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2340 2341 return dd->rcv_err_status_cnt[50]; 2342 } 2343 2344 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry, 2345 void *context, int vl, 2346 int mode, u64 data) 2347 { 2348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2349 2350 return dd->rcv_err_status_cnt[49]; 2351 } 2352 2353 static u64 
access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry, 2354 void *context, int vl, 2355 int mode, u64 data) 2356 { 2357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2358 2359 return dd->rcv_err_status_cnt[48]; 2360 } 2361 2362 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry, 2363 void *context, int vl, 2364 int mode, u64 data) 2365 { 2366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2367 2368 return dd->rcv_err_status_cnt[47]; 2369 } 2370 2371 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry, 2372 void *context, int vl, int mode, 2373 u64 data) 2374 { 2375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2376 2377 return dd->rcv_err_status_cnt[46]; 2378 } 2379 2380 static u64 access_rx_hq_intr_csr_parity_err_cnt( 2381 const struct cntr_entry *entry, 2382 void *context, int vl, int mode, u64 data) 2383 { 2384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2385 2386 return dd->rcv_err_status_cnt[45]; 2387 } 2388 2389 static u64 access_rx_lookup_csr_parity_err_cnt( 2390 const struct cntr_entry *entry, 2391 void *context, int vl, int mode, u64 data) 2392 { 2393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2394 2395 return dd->rcv_err_status_cnt[44]; 2396 } 2397 2398 static u64 access_rx_lookup_rcv_array_cor_err_cnt( 2399 const struct cntr_entry *entry, 2400 void *context, int vl, int mode, u64 data) 2401 { 2402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2403 2404 return dd->rcv_err_status_cnt[43]; 2405 } 2406 2407 static u64 access_rx_lookup_rcv_array_unc_err_cnt( 2408 const struct cntr_entry *entry, 2409 void *context, int vl, int mode, u64 data) 2410 { 2411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2412 2413 return dd->rcv_err_status_cnt[42]; 2414 } 2415 2416 static u64 access_rx_lookup_des_part2_parity_err_cnt( 2417 const struct cntr_entry *entry, 2418 void *context, int vl, int mode, u64 data) 2419 { 2420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2421 2422 return dd->rcv_err_status_cnt[41]; 2423 } 2424 2425 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt( 2426 const struct cntr_entry *entry, 2427 void *context, int vl, int mode, u64 data) 2428 { 2429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2430 2431 return dd->rcv_err_status_cnt[40]; 2432 } 2433 2434 static u64 access_rx_lookup_des_part1_unc_err_cnt( 2435 const struct cntr_entry *entry, 2436 void *context, int vl, int mode, u64 data) 2437 { 2438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2439 2440 return dd->rcv_err_status_cnt[39]; 2441 } 2442 2443 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt( 2444 const struct cntr_entry *entry, 2445 void *context, int vl, int mode, u64 data) 2446 { 2447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2448 2449 return dd->rcv_err_status_cnt[38]; 2450 } 2451 2452 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt( 2453 const struct cntr_entry *entry, 2454 void *context, int vl, int mode, u64 data) 2455 { 2456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2457 2458 return dd->rcv_err_status_cnt[37]; 2459 } 2460 2461 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt( 2462 const struct cntr_entry *entry, 2463 void *context, int vl, int mode, u64 data) 2464 { 2465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2466 2467 return dd->rcv_err_status_cnt[36]; 2468 } 2469 2470 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt( 2471 const struct cntr_entry 
*entry, 2472 void *context, int vl, int mode, u64 data) 2473 { 2474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2475 2476 return dd->rcv_err_status_cnt[35]; 2477 } 2478 2479 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt( 2480 const struct cntr_entry *entry, 2481 void *context, int vl, int mode, u64 data) 2482 { 2483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2484 2485 return dd->rcv_err_status_cnt[34]; 2486 } 2487 2488 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt( 2489 const struct cntr_entry *entry, 2490 void *context, int vl, int mode, u64 data) 2491 { 2492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2493 2494 return dd->rcv_err_status_cnt[33]; 2495 } 2496 2497 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry, 2498 void *context, int vl, int mode, 2499 u64 data) 2500 { 2501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2502 2503 return dd->rcv_err_status_cnt[32]; 2504 } 2505 2506 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry, 2507 void *context, int vl, int mode, 2508 u64 data) 2509 { 2510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2511 2512 return dd->rcv_err_status_cnt[31]; 2513 } 2514 2515 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry, 2516 void *context, int vl, int mode, 2517 u64 data) 2518 { 2519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2520 2521 return dd->rcv_err_status_cnt[30]; 2522 } 2523 2524 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry, 2525 void *context, int vl, int mode, 2526 u64 data) 2527 { 2528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2529 2530 return dd->rcv_err_status_cnt[29]; 2531 } 2532 2533 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry, 2534 void *context, int vl, 2535 int mode, u64 data) 2536 { 2537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2538 2539 return dd->rcv_err_status_cnt[28]; 2540 } 2541 2542 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt( 2543 const struct cntr_entry *entry, 2544 void *context, int vl, int mode, u64 data) 2545 { 2546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2547 2548 return dd->rcv_err_status_cnt[27]; 2549 } 2550 2551 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt( 2552 const struct cntr_entry *entry, 2553 void *context, int vl, int mode, u64 data) 2554 { 2555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2556 2557 return dd->rcv_err_status_cnt[26]; 2558 } 2559 2560 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt( 2561 const struct cntr_entry *entry, 2562 void *context, int vl, int mode, u64 data) 2563 { 2564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2565 2566 return dd->rcv_err_status_cnt[25]; 2567 } 2568 2569 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt( 2570 const struct cntr_entry *entry, 2571 void *context, int vl, int mode, u64 data) 2572 { 2573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2574 2575 return dd->rcv_err_status_cnt[24]; 2576 } 2577 2578 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt( 2579 const struct cntr_entry *entry, 2580 void *context, int vl, int mode, u64 data) 2581 { 2582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2583 2584 return dd->rcv_err_status_cnt[23]; 2585 } 2586 2587 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt( 2588 const struct cntr_entry *entry, 2589 void *context, int vl, int mode, u64 data) 2590 { 2591 
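	/*
	 * RcvErrStatus bits 21..28 all guard parity on the receive
	 * buffer's CSR-visible queue state (head/next-buffer pointers,
	 * valid bits, entry counts); each bit has its own counter slot.
	 */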
struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2592 2593 return dd->rcv_err_status_cnt[22]; 2594 } 2595 2596 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt( 2597 const struct cntr_entry *entry, 2598 void *context, int vl, int mode, u64 data) 2599 { 2600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2601 2602 return dd->rcv_err_status_cnt[21]; 2603 } 2604 2605 static u64 access_rx_rbuf_block_list_read_cor_err_cnt( 2606 const struct cntr_entry *entry, 2607 void *context, int vl, int mode, u64 data) 2608 { 2609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2610 2611 return dd->rcv_err_status_cnt[20]; 2612 } 2613 2614 static u64 access_rx_rbuf_block_list_read_unc_err_cnt( 2615 const struct cntr_entry *entry, 2616 void *context, int vl, int mode, u64 data) 2617 { 2618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2619 2620 return dd->rcv_err_status_cnt[19]; 2621 } 2622 2623 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry, 2624 void *context, int vl, 2625 int mode, u64 data) 2626 { 2627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2628 2629 return dd->rcv_err_status_cnt[18]; 2630 } 2631 2632 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry, 2633 void *context, int vl, 2634 int mode, u64 data) 2635 { 2636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2637 2638 return dd->rcv_err_status_cnt[17]; 2639 } 2640 2641 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt( 2642 const struct cntr_entry *entry, 2643 void *context, int vl, int mode, u64 data) 2644 { 2645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2646 2647 return dd->rcv_err_status_cnt[16]; 2648 } 2649 2650 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt( 2651 const struct cntr_entry *entry, 2652 void *context, int vl, int mode, u64 data) 2653 { 2654 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2655 2656 return dd->rcv_err_status_cnt[15]; 2657 } 2658 2659 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry, 2660 void *context, int vl, 2661 int mode, u64 data) 2662 { 2663 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2664 2665 return dd->rcv_err_status_cnt[14]; 2666 } 2667 2668 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry, 2669 void *context, int vl, 2670 int mode, u64 data) 2671 { 2672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2673 2674 return dd->rcv_err_status_cnt[13]; 2675 } 2676 2677 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2678 void *context, int vl, int mode, 2679 u64 data) 2680 { 2681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2682 2683 return dd->rcv_err_status_cnt[12]; 2684 } 2685 2686 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry, 2687 void *context, int vl, int mode, 2688 u64 data) 2689 { 2690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2691 2692 return dd->rcv_err_status_cnt[11]; 2693 } 2694 2695 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry, 2696 void *context, int vl, int mode, 2697 u64 data) 2698 { 2699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2700 2701 return dd->rcv_err_status_cnt[10]; 2702 } 2703 2704 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry, 2705 void *context, int vl, int mode, 2706 u64 data) 2707 { 2708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2709 2710 return 
dd->rcv_err_status_cnt[9]; 2711 } 2712 2713 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry, 2714 void *context, int vl, int mode, 2715 u64 data) 2716 { 2717 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2718 2719 return dd->rcv_err_status_cnt[8]; 2720 } 2721 2722 static u64 access_rx_rcv_qp_map_table_cor_err_cnt( 2723 const struct cntr_entry *entry, 2724 void *context, int vl, int mode, u64 data) 2725 { 2726 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2727 2728 return dd->rcv_err_status_cnt[7]; 2729 } 2730 2731 static u64 access_rx_rcv_qp_map_table_unc_err_cnt( 2732 const struct cntr_entry *entry, 2733 void *context, int vl, int mode, u64 data) 2734 { 2735 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2736 2737 return dd->rcv_err_status_cnt[6]; 2738 } 2739 2740 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry, 2741 void *context, int vl, int mode, 2742 u64 data) 2743 { 2744 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2745 2746 return dd->rcv_err_status_cnt[5]; 2747 } 2748 2749 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry, 2750 void *context, int vl, int mode, 2751 u64 data) 2752 { 2753 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2754 2755 return dd->rcv_err_status_cnt[4]; 2756 } 2757 2758 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry, 2759 void *context, int vl, int mode, 2760 u64 data) 2761 { 2762 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2763 2764 return dd->rcv_err_status_cnt[3]; 2765 } 2766 2767 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry, 2768 void *context, int vl, int mode, 2769 u64 data) 2770 { 2771 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2772 2773 return dd->rcv_err_status_cnt[2]; 2774 } 2775 2776 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry, 2777 void *context, int vl, int mode, 2778 u64 data) 2779 { 2780 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2781 2782 return dd->rcv_err_status_cnt[1]; 2783 } 2784 2785 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry, 2786 void *context, int vl, int mode, 2787 u64 data) 2788 { 2789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2790 2791 return dd->rcv_err_status_cnt[0]; 2792 } 2793 2794 /* 2795 * Software counters corresponding to each of the 2796 * error status bits within SendPioErrStatus 2797 */ 2798 static u64 access_pio_pec_sop_head_parity_err_cnt( 2799 const struct cntr_entry *entry, 2800 void *context, int vl, int mode, u64 data) 2801 { 2802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2803 2804 return dd->send_pio_err_status_cnt[35]; 2805 } 2806 2807 static u64 access_pio_pcc_sop_head_parity_err_cnt( 2808 const struct cntr_entry *entry, 2809 void *context, int vl, int mode, u64 data) 2810 { 2811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2812 2813 return dd->send_pio_err_status_cnt[34]; 2814 } 2815 2816 static u64 access_pio_last_returned_cnt_parity_err_cnt( 2817 const struct cntr_entry *entry, 2818 void *context, int vl, int mode, u64 data) 2819 { 2820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2821 2822 return dd->send_pio_err_status_cnt[33]; 2823 } 2824 2825 static u64 access_pio_current_free_cnt_parity_err_cnt( 2826 const struct cntr_entry *entry, 2827 void *context, int vl, int mode, u64 data) 2828 { 2829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2830 2831 return 
dd->send_pio_err_status_cnt[32]; 2832 } 2833 2834 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry, 2835 void *context, int vl, int mode, 2836 u64 data) 2837 { 2838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2839 2840 return dd->send_pio_err_status_cnt[31]; 2841 } 2842 2843 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry, 2844 void *context, int vl, int mode, 2845 u64 data) 2846 { 2847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2848 2849 return dd->send_pio_err_status_cnt[30]; 2850 } 2851 2852 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry, 2853 void *context, int vl, int mode, 2854 u64 data) 2855 { 2856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2857 2858 return dd->send_pio_err_status_cnt[29]; 2859 } 2860 2861 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt( 2862 const struct cntr_entry *entry, 2863 void *context, int vl, int mode, u64 data) 2864 { 2865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2866 2867 return dd->send_pio_err_status_cnt[28]; 2868 } 2869 2870 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry, 2871 void *context, int vl, int mode, 2872 u64 data) 2873 { 2874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2875 2876 return dd->send_pio_err_status_cnt[27]; 2877 } 2878 2879 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry, 2880 void *context, int vl, int mode, 2881 u64 data) 2882 { 2883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2884 2885 return dd->send_pio_err_status_cnt[26]; 2886 } 2887 2888 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry, 2889 void *context, int vl, 2890 int mode, u64 data) 2891 { 2892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2893 2894 return dd->send_pio_err_status_cnt[25]; 2895 } 2896 2897 static u64 access_pio_block_qw_count_parity_err_cnt( 2898 const struct cntr_entry *entry, 2899 void *context, int vl, int mode, u64 data) 2900 { 2901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2902 2903 return dd->send_pio_err_status_cnt[24]; 2904 } 2905 2906 static u64 access_pio_write_qw_valid_parity_err_cnt( 2907 const struct cntr_entry *entry, 2908 void *context, int vl, int mode, u64 data) 2909 { 2910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2911 2912 return dd->send_pio_err_status_cnt[23]; 2913 } 2914 2915 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry, 2916 void *context, int vl, int mode, 2917 u64 data) 2918 { 2919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2920 2921 return dd->send_pio_err_status_cnt[22]; 2922 } 2923 2924 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry, 2925 void *context, int vl, 2926 int mode, u64 data) 2927 { 2928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2929 2930 return dd->send_pio_err_status_cnt[21]; 2931 } 2932 2933 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry, 2934 void *context, int vl, 2935 int mode, u64 data) 2936 { 2937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2938 2939 return dd->send_pio_err_status_cnt[20]; 2940 } 2941 2942 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry, 2943 void *context, int vl, 2944 int mode, u64 data) 2945 { 2946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2947 2948 return dd->send_pio_err_status_cnt[19]; 2949 } 2950 2951 static 
u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt( 2952 const struct cntr_entry *entry, 2953 void *context, int vl, int mode, u64 data) 2954 { 2955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2956 2957 return dd->send_pio_err_status_cnt[18]; 2958 } 2959 2960 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry, 2961 void *context, int vl, int mode, 2962 u64 data) 2963 { 2964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2965 2966 return dd->send_pio_err_status_cnt[17]; 2967 } 2968 2969 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry, 2970 void *context, int vl, int mode, 2971 u64 data) 2972 { 2973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2974 2975 return dd->send_pio_err_status_cnt[16]; 2976 } 2977 2978 static u64 access_pio_credit_ret_fifo_parity_err_cnt( 2979 const struct cntr_entry *entry, 2980 void *context, int vl, int mode, u64 data) 2981 { 2982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2983 2984 return dd->send_pio_err_status_cnt[15]; 2985 } 2986 2987 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt( 2988 const struct cntr_entry *entry, 2989 void *context, int vl, int mode, u64 data) 2990 { 2991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2992 2993 return dd->send_pio_err_status_cnt[14]; 2994 } 2995 2996 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt( 2997 const struct cntr_entry *entry, 2998 void *context, int vl, int mode, u64 data) 2999 { 3000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3001 3002 return dd->send_pio_err_status_cnt[13]; 3003 } 3004 3005 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt( 3006 const struct cntr_entry *entry, 3007 void *context, int vl, int mode, u64 data) 3008 { 3009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3010 3011 return dd->send_pio_err_status_cnt[12]; 3012 } 3013 3014 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt( 3015 const struct cntr_entry *entry, 3016 void *context, int vl, int mode, u64 data) 3017 { 3018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3019 3020 return dd->send_pio_err_status_cnt[11]; 3021 } 3022 3023 static u64 access_pio_sm_pkt_reset_parity_err_cnt( 3024 const struct cntr_entry *entry, 3025 void *context, int vl, int mode, u64 data) 3026 { 3027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3028 3029 return dd->send_pio_err_status_cnt[10]; 3030 } 3031 3032 static u64 access_pio_pkt_evict_fifo_parity_err_cnt( 3033 const struct cntr_entry *entry, 3034 void *context, int vl, int mode, u64 data) 3035 { 3036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3037 3038 return dd->send_pio_err_status_cnt[9]; 3039 } 3040 3041 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt( 3042 const struct cntr_entry *entry, 3043 void *context, int vl, int mode, u64 data) 3044 { 3045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3046 3047 return dd->send_pio_err_status_cnt[8]; 3048 } 3049 3050 static u64 access_pio_sbrdctl_crrel_parity_err_cnt( 3051 const struct cntr_entry *entry, 3052 void *context, int vl, int mode, u64 data) 3053 { 3054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3055 3056 return dd->send_pio_err_status_cnt[7]; 3057 } 3058 3059 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry, 3060 void *context, int vl, int mode, 3061 u64 data) 3062 { 3063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3064 3065 return dd->send_pio_err_status_cnt[6]; 3066 } 3067 3068 static u64 
access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry, 3069 void *context, int vl, int mode, 3070 u64 data) 3071 { 3072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3073 3074 return dd->send_pio_err_status_cnt[5]; 3075 } 3076 3077 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry, 3078 void *context, int vl, int mode, 3079 u64 data) 3080 { 3081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3082 3083 return dd->send_pio_err_status_cnt[4]; 3084 } 3085 3086 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry, 3087 void *context, int vl, int mode, 3088 u64 data) 3089 { 3090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3091 3092 return dd->send_pio_err_status_cnt[3]; 3093 } 3094 3095 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry, 3096 void *context, int vl, int mode, 3097 u64 data) 3098 { 3099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3100 3101 return dd->send_pio_err_status_cnt[2]; 3102 } 3103 3104 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry, 3105 void *context, int vl, 3106 int mode, u64 data) 3107 { 3108 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3109 3110 return dd->send_pio_err_status_cnt[1]; 3111 } 3112 3113 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry, 3114 void *context, int vl, int mode, 3115 u64 data) 3116 { 3117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3118 3119 return dd->send_pio_err_status_cnt[0]; 3120 } 3121 3122 /* 3123 * Software counters corresponding to each of the 3124 * error status bits within SendDmaErrStatus 3125 */ 3126 static u64 access_sdma_pcie_req_tracking_cor_err_cnt( 3127 const struct cntr_entry *entry, 3128 void *context, int vl, int mode, u64 data) 3129 { 3130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3131 3132 return dd->send_dma_err_status_cnt[3]; 3133 } 3134 3135 static u64 access_sdma_pcie_req_tracking_unc_err_cnt( 3136 const struct cntr_entry *entry, 3137 void *context, int vl, int mode, u64 data) 3138 { 3139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3140 3141 return dd->send_dma_err_status_cnt[2]; 3142 } 3143 3144 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry, 3145 void *context, int vl, int mode, 3146 u64 data) 3147 { 3148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3149 3150 return dd->send_dma_err_status_cnt[1]; 3151 } 3152 3153 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry, 3154 void *context, int vl, int mode, 3155 u64 data) 3156 { 3157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3158 3159 return dd->send_dma_err_status_cnt[0]; 3160 } 3161 3162 /* 3163 * Software counters corresponding to each of the 3164 * error status bits within SendEgressErrStatus 3165 */ 3166 static u64 access_tx_read_pio_memory_csr_unc_err_cnt( 3167 const struct cntr_entry *entry, 3168 void *context, int vl, int mode, u64 data) 3169 { 3170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3171 3172 return dd->send_egress_err_status_cnt[63]; 3173 } 3174 3175 static u64 access_tx_read_sdma_memory_csr_err_cnt( 3176 const struct cntr_entry *entry, 3177 void *context, int vl, int mode, u64 data) 3178 { 3179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3180 3181 return dd->send_egress_err_status_cnt[62]; 3182 } 3183 3184 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry, 3185 void *context, int vl, 
int mode, 3186 u64 data) 3187 { 3188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3189 3190 return dd->send_egress_err_status_cnt[61]; 3191 } 3192 3193 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry, 3194 void *context, int vl, 3195 int mode, u64 data) 3196 { 3197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3198 3199 return dd->send_egress_err_status_cnt[60]; 3200 } 3201 3202 static u64 access_tx_read_sdma_memory_cor_err_cnt( 3203 const struct cntr_entry *entry, 3204 void *context, int vl, int mode, u64 data) 3205 { 3206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3207 3208 return dd->send_egress_err_status_cnt[59]; 3209 } 3210 3211 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry, 3212 void *context, int vl, int mode, 3213 u64 data) 3214 { 3215 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3216 3217 return dd->send_egress_err_status_cnt[58]; 3218 } 3219 3220 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry, 3221 void *context, int vl, int mode, 3222 u64 data) 3223 { 3224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3225 3226 return dd->send_egress_err_status_cnt[57]; 3227 } 3228 3229 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry, 3230 void *context, int vl, int mode, 3231 u64 data) 3232 { 3233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3234 3235 return dd->send_egress_err_status_cnt[56]; 3236 } 3237 3238 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry, 3239 void *context, int vl, int mode, 3240 u64 data) 3241 { 3242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3243 3244 return dd->send_egress_err_status_cnt[55]; 3245 } 3246 3247 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry, 3248 void *context, int vl, int mode, 3249 u64 data) 3250 { 3251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3252 3253 return dd->send_egress_err_status_cnt[54]; 3254 } 3255 3256 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry, 3257 void *context, int vl, int mode, 3258 u64 data) 3259 { 3260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3261 3262 return dd->send_egress_err_status_cnt[53]; 3263 } 3264 3265 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry, 3266 void *context, int vl, int mode, 3267 u64 data) 3268 { 3269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3270 3271 return dd->send_egress_err_status_cnt[52]; 3272 } 3273 3274 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry, 3275 void *context, int vl, int mode, 3276 u64 data) 3277 { 3278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3279 3280 return dd->send_egress_err_status_cnt[51]; 3281 } 3282 3283 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry, 3284 void *context, int vl, int mode, 3285 u64 data) 3286 { 3287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3288 3289 return dd->send_egress_err_status_cnt[50]; 3290 } 3291 3292 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry, 3293 void *context, int vl, int mode, 3294 u64 data) 3295 { 3296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3297 3298 return dd->send_egress_err_status_cnt[49]; 3299 } 3300 3301 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry, 3302 void *context, int vl, int mode, 3303 u64 data) 3304 { 
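	/*
	 * Launch FIFOs 0..8 occupy consecutive SendEgressErrStatus
	 * bits: the correctable-error counters above sit at slot
	 * 48 + n, the uncorrectable/parity counters below at 32 + n.
	 */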
3305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3306 3307 return dd->send_egress_err_status_cnt[48]; 3308 } 3309 3310 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry, 3311 void *context, int vl, int mode, 3312 u64 data) 3313 { 3314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3315 3316 return dd->send_egress_err_status_cnt[47]; 3317 } 3318 3319 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry, 3320 void *context, int vl, int mode, 3321 u64 data) 3322 { 3323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3324 3325 return dd->send_egress_err_status_cnt[46]; 3326 } 3327 3328 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry, 3329 void *context, int vl, int mode, 3330 u64 data) 3331 { 3332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3333 3334 return dd->send_egress_err_status_cnt[45]; 3335 } 3336 3337 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry, 3338 void *context, int vl, 3339 int mode, u64 data) 3340 { 3341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3342 3343 return dd->send_egress_err_status_cnt[44]; 3344 } 3345 3346 static u64 access_tx_read_sdma_memory_unc_err_cnt( 3347 const struct cntr_entry *entry, 3348 void *context, int vl, int mode, u64 data) 3349 { 3350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3351 3352 return dd->send_egress_err_status_cnt[43]; 3353 } 3354 3355 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry, 3356 void *context, int vl, int mode, 3357 u64 data) 3358 { 3359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3360 3361 return dd->send_egress_err_status_cnt[42]; 3362 } 3363 3364 static u64 access_tx_credit_return_partiy_err_cnt( 3365 const struct cntr_entry *entry, 3366 void *context, int vl, int mode, u64 data) 3367 { 3368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3369 3370 return dd->send_egress_err_status_cnt[41]; 3371 } 3372 3373 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt( 3374 const struct cntr_entry *entry, 3375 void *context, int vl, int mode, u64 data) 3376 { 3377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3378 3379 return dd->send_egress_err_status_cnt[40]; 3380 } 3381 3382 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt( 3383 const struct cntr_entry *entry, 3384 void *context, int vl, int mode, u64 data) 3385 { 3386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3387 3388 return dd->send_egress_err_status_cnt[39]; 3389 } 3390 3391 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt( 3392 const struct cntr_entry *entry, 3393 void *context, int vl, int mode, u64 data) 3394 { 3395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3396 3397 return dd->send_egress_err_status_cnt[38]; 3398 } 3399 3400 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt( 3401 const struct cntr_entry *entry, 3402 void *context, int vl, int mode, u64 data) 3403 { 3404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3405 3406 return dd->send_egress_err_status_cnt[37]; 3407 } 3408 3409 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt( 3410 const struct cntr_entry *entry, 3411 void *context, int vl, int mode, u64 data) 3412 { 3413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3414 3415 return dd->send_egress_err_status_cnt[36]; 3416 } 3417 3418 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt( 3419 const struct cntr_entry *entry, 3420 void 
*context, int vl, int mode, u64 data) 3421 { 3422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3423 3424 return dd->send_egress_err_status_cnt[35]; 3425 } 3426 3427 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt( 3428 const struct cntr_entry *entry, 3429 void *context, int vl, int mode, u64 data) 3430 { 3431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3432 3433 return dd->send_egress_err_status_cnt[34]; 3434 } 3435 3436 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt( 3437 const struct cntr_entry *entry, 3438 void *context, int vl, int mode, u64 data) 3439 { 3440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3441 3442 return dd->send_egress_err_status_cnt[33]; 3443 } 3444 3445 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt( 3446 const struct cntr_entry *entry, 3447 void *context, int vl, int mode, u64 data) 3448 { 3449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3450 3451 return dd->send_egress_err_status_cnt[32]; 3452 } 3453 3454 static u64 access_tx_sdma15_disallowed_packet_err_cnt( 3455 const struct cntr_entry *entry, 3456 void *context, int vl, int mode, u64 data) 3457 { 3458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3459 3460 return dd->send_egress_err_status_cnt[31]; 3461 } 3462 3463 static u64 access_tx_sdma14_disallowed_packet_err_cnt( 3464 const struct cntr_entry *entry, 3465 void *context, int vl, int mode, u64 data) 3466 { 3467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3468 3469 return dd->send_egress_err_status_cnt[30]; 3470 } 3471 3472 static u64 access_tx_sdma13_disallowed_packet_err_cnt( 3473 const struct cntr_entry *entry, 3474 void *context, int vl, int mode, u64 data) 3475 { 3476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3477 3478 return dd->send_egress_err_status_cnt[29]; 3479 } 3480 3481 static u64 access_tx_sdma12_disallowed_packet_err_cnt( 3482 const struct cntr_entry *entry, 3483 void *context, int vl, int mode, u64 data) 3484 { 3485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3486 3487 return dd->send_egress_err_status_cnt[28]; 3488 } 3489 3490 static u64 access_tx_sdma11_disallowed_packet_err_cnt( 3491 const struct cntr_entry *entry, 3492 void *context, int vl, int mode, u64 data) 3493 { 3494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3495 3496 return dd->send_egress_err_status_cnt[27]; 3497 } 3498 3499 static u64 access_tx_sdma10_disallowed_packet_err_cnt( 3500 const struct cntr_entry *entry, 3501 void *context, int vl, int mode, u64 data) 3502 { 3503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3504 3505 return dd->send_egress_err_status_cnt[26]; 3506 } 3507 3508 static u64 access_tx_sdma9_disallowed_packet_err_cnt( 3509 const struct cntr_entry *entry, 3510 void *context, int vl, int mode, u64 data) 3511 { 3512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3513 3514 return dd->send_egress_err_status_cnt[25]; 3515 } 3516 3517 static u64 access_tx_sdma8_disallowed_packet_err_cnt( 3518 const struct cntr_entry *entry, 3519 void *context, int vl, int mode, u64 data) 3520 { 3521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3522 3523 return dd->send_egress_err_status_cnt[24]; 3524 } 3525 3526 static u64 access_tx_sdma7_disallowed_packet_err_cnt( 3527 const struct cntr_entry *entry, 3528 void *context, int vl, int mode, u64 data) 3529 { 3530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3531 3532 return dd->send_egress_err_status_cnt[23]; 3533 } 3534 3535 static u64 
access_tx_sdma6_disallowed_packet_err_cnt( 3536 const struct cntr_entry *entry, 3537 void *context, int vl, int mode, u64 data) 3538 { 3539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3540 3541 return dd->send_egress_err_status_cnt[22]; 3542 } 3543 3544 static u64 access_tx_sdma5_disallowed_packet_err_cnt( 3545 const struct cntr_entry *entry, 3546 void *context, int vl, int mode, u64 data) 3547 { 3548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3549 3550 return dd->send_egress_err_status_cnt[21]; 3551 } 3552 3553 static u64 access_tx_sdma4_disallowed_packet_err_cnt( 3554 const struct cntr_entry *entry, 3555 void *context, int vl, int mode, u64 data) 3556 { 3557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3558 3559 return dd->send_egress_err_status_cnt[20]; 3560 } 3561 3562 static u64 access_tx_sdma3_disallowed_packet_err_cnt( 3563 const struct cntr_entry *entry, 3564 void *context, int vl, int mode, u64 data) 3565 { 3566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3567 3568 return dd->send_egress_err_status_cnt[19]; 3569 } 3570 3571 static u64 access_tx_sdma2_disallowed_packet_err_cnt( 3572 const struct cntr_entry *entry, 3573 void *context, int vl, int mode, u64 data) 3574 { 3575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3576 3577 return dd->send_egress_err_status_cnt[18]; 3578 } 3579 3580 static u64 access_tx_sdma1_disallowed_packet_err_cnt( 3581 const struct cntr_entry *entry, 3582 void *context, int vl, int mode, u64 data) 3583 { 3584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3585 3586 return dd->send_egress_err_status_cnt[17]; 3587 } 3588 3589 static u64 access_tx_sdma0_disallowed_packet_err_cnt( 3590 const struct cntr_entry *entry, 3591 void *context, int vl, int mode, u64 data) 3592 { 3593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3594 3595 return dd->send_egress_err_status_cnt[16]; 3596 } 3597 3598 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry, 3599 void *context, int vl, int mode, 3600 u64 data) 3601 { 3602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3603 3604 return dd->send_egress_err_status_cnt[15]; 3605 } 3606 3607 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry, 3608 void *context, int vl, 3609 int mode, u64 data) 3610 { 3611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3612 3613 return dd->send_egress_err_status_cnt[14]; 3614 } 3615 3616 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry, 3617 void *context, int vl, int mode, 3618 u64 data) 3619 { 3620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3621 3622 return dd->send_egress_err_status_cnt[13]; 3623 } 3624 3625 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry, 3626 void *context, int vl, int mode, 3627 u64 data) 3628 { 3629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3630 3631 return dd->send_egress_err_status_cnt[12]; 3632 } 3633 3634 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt( 3635 const struct cntr_entry *entry, 3636 void *context, int vl, int mode, u64 data) 3637 { 3638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3639 3640 return dd->send_egress_err_status_cnt[11]; 3641 } 3642 3643 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry, 3644 void *context, int vl, int mode, 3645 u64 data) 3646 { 3647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3648 3649 return 
dd->send_egress_err_status_cnt[10]; 3650 } 3651 3652 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry, 3653 void *context, int vl, int mode, 3654 u64 data) 3655 { 3656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3657 3658 return dd->send_egress_err_status_cnt[9]; 3659 } 3660 3661 static u64 access_tx_sdma_launch_intf_parity_err_cnt( 3662 const struct cntr_entry *entry, 3663 void *context, int vl, int mode, u64 data) 3664 { 3665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3666 3667 return dd->send_egress_err_status_cnt[8]; 3668 } 3669 3670 static u64 access_tx_pio_launch_intf_parity_err_cnt( 3671 const struct cntr_entry *entry, 3672 void *context, int vl, int mode, u64 data) 3673 { 3674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3675 3676 return dd->send_egress_err_status_cnt[7]; 3677 } 3678 3679 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry, 3680 void *context, int vl, int mode, 3681 u64 data) 3682 { 3683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3684 3685 return dd->send_egress_err_status_cnt[6]; 3686 } 3687 3688 static u64 access_tx_incorrect_link_state_err_cnt( 3689 const struct cntr_entry *entry, 3690 void *context, int vl, int mode, u64 data) 3691 { 3692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3693 3694 return dd->send_egress_err_status_cnt[5]; 3695 } 3696 3697 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry, 3698 void *context, int vl, int mode, 3699 u64 data) 3700 { 3701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3702 3703 return dd->send_egress_err_status_cnt[4]; 3704 } 3705 3706 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt( 3707 const struct cntr_entry *entry, 3708 void *context, int vl, int mode, u64 data) 3709 { 3710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3711 3712 return dd->send_egress_err_status_cnt[3]; 3713 } 3714 3715 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry, 3716 void *context, int vl, int mode, 3717 u64 data) 3718 { 3719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3720 3721 return dd->send_egress_err_status_cnt[2]; 3722 } 3723 3724 static u64 access_tx_pkt_integrity_mem_unc_err_cnt( 3725 const struct cntr_entry *entry, 3726 void *context, int vl, int mode, u64 data) 3727 { 3728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3729 3730 return dd->send_egress_err_status_cnt[1]; 3731 } 3732 3733 static u64 access_tx_pkt_integrity_mem_cor_err_cnt( 3734 const struct cntr_entry *entry, 3735 void *context, int vl, int mode, u64 data) 3736 { 3737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3738 3739 return dd->send_egress_err_status_cnt[0]; 3740 } 3741 3742 /* 3743 * Software counters corresponding to each of the 3744 * error status bits within SendErrStatus 3745 */ 3746 static u64 access_send_csr_write_bad_addr_err_cnt( 3747 const struct cntr_entry *entry, 3748 void *context, int vl, int mode, u64 data) 3749 { 3750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3751 3752 return dd->send_err_status_cnt[2]; 3753 } 3754 3755 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 3756 void *context, int vl, 3757 int mode, u64 data) 3758 { 3759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3760 3761 return dd->send_err_status_cnt[1]; 3762 } 3763 3764 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry, 3765 void *context, int vl, int mode, 3766 u64 
data) 3767 { 3768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3769 3770 return dd->send_err_status_cnt[0]; 3771 } 3772 3773 /* 3774 * Software counters corresponding to each of the 3775 * error status bits within SendCtxtErrStatus 3776 */ 3777 static u64 access_pio_write_out_of_bounds_err_cnt( 3778 const struct cntr_entry *entry, 3779 void *context, int vl, int mode, u64 data) 3780 { 3781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3782 3783 return dd->sw_ctxt_err_status_cnt[4]; 3784 } 3785 3786 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry, 3787 void *context, int vl, int mode, 3788 u64 data) 3789 { 3790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3791 3792 return dd->sw_ctxt_err_status_cnt[3]; 3793 } 3794 3795 static u64 access_pio_write_crosses_boundary_err_cnt( 3796 const struct cntr_entry *entry, 3797 void *context, int vl, int mode, u64 data) 3798 { 3799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3800 3801 return dd->sw_ctxt_err_status_cnt[2]; 3802 } 3803 3804 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry, 3805 void *context, int vl, 3806 int mode, u64 data) 3807 { 3808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3809 3810 return dd->sw_ctxt_err_status_cnt[1]; 3811 } 3812 3813 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry, 3814 void *context, int vl, int mode, 3815 u64 data) 3816 { 3817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3818 3819 return dd->sw_ctxt_err_status_cnt[0]; 3820 } 3821 3822 /* 3823 * Software counters corresponding to each of the 3824 * error status bits within SendDmaEngErrStatus 3825 */ 3826 static u64 access_sdma_header_request_fifo_cor_err_cnt( 3827 const struct cntr_entry *entry, 3828 void *context, int vl, int mode, u64 data) 3829 { 3830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3831 3832 return dd->sw_send_dma_eng_err_status_cnt[23]; 3833 } 3834 3835 static u64 access_sdma_header_storage_cor_err_cnt( 3836 const struct cntr_entry *entry, 3837 void *context, int vl, int mode, u64 data) 3838 { 3839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3840 3841 return dd->sw_send_dma_eng_err_status_cnt[22]; 3842 } 3843 3844 static u64 access_sdma_packet_tracking_cor_err_cnt( 3845 const struct cntr_entry *entry, 3846 void *context, int vl, int mode, u64 data) 3847 { 3848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3849 3850 return dd->sw_send_dma_eng_err_status_cnt[21]; 3851 } 3852 3853 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry, 3854 void *context, int vl, int mode, 3855 u64 data) 3856 { 3857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3858 3859 return dd->sw_send_dma_eng_err_status_cnt[20]; 3860 } 3861 3862 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry, 3863 void *context, int vl, int mode, 3864 u64 data) 3865 { 3866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3867 3868 return dd->sw_send_dma_eng_err_status_cnt[19]; 3869 } 3870 3871 static u64 access_sdma_header_request_fifo_unc_err_cnt( 3872 const struct cntr_entry *entry, 3873 void *context, int vl, int mode, u64 data) 3874 { 3875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3876 3877 return dd->sw_send_dma_eng_err_status_cnt[18]; 3878 } 3879 3880 static u64 access_sdma_header_storage_unc_err_cnt( 3881 const struct cntr_entry *entry, 3882 void *context, int vl, int mode, u64 data) 3883 { 3884 
struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3885 3886 return dd->sw_send_dma_eng_err_status_cnt[17]; 3887 } 3888 3889 static u64 access_sdma_packet_tracking_unc_err_cnt( 3890 const struct cntr_entry *entry, 3891 void *context, int vl, int mode, u64 data) 3892 { 3893 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3894 3895 return dd->sw_send_dma_eng_err_status_cnt[16]; 3896 } 3897 3898 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry, 3899 void *context, int vl, int mode, 3900 u64 data) 3901 { 3902 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3903 3904 return dd->sw_send_dma_eng_err_status_cnt[15]; 3905 } 3906 3907 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry, 3908 void *context, int vl, int mode, 3909 u64 data) 3910 { 3911 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3912 3913 return dd->sw_send_dma_eng_err_status_cnt[14]; 3914 } 3915 3916 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry, 3917 void *context, int vl, int mode, 3918 u64 data) 3919 { 3920 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3921 3922 return dd->sw_send_dma_eng_err_status_cnt[13]; 3923 } 3924 3925 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry, 3926 void *context, int vl, int mode, 3927 u64 data) 3928 { 3929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3930 3931 return dd->sw_send_dma_eng_err_status_cnt[12]; 3932 } 3933 3934 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry, 3935 void *context, int vl, int mode, 3936 u64 data) 3937 { 3938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3939 3940 return dd->sw_send_dma_eng_err_status_cnt[11]; 3941 } 3942 3943 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry, 3944 void *context, int vl, int mode, 3945 u64 data) 3946 { 3947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3948 3949 return dd->sw_send_dma_eng_err_status_cnt[10]; 3950 } 3951 3952 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry, 3953 void *context, int vl, int mode, 3954 u64 data) 3955 { 3956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3957 3958 return dd->sw_send_dma_eng_err_status_cnt[9]; 3959 } 3960 3961 static u64 access_sdma_packet_desc_overflow_err_cnt( 3962 const struct cntr_entry *entry, 3963 void *context, int vl, int mode, u64 data) 3964 { 3965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3966 3967 return dd->sw_send_dma_eng_err_status_cnt[8]; 3968 } 3969 3970 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry, 3971 void *context, int vl, 3972 int mode, u64 data) 3973 { 3974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3975 3976 return dd->sw_send_dma_eng_err_status_cnt[7]; 3977 } 3978 3979 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry, 3980 void *context, int vl, int mode, u64 data) 3981 { 3982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3983 3984 return dd->sw_send_dma_eng_err_status_cnt[6]; 3985 } 3986 3987 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry, 3988 void *context, int vl, int mode, 3989 u64 data) 3990 { 3991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3992 3993 return dd->sw_send_dma_eng_err_status_cnt[5]; 3994 } 3995 3996 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry, 3997 void *context, int vl, int mode, 3998 u64 data) 3999 { 4000 struct 
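/*
 * DcRecvErr needs more than a plain CSR read: on a read the hardware
 * count is combined with the software count of dropped bypass packets,
 * saturating at CNTR_MAX rather than wrapping; a write (counter reset)
 * also clears the software contribution.
 */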
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,	      \
			      void *context, int vl, int mode, u64 data)      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
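/*
 * Device counter table, indexed by the C_* device counter enums.  Each
 * entry pairs a counter name and CSR address with mode flags and an
 * access routine; entries with a zero CSR are software counters served
 * by the access_* helpers above.
 */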
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
	[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT,
			CNTR_SYNTH),
	[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
	[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid,
			RCV_TID_VALID_ERR_CNT, CNTR_NORMAL),
	[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT, CNTR_NORMAL),
	[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
	[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
	[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
	[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt,
			CCE_PCIE_TRGT_STALL_CNT, CNTR_NORMAL),
	[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt,
			CCE_PIO_WR_STALL_CNT, CNTR_NORMAL),
	[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
	[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
	[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
	[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt,
			CCE_RCV_AVAIL_INT_CNT, CNTR_NORMAL),
	[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
	[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
	[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
	[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0,
			CNTR_SYNTH, access_dc_rcv_err_cnt),
	[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
	[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr,
			DCC_ERR_RCVREMOTE_PHY_ERR_CNT, CNTR_SYNTH),
	[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
	[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
	[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT, CNTR_SYNTH),
	[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
	[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt,
			DCC_PRF_PORT_RCV_CORRECTABLE_CNT, CNTR_SYNTH),
	[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
	[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
	[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
	[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
	[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
	[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
	[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl,
			DCC_PRF_PORT_VL_RCV_DATA_CNT, CNTR_SYNTH | CNTR_VL),
	[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl,
			DCC_PRF_PORT_VL_RCV_PKTS_CNT, CNTR_SYNTH | CNTR_VL),
	[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT,
			CNTR_SYNTH),
	[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl,
			DCC_PRF_PORT_VL_RCV_FECN_CNT, CNTR_SYNTH | CNTR_VL),
	[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT,
			CNTR_SYNTH),
	[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl,
			DCC_PRF_PORT_VL_RCV_BECN_CNT, CNTR_SYNTH | CNTR_VL),
	[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
	[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl,
			DCC_PRF_PORT_VL_RCV_BUBBLE_CNT, CNTR_SYNTH | CNTR_VL),
	[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
	[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl,
			DCC_PRF_PORT_VL_MARK_FECN_CNT, CNTR_SYNTH | CNTR_VL),
	[C_DC_TOTAL_CRC] = DC_PERF_CNTR_LCB(DcTotCrc,
			DC_LCB_ERR_INFO_TOTAL_CRC_ERR, CNTR_SYNTH),
	[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
	[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
	[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
	[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
	[C_DC_CRC_MULT_LN] = DC_PERF_CNTR_LCB(DcMultLn,
			DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN, CNTR_SYNTH),
	[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay,
			DC_LCB_ERR_INFO_TX_REPLAY_CNT, CNTR_SYNTH),
	[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay,
			DC_LCB_ERR_INFO_RX_REPLAY_CNT, CNTR_SYNTH),
	[C_DC_SEQ_CRC_CNT] = DC_PERF_CNTR_LCB(DcLinkSeqCrc,
			DC_LCB_ERR_INFO_SEQ_CRC_CNT, CNTR_SYNTH),
	[C_DC_ESC0_ONLY_CNT] = DC_PERF_CNTR_LCB(DcEsc0,
			DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT, CNTR_SYNTH),
	[C_DC_ESC0_PLUS1_CNT] = DC_PERF_CNTR_LCB(DcEsc1,
			DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT, CNTR_SYNTH),
	[C_DC_ESC0_PLUS2_CNT] = DC_PERF_CNTR_LCB(DcEsc0Plus2,
			DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT, CNTR_SYNTH),
	[C_DC_REINIT_FROM_PEER_CNT] = DC_PERF_CNTR_LCB(DcReinitPeer,
			DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, CNTR_SYNTH),
	[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
	[C_DC_MISC_FLG_CNT] = DC_PERF_CNTR_LCB(DcMiscFlg,
			DC_LCB_ERR_INFO_MISC_FLG_CNT, CNTR_SYNTH),
	[C_DC_PRF_GOOD_LTP_CNT] = DC_PERF_CNTR_LCB(DcGoodLTP,
			DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
	[C_DC_PRF_ACCEPTED_LTP_CNT] = DC_PERF_CNTR_LCB(DcAccLTP,
			DC_LCB_PRF_ACCEPTED_LTP_CNT, CNTR_SYNTH),
	[C_DC_PRF_RX_FLIT_CNT] = DC_PERF_CNTR_LCB(DcPrfRxFlit,
			DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
	[C_DC_PRF_TX_FLIT_CNT] = DC_PERF_CNTR_LCB(DcPrfTxFlit,
			DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
	[C_DC_PRF_CLK_CNTR] = DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR,
			CNTR_SYNTH),
	[C_DC_PG_DBG_FLIT_CRDTS_CNT] = DC_PERF_CNTR_LCB(DcFltCrdts,
			DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
	[C_DC_PG_STS_PAUSE_COMPLETE_CNT] = DC_PERF_CNTR_LCB(DcPauseComp,
			DC_LCB_PG_STS_PAUSE_COMPLETE_CNT, CNTR_SYNTH),
	[C_DC_PG_STS_TX_SBE_CNT] = DC_PERF_CNTR_LCB(DcStsTxSbe,
			DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
	[C_DC_PG_STS_TX_MBE_CNT] = DC_PERF_CNTR_LCB(DcStsTxMbe,
			DC_LCB_PG_STS_TX_MBE_CNT, CNTR_SYNTH),
	[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			access_sw_cpu_intr),
	[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rcv_limit),
	[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			access_sw_vtx_wait),
	[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			access_sw_pio_wait),
	[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			access_sw_pio_drain),
	[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			access_sw_kmem_wait),
	[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			access_sw_send_schedule),
	[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			SEND_DMA_DESC_FETCHED_CNT, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			dev_access_u32_csr),
	[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_int_cnt),
	[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_err_cnt),
	[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_idle_int_cnt),
	[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_progress_int_cnt),
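	/*
	 * The entries below shadow the per-bit error status registers
	 * (MISC, CCE, RcvErr, SendPio, SendDma, SendEgress, Send,
	 * SendCtxt and SendDmaEng); each one reports the software error
	 * count kept for the corresponding status bit.
	 */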
	/* MISC_ERR_STATUS */
	[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			CNTR_NORMAL, access_misc_pll_lock_fail_err_cnt),
	[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			CNTR_NORMAL, access_misc_mbist_fail_err_cnt),
	[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			CNTR_NORMAL, access_misc_invalid_eep_cmd_err_cnt),
	[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_done_parity_err_cnt),
	[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_write_err_cnt),
	[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_read_bad_addr_err_cnt),
	[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL, access_misc_efuse_csr_parity_err_cnt),
	[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			CNTR_NORMAL, access_misc_fw_auth_failed_err_cnt),
	[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			CNTR_NORMAL, access_misc_key_mismatch_err_cnt),
	[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			CNTR_NORMAL, access_misc_sbus_write_failed_err_cnt),
	[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL, access_misc_csr_write_bad_addr_err_cnt),
	[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL, access_misc_csr_read_bad_addr_err_cnt),
	[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL, access_misc_csr_parity_err_cnt),
	/* CceErrStatus */
	[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
			CNTR_NORMAL, access_sw_cce_err_status_aggregated_cnt),
	[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			CNTR_NORMAL, access_cce_msix_csr_parity_err_cnt),
	[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			CNTR_NORMAL, access_cce_int_map_unc_err_cnt),
	[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			CNTR_NORMAL, access_cce_int_map_cor_err_cnt),
	[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			CNTR_NORMAL, access_cce_msix_table_unc_err_cnt),
	[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			CNTR_NORMAL, access_cce_msix_table_cor_err_cnt),
	[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0, 0,
			CNTR_NORMAL, access_cce_rxdma_conv_fifo_parity_err_cnt),
	[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0, 0,
			CNTR_NORMAL, access_cce_rcpl_async_fifo_parity_err_cnt),
	[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_seg_write_bad_addr_err_cnt),
	[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_seg_read_bad_addr_err_cnt),
	[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			CNTR_NORMAL, access_la_triggered_cnt),
	[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			CNTR_NORMAL, access_cce_trgt_cpl_timeout_err_cnt),
	[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_receive_parity_err_cnt),
	[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_transmit_back_parity_err_cnt),
	[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_transmit_front_parity_err_cnt),
	[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_dat_q_unc_err_cnt),
	[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_hd_q_unc_err_cnt),
	[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_dat_q_unc_err_cnt),
	[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_hd_q_unc_err_cnt),
	[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_sot_mem_unc_err_cnt),
	[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_mem_unc_err),
	[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_n_post_dat_q_parity_err_cnt),
	[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			CNTR_NORMAL, access_pcic_n_post_h_q_parity_err_cnt),
	[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_dat_q_cor_err_cnt),
	[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_cpl_hd_q_cor_err_cnt),
	[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_dat_q_cor_err_cnt),
	[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_post_hd_q_cor_err_cnt),
	[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_sot_mem_cor_err_cnt),
	[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			CNTR_NORMAL, access_pcic_retry_mem_cor_err_cnt),
	[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoDbgParityError", 0, 0, CNTR_NORMAL,
			access_cce_cli1_async_fifo_dbg_parity_err_cnt),
	[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoRxdmaParityError", 0, 0, CNTR_NORMAL,
			access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
	[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0, CNTR_NORMAL,
			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
	[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0, CNTR_NORMAL,
			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
	[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0, 0,
			CNTR_NORMAL, access_cce_cli2_async_fifo_parity_err_cnt),
	[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			CNTR_NORMAL, access_cce_csr_cfg_bus_parity_err_cnt),
	[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0, 0,
			CNTR_NORMAL, access_cce_cli0_async_fifo_parity_err_cnt),
	[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			CNTR_NORMAL, access_cce_rspd_data_parity_err_cnt),
	[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			CNTR_NORMAL, access_cce_trgt_access_err_cnt),
	[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0, 0,
			CNTR_NORMAL, access_cce_trgt_async_fifo_parity_err_cnt),
	[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_csr_write_bad_addr_err_cnt),
	[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_cce_csr_read_bad_addr_err_cnt),
	[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			CNTR_NORMAL, access_ccs_csr_parity_err_cnt),

	/* RcvErrStatus */
	[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_csr_parity_err_cnt),
	[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_rx_csr_write_bad_addr_err_cnt),
	[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_rx_csr_read_bad_addr_err_cnt),
	[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_csr_unc_err_cnt),
	[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_dq_fsm_encoding_err_cnt),
	[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_eq_fsm_encoding_err_cnt),
	[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_csr_parity_err_cnt),
	[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_data_cor_err_cnt),
	[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_data_unc_err_cnt),
	[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_data_fifo_rd_cor_err_cnt),
	[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_data_fifo_rd_unc_err_cnt),
	[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_cor_err_cnt),
	[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_hdr_fifo_rd_unc_err_cnt),
	[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part2_cor_err_cnt),
	[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part2_unc_err_cnt),
	[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part1_cor_err_cnt),
	[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_desc_part1_unc_err_cnt),
	[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			CNTR_NORMAL, access_rx_hq_intr_fsm_err_cnt),
	[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_hq_intr_csr_parity_err_cnt),
	[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_csr_parity_err_cnt),
	[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_rcv_array_cor_err_cnt),
	[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_rcv_array_unc_err_cnt),
	[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_des_part2_parity_err_cnt),
	[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_des_part1_unc_cor_err_cnt),
	[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			CNTR_NORMAL, access_rx_lookup_des_part1_unc_err_cnt),
	[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_next_free_buf_cor_err_cnt),
	[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_next_free_buf_unc_err_cnt),
	[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufFlInitWrAddrParityErr", 0, 0, CNTR_NORMAL,
			access_rbuf_fl_init_wr_addr_parity_err_cnt),
	[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_fl_initdone_parity_err_cnt),
	[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_fl_write_addr_parity_err_cnt),
	[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_fl_rd_addr_parity_err_cnt),
	[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_empty_err_cnt),
	[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_full_err_cnt),
	[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			CNTR_NORMAL, access_rbuf_bad_lookup_err_cnt),
	[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			CNTR_NORMAL, access_rbuf_ctx_id_parity_err_cnt),
	[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			CNTR_NORMAL, access_rbuf_csr_qeopdw_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQNumOfPktParityErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQTlPtrParityErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
	[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQHeadBufNumParityErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
	[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_block_list_read_cor_err_cnt),
	[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_block_list_read_unc_err_cnt),
	[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_lookup_des_cor_err_cnt),
	[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_lookup_des_unc_err_cnt),
	[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			"RxRbufLookupDesRegUncCorErr", 0, 0, CNTR_NORMAL,
			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
	[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_lookup_des_reg_unc_err_cnt),
	[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_free_list_cor_err_cnt),
	[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rbuf_free_list_unc_err_cnt),
	[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_fsm_encoding_err_cnt),
	[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_flag_cor_err_cnt),
	[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_flag_unc_err_cnt),
	[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			CNTR_NORMAL, access_rx_dc_sop_eop_parity_err_cnt),
	[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_csr_parity_err_cnt),
	[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_qp_map_table_cor_err_cnt),
	[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_qp_map_table_unc_err_cnt),
	[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_data_cor_err_cnt),
	[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_data_unc_err_cnt),
	[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_hdr_cor_err_cnt),
	[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			CNTR_NORMAL, access_rx_rcv_hdr_unc_err_cnt),
	[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			CNTR_NORMAL, access_rx_dc_intf_parity_err_cnt),
	[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			CNTR_NORMAL, access_rx_dma_csr_cor_err_cnt),
	/* SendPioErrStatus */
	[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pec_sop_head_parity_err_cnt),
	[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pcc_sop_head_parity_err_cnt),
	[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr", 0, 0,
			CNTR_NORMAL, access_pio_last_returned_cnt_parity_err_cnt),
	[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0, 0,
			CNTR_NORMAL, access_pio_current_free_cnt_parity_err_cnt),
	[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			CNTR_NORMAL, access_pio_reserved_31_err_cnt),
	[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			CNTR_NORMAL, access_pio_reserved_30_err_cnt),
	[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			CNTR_NORMAL, access_pio_ppmc_sop_len_err_cnt),
	[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			CNTR_NORMAL, access_pio_ppmc_bqc_mem_parity_err_cnt),
	[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_vl_fifo_parity_err_cnt),
	[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			CNTR_NORMAL, access_pio_vlf_sop_parity_err_cnt),
	[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			CNTR_NORMAL, access_pio_vlf_v1_len_parity_err_cnt),
	[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			CNTR_NORMAL, access_pio_block_qw_count_parity_err_cnt),
	[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			CNTR_NORMAL, access_pio_write_qw_valid_parity_err_cnt),
	[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			CNTR_NORMAL, access_pio_state_machine_err_cnt),
	[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			CNTR_NORMAL, access_pio_write_data_parity_err_cnt),
	[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			CNTR_NORMAL, access_pio_host_addr_mem_cor_err_cnt),
	[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			CNTR_NORMAL, access_pio_host_addr_mem_unc_err_cnt),
	[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			CNTR_NORMAL, access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
	[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			CNTR_NORMAL, access_pio_init_sm_in_err_cnt),
	[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			CNTR_NORMAL, access_pio_ppmc_pbl_fifo_err_cnt),
	[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_credit_ret_fifo_parity_err_cnt),
	[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank1_cor_err_cnt),
	[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank0_cor_err_cnt),
	[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank1_unc_err_cnt),
	[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			CNTR_NORMAL, access_pio_v1_len_mem_bank0_unc_err_cnt),
	[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			CNTR_NORMAL, access_pio_sm_pkt_reset_parity_err_cnt),
	[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pkt_evict_fifo_parity_err_cnt),
	[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			"PioSbrdctrlCrrelFifoParityErr", 0, 0, CNTR_NORMAL,
			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
	[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			CNTR_NORMAL, access_pio_sbrdctl_crrel_parity_err_cnt),
	[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pec_fifo_parity_err_cnt),
	[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			CNTR_NORMAL, access_pio_pcc_fifo_parity_err_cnt),
	[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			CNTR_NORMAL, access_pio_sb_mem_fifo1_err_cnt),
	[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			CNTR_NORMAL, access_pio_sb_mem_fifo0_err_cnt),
	[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			CNTR_NORMAL, access_pio_csr_parity_err_cnt),
	[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			CNTR_NORMAL, access_pio_write_addr_parity_err_cnt),
	[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			CNTR_NORMAL, access_pio_write_bad_ctxt_err_cnt),
	/* SendDmaErrStatus */
	[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_pcie_req_tracking_cor_err_cnt),
	[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_pcie_req_tracking_unc_err_cnt),
	[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			CNTR_NORMAL, access_sdma_csr_parity_err_cnt),
	[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			CNTR_NORMAL, access_sdma_rpy_tag_err_cnt),
	/* SendEgressErrStatus */
	[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
			CNTR_NORMAL, access_tx_read_pio_memory_csr_unc_err_cnt),
	[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0, 0,
			CNTR_NORMAL, access_tx_read_sdma_memory_csr_err_cnt),
	[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			CNTR_NORMAL, access_tx_egress_fifo_cor_err_cnt),
	[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			CNTR_NORMAL, access_tx_read_pio_memory_cor_err_cnt),
	[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			CNTR_NORMAL, access_tx_read_sdma_memory_cor_err_cnt),
	[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			CNTR_NORMAL, access_tx_sb_hdr_cor_err_cnt),
	[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			CNTR_NORMAL, access_tx_credit_overrun_err_cnt),
	[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo8_cor_err_cnt),
	[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo7_cor_err_cnt),
	[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo6_cor_err_cnt),
	[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo5_cor_err_cnt),
	[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo4_cor_err_cnt),
	[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo3_cor_err_cnt),
	[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo2_cor_err_cnt),
	[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo1_cor_err_cnt),
	[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo0_cor_err_cnt),
	[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			CNTR_NORMAL, access_tx_credit_return_vl_err_cnt),
	[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			CNTR_NORMAL, access_tx_hcrc_insertion_err_cnt),
	[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			CNTR_NORMAL, access_tx_egress_fifo_unc_err_cnt),
	[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			CNTR_NORMAL, access_tx_read_pio_memory_unc_err_cnt),
	[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			CNTR_NORMAL, access_tx_read_sdma_memory_unc_err_cnt),
	[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			CNTR_NORMAL, access_tx_sb_hdr_unc_err_cnt),
	[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			CNTR_NORMAL, access_tx_credit_return_parity_err_cnt),
	[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo8_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo7_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo6_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo5_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo4_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo3_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo2_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo1_unc_or_parity_err_cnt),
	[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_fifo0_unc_or_parity_err_cnt),
	[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma15_disallowed_packet_err_cnt),
	[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma14_disallowed_packet_err_cnt),
	[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma13_disallowed_packet_err_cnt),
	[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma12_disallowed_packet_err_cnt),
	[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma11_disallowed_packet_err_cnt),
	[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma10_disallowed_packet_err_cnt),
	[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma9_disallowed_packet_err_cnt),
	[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma8_disallowed_packet_err_cnt),
	[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma7_disallowed_packet_err_cnt),
	[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma6_disallowed_packet_err_cnt),
	[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma5_disallowed_packet_err_cnt),
	[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma4_disallowed_packet_err_cnt),
	[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma3_disallowed_packet_err_cnt),
	[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma2_disallowed_packet_err_cnt),
	[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma1_disallowed_packet_err_cnt),
	[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma0_disallowed_packet_err_cnt),
	[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			CNTR_NORMAL, access_tx_config_parity_err_cnt),
	[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_sbrd_ctl_csr_parity_err_cnt),
	[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			CNTR_NORMAL, access_tx_launch_csr_parity_err_cnt),
	[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			CNTR_NORMAL, access_tx_illegal_vl_err_cnt),
	[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			"TxSbrdCtlStateMachineParityErr", 0, 0, CNTR_NORMAL,
			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
	[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			CNTR_NORMAL, access_egress_reserved_10_err_cnt),
	[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			CNTR_NORMAL, access_egress_reserved_9_err_cnt),
	[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr", 0, 0,
			CNTR_NORMAL, access_tx_sdma_launch_intf_parity_err_cnt),
	[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			CNTR_NORMAL, access_tx_pio_launch_intf_parity_err_cnt),
	[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			CNTR_NORMAL, access_egress_reserved_6_err_cnt),
	[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			CNTR_NORMAL, access_tx_incorrect_link_state_err_cnt),
	[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			CNTR_NORMAL, access_tx_linkdown_err_cnt),
	[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			"EgressFifoUnderrunOrParityErr", 0, 0, CNTR_NORMAL,
			access_tx_egress_fifo_underrun_or_parity_err_cnt),
	[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			CNTR_NORMAL, access_egress_reserved_2_err_cnt),
	[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			CNTR_NORMAL, access_tx_pkt_integrity_mem_unc_err_cnt),
	[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			CNTR_NORMAL, access_tx_pkt_integrity_mem_cor_err_cnt),
	/* SendErrStatus */
	[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL, access_send_csr_write_bad_addr_err_cnt),
	[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL, access_send_csr_read_bad_addr_err_cnt),
	[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			CNTR_NORMAL, access_send_csr_parity_cnt),
	/* SendCtxtErrStatus */
	[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			CNTR_NORMAL, access_pio_write_out_of_bounds_err_cnt),
	[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			CNTR_NORMAL, access_pio_write_overflow_err_cnt),
	[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr", 0, 0,
			CNTR_NORMAL, access_pio_write_crosses_boundary_err_cnt),
	[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			CNTR_NORMAL, access_pio_disallowed_packet_err_cnt),
	[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			CNTR_NORMAL, access_pio_inconsistent_sop_err_cnt),
	/* SendDmaEngErrStatus */
	[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_request_fifo_cor_err_cnt),
	[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_storage_cor_err_cnt),
	[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_packet_tracking_cor_err_cnt),
	[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_assembly_cor_err_cnt),
	[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
			CNTR_NORMAL, access_sdma_desc_table_cor_err_cnt),
	[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_request_fifo_unc_err_cnt),
	[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_storage_unc_err_cnt),
	[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_packet_tracking_unc_err_cnt),
	[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_assembly_unc_err_cnt),
	[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
			CNTR_NORMAL, access_sdma_desc_table_unc_err_cnt),
	[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
			CNTR_NORMAL, access_sdma_timeout_err_cnt),
	[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_length_err_cnt),
	[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_address_err_cnt),
	[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
			CNTR_NORMAL, access_sdma_header_select_err_cnt),
	[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
			CNTR_NORMAL, access_sdma_reserved_9_err_cnt),
	[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
			CNTR_NORMAL, access_sdma_packet_desc_overflow_err_cnt),
	[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
			CNTR_NORMAL, access_sdma_length_mismatch_err_cnt),
	[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
			CNTR_NORMAL, access_sdma_halt_err_cnt),
	[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
			CNTR_NORMAL, access_sdma_mem_read_err_cnt),
	[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
			CNTR_NORMAL, access_sdma_first_desc_err_cnt),
	[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
			CNTR_NORMAL, access_sdma_tail_out_of_bounds_err_cnt),
	[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
			CNTR_NORMAL, access_sdma_too_long_err_cnt),
	[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
			CNTR_NORMAL, access_sdma_gen_mismatch_err_cnt),
	[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
			CNTR_NORMAL, access_sdma_wrong_dw_err_cnt),
};
5027 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0, 5028 CNTR_NORMAL, 5029 access_sdma_header_select_err_cnt), 5030 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0, 5031 CNTR_NORMAL, 5032 access_sdma_reserved_9_err_cnt), 5033 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0, 5034 CNTR_NORMAL, 5035 access_sdma_packet_desc_overflow_err_cnt), 5036 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0, 5037 CNTR_NORMAL, 5038 access_sdma_length_mismatch_err_cnt), 5039 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0, 5040 CNTR_NORMAL, 5041 access_sdma_halt_err_cnt), 5042 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0, 5043 CNTR_NORMAL, 5044 access_sdma_mem_read_err_cnt), 5045 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0, 5046 CNTR_NORMAL, 5047 access_sdma_first_desc_err_cnt), 5048 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0, 5049 CNTR_NORMAL, 5050 access_sdma_tail_out_of_bounds_err_cnt), 5051 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0, 5052 CNTR_NORMAL, 5053 access_sdma_too_long_err_cnt), 5054 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0, 5055 CNTR_NORMAL, 5056 access_sdma_gen_mismatch_err_cnt), 5057 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0, 5058 CNTR_NORMAL, 5059 access_sdma_wrong_dw_err_cnt), 5060 }; 5061 5062 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = { 5063 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT, 5064 CNTR_NORMAL), 5065 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT, 5066 CNTR_NORMAL), 5067 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT, 5068 CNTR_NORMAL), 5069 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT, 5070 CNTR_NORMAL), 5071 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT, 5072 CNTR_NORMAL), 5073 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT, 5074 CNTR_NORMAL), 5075 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT, 5076 CNTR_NORMAL), 5077 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL), 5078 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL), 5079 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH), 5080 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT, 5081 CNTR_SYNTH | CNTR_VL), 5082 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT, 5083 CNTR_SYNTH | CNTR_VL), 5084 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT, 5085 CNTR_SYNTH | CNTR_VL), 5086 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL), 5087 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL), 5088 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT, 5089 access_sw_link_dn_cnt), 5090 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT, 5091 access_sw_link_up_cnt), 5092 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL, 5093 access_sw_unknown_frame_cnt), 5094 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT, 5095 access_sw_xmit_discards), 5096 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0, 5097 CNTR_SYNTH | CNTR_32BIT | CNTR_VL, 5098 access_sw_xmit_discards), 5099 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH, 5100 access_xmit_constraint_errs), 5101 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 
0, CNTR_SYNTH, 5102 access_rcv_constraint_errs), 5103 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts), 5104 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends), 5105 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks), 5106 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks), 5107 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts), 5108 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops), 5109 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait), 5110 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak), 5111 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq), 5112 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), 5113 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), 5114 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), 5115 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, 5116 access_sw_cpu_rc_acks), 5117 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, 5118 access_sw_cpu_rc_qacks), 5119 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL, 5120 access_sw_cpu_rc_delayed_comp), 5121 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1), 5122 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3), 5123 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5), 5124 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7), 5125 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9), 5126 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11), 5127 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13), 5128 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15), 5129 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17), 5130 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19), 5131 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21), 5132 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23), 5133 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25), 5134 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27), 5135 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29), 5136 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31), 5137 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33), 5138 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35), 5139 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37), 5140 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39), 5141 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41), 5142 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43), 5143 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45), 5144 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47), 5145 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49), 5146 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51), 5147 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53), 5148 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55), 5149 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57), 5150 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59), 5151 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61), 5152 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63), 5153 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65), 5154 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67), 5155 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69), 5156 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71), 5157 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73), 5158 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75), 5159 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77), 5160 [OVR_LBL(78)] = 
OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79), 5161 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81), 5162 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83), 5163 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85), 5164 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87), 5165 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89), 5166 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91), 5167 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93), 5168 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95), 5169 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97), 5170 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99), 5171 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101), 5172 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103), 5173 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105), 5174 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107), 5175 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109), 5176 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111), 5177 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113), 5178 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115), 5179 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117), 5180 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119), 5181 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121), 5182 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123), 5183 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125), 5184 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127), 5185 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129), 5186 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131), 5187 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133), 5188 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135), 5189 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137), 5190 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139), 5191 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141), 5192 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143), 5193 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145), 5194 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147), 5195 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149), 5196 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151), 5197 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153), 5198 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155), 5199 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157), 5200 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159), 5201 }; 5202 5203 /* ======================================================================== */ 5204 5205 /* return true if this is chip revision a */ 5206 int is_ax(struct hfi1_devdata *dd) 5207 { 5208 u8 chip_rev_minor = 5209 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT 5210 & CCE_REVISION_CHIP_REV_MINOR_MASK; 5211 return (chip_rev_minor & 0xf0) == 0; 5212 } 5213 5214 /* return true if this is chip revision b */ 5215 int is_bx(struct hfi1_devdata *dd) 5216 { 5217 u8 chip_rev_minor = 5218 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT 5219 & CCE_REVISION_CHIP_REV_MINOR_MASK; 5220 return (chip_rev_minor & 0xF0) == 0x10; 5221 } 5222 5223 /* 5224 * Append string s to buffer buf. Arguments curp and lenp are the current 5225 * position and remaining length, respectively.
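 *
 * A minimal usage sketch (illustrative only; the locals mirror those in
 * flag_string() below):
 *
 *	char buf[96], *p = buf;
 *	int len = sizeof(buf) - 1;
 *
 *	append_str(buf, &p, &len, "PioErrInt");
 *	append_str(buf, &p, &len, "SDmaErrInt");
 *	*p = 0;
 *
 * buf now holds "PioErrInt,SDmaErrInt" - the comma is added only when a
 * previous string is already in the buffer.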
5226 * 5227 * return 0 on success, 1 on out of room 5228 */ 5229 static int append_str(char *buf, char **curp, int *lenp, const char *s) 5230 { 5231 char *p = *curp; 5232 int len = *lenp; 5233 int result = 0; /* success */ 5234 char c; 5235 5236 /* add a comma, if not first in the buffer */ 5237 if (p != buf) { 5238 if (len == 0) { 5239 result = 1; /* out of room */ 5240 goto done; 5241 } 5242 *p++ = ','; 5243 len--; 5244 } 5245 5246 /* copy the string */ 5247 while ((c = *s++) != 0) { 5248 if (len == 0) { 5249 result = 1; /* out of room */ 5250 goto done; 5251 } 5252 *p++ = c; 5253 len--; 5254 } 5255 5256 done: 5257 /* write return values */ 5258 *curp = p; 5259 *lenp = len; 5260 5261 return result; 5262 } 5263 5264 /* 5265 * Using the given flag table, print a comma separated string into 5266 * the buffer. End in '*' if the buffer is too short. 5267 */ 5268 static char *flag_string(char *buf, int buf_len, u64 flags, 5269 struct flag_table *table, int table_size) 5270 { 5271 char extra[32]; 5272 char *p = buf; 5273 int len = buf_len; 5274 int no_room = 0; 5275 int i; 5276 5277 /* make sure there are at least 2 bytes so we can form "*" */ 5278 if (len < 2) 5279 return ""; 5280 5281 len--; /* leave room for a nul */ 5282 for (i = 0; i < table_size; i++) { 5283 if (flags & table[i].flag) { 5284 no_room = append_str(buf, &p, &len, table[i].str); 5285 if (no_room) 5286 break; 5287 flags &= ~table[i].flag; 5288 } 5289 } 5290 5291 /* any undocumented bits left? */ 5292 if (!no_room && flags) { 5293 snprintf(extra, sizeof(extra), "bits 0x%llx", flags); 5294 no_room = append_str(buf, &p, &len, extra); 5295 } 5296 5297 /* add * if ran out of room */ 5298 if (no_room) { 5299 /* may need to back up to add space for a '*' */ 5300 if (len == 0) 5301 --p; 5302 *p++ = '*'; 5303 } 5304 5305 /* add final nul - space already allocated above */ 5306 *p = 0; 5307 return buf; 5308 } 5309 5310 /* first 8 CCE error interrupt source names */ 5311 static const char * const cce_misc_names[] = { 5312 "CceErrInt", /* 0 */ 5313 "RxeErrInt", /* 1 */ 5314 "MiscErrInt", /* 2 */ 5315 "Reserved3", /* 3 */ 5316 "PioErrInt", /* 4 */ 5317 "SDmaErrInt", /* 5 */ 5318 "EgressErrInt", /* 6 */ 5319 "TxeErrInt" /* 7 */ 5320 }; 5321 5322 /* 5323 * Return the miscellaneous error interrupt name. 5324 */ 5325 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source) 5326 { 5327 if (source < ARRAY_SIZE(cce_misc_names)) 5328 strncpy(buf, cce_misc_names[source], bsize); 5329 else 5330 snprintf(buf, bsize, "Reserved%u", 5331 source + IS_GENERAL_ERR_START); 5332 5333 return buf; 5334 } 5335 5336 /* 5337 * Return the SDMA engine error interrupt name. 5338 */ 5339 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source) 5340 { 5341 snprintf(buf, bsize, "SDmaEngErrInt%u", source); 5342 return buf; 5343 } 5344 5345 /* 5346 * Return the send context error interrupt name. 5347 */ 5348 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source) 5349 { 5350 snprintf(buf, bsize, "SendCtxtErrInt%u", source); 5351 return buf; 5352 } 5353 5354 static const char * const various_names[] = { 5355 "PbcInt", 5356 "GpioAssertInt", 5357 "Qsfp1Int", 5358 "Qsfp2Int", 5359 "TCritInt" 5360 }; 5361 5362 /* 5363 * Return the various interrupt name.
5364 */ 5365 static char *is_various_name(char *buf, size_t bsize, unsigned int source) 5366 { 5367 if (source < ARRAY_SIZE(various_names)) 5368 strncpy(buf, various_names[source], bsize); 5369 else 5370 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START); 5371 return buf; 5372 } 5373 5374 /* 5375 * Return the DC interrupt name. 5376 */ 5377 static char *is_dc_name(char *buf, size_t bsize, unsigned int source) 5378 { 5379 static const char * const dc_int_names[] = { 5380 "common", 5381 "lcb", 5382 "8051", 5383 "lbm" /* local block merge */ 5384 }; 5385 5386 if (source < ARRAY_SIZE(dc_int_names)) 5387 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]); 5388 else 5389 snprintf(buf, bsize, "DCInt%u", source); 5390 return buf; 5391 } 5392 5393 static const char * const sdma_int_names[] = { 5394 "SDmaInt", 5395 "SdmaIdleInt", 5396 "SdmaProgressInt", 5397 }; 5398 5399 /* 5400 * Return the SDMA engine interrupt name. 5401 */ 5402 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source) 5403 { 5404 /* what interrupt */ 5405 unsigned int what = source / TXE_NUM_SDMA_ENGINES; 5406 /* which engine */ 5407 unsigned int which = source % TXE_NUM_SDMA_ENGINES; 5408 5409 if (likely(what < 3)) 5410 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which); 5411 else 5412 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source); 5413 return buf; 5414 } 5415 5416 /* 5417 * Return the receive available interrupt name. 5418 */ 5419 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source) 5420 { 5421 snprintf(buf, bsize, "RcvAvailInt%u", source); 5422 return buf; 5423 } 5424 5425 /* 5426 * Return the receive urgent interrupt name. 5427 */ 5428 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source) 5429 { 5430 snprintf(buf, bsize, "RcvUrgentInt%u", source); 5431 return buf; 5432 } 5433 5434 /* 5435 * Return the send credit interrupt name. 5436 */ 5437 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source) 5438 { 5439 snprintf(buf, bsize, "SendCreditInt%u", source); 5440 return buf; 5441 } 5442 5443 /* 5444 * Return the reserved interrupt name. 
*/ 5446 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source) 5447 { 5448 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START); 5449 return buf; 5450 } 5451 5452 static char *cce_err_status_string(char *buf, int buf_len, u64 flags) 5453 { 5454 return flag_string(buf, buf_len, flags, 5455 cce_err_status_flags, 5456 ARRAY_SIZE(cce_err_status_flags)); 5457 } 5458 5459 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags) 5460 { 5461 return flag_string(buf, buf_len, flags, 5462 rxe_err_status_flags, 5463 ARRAY_SIZE(rxe_err_status_flags)); 5464 } 5465 5466 static char *misc_err_status_string(char *buf, int buf_len, u64 flags) 5467 { 5468 return flag_string(buf, buf_len, flags, misc_err_status_flags, 5469 ARRAY_SIZE(misc_err_status_flags)); 5470 } 5471 5472 static char *pio_err_status_string(char *buf, int buf_len, u64 flags) 5473 { 5474 return flag_string(buf, buf_len, flags, 5475 pio_err_status_flags, 5476 ARRAY_SIZE(pio_err_status_flags)); 5477 } 5478 5479 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags) 5480 { 5481 return flag_string(buf, buf_len, flags, 5482 sdma_err_status_flags, 5483 ARRAY_SIZE(sdma_err_status_flags)); 5484 } 5485 5486 static char *egress_err_status_string(char *buf, int buf_len, u64 flags) 5487 { 5488 return flag_string(buf, buf_len, flags, 5489 egress_err_status_flags, 5490 ARRAY_SIZE(egress_err_status_flags)); 5491 } 5492 5493 static char *egress_err_info_string(char *buf, int buf_len, u64 flags) 5494 { 5495 return flag_string(buf, buf_len, flags, 5496 egress_err_info_flags, 5497 ARRAY_SIZE(egress_err_info_flags)); 5498 } 5499 5500 static char *send_err_status_string(char *buf, int buf_len, u64 flags) 5501 { 5502 return flag_string(buf, buf_len, flags, 5503 send_err_status_flags, 5504 ARRAY_SIZE(send_err_status_flags)); 5505 } 5506 5507 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5508 { 5509 char buf[96]; 5510 int i = 0; 5511 5512 /* 5513 * For most of these errors, there is nothing that can be done except 5514 * report or record it. 5515 */ 5516 dd_dev_info(dd, "CCE Error: %s\n", 5517 cce_err_status_string(buf, sizeof(buf), reg)); 5518 5519 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) && 5520 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { 5521 /* this error requires a manual drop into SPC freeze mode */ 5522 /* then a fix up */ 5523 start_freeze_handling(dd->pport, FREEZE_SELF); 5524 } 5525 5526 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) { 5527 if (reg & (1ull << i)) { 5528 incr_cntr64(&dd->cce_err_status_cnt[i]); 5529 /* maintain a counter over all cce_err_status errors */ 5530 incr_cntr64(&dd->sw_cce_err_status_aggregate); 5531 } 5532 } 5533 } 5534 5535 /* 5536 * Check counters for receive errors that do not have an interrupt 5537 * associated with them.
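 *
 * The check is driven by a self re-arming kernel timer rather than an
 * interrupt source; the re-arm idiom used below is simply:
 *
 *	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);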
5538 */ 5539 #define RCVERR_CHECK_TIME 10 5540 static void update_rcverr_timer(struct timer_list *t) 5541 { 5542 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer); 5543 struct hfi1_pportdata *ppd = dd->pport; 5544 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL); 5545 5546 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && 5547 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { 5548 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__); 5549 set_link_down_reason( 5550 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0, 5551 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN); 5552 queue_work(ppd->link_wq, &ppd->link_bounce_work); 5553 } 5554 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; 5555 5556 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); 5557 } 5558 5559 static int init_rcverr(struct hfi1_devdata *dd) 5560 { 5561 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); 5562 /* Assume the hardware counter has been reset */ 5563 dd->rcv_ovfl_cnt = 0; 5564 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); 5565 } 5566 5567 static void free_rcverr(struct hfi1_devdata *dd) 5568 { 5569 if (dd->rcverr_timer.function) 5570 del_timer_sync(&dd->rcverr_timer); 5571 } 5572 5573 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5574 { 5575 char buf[96]; 5576 int i = 0; 5577 5578 dd_dev_info(dd, "Receive Error: %s\n", 5579 rxe_err_status_string(buf, sizeof(buf), reg)); 5580 5581 if (reg & ALL_RXE_FREEZE_ERR) { 5582 int flags = 0; 5583 5584 /* 5585 * Freeze mode recovery is disabled for the errors 5586 * in RXE_FREEZE_ABORT_MASK 5587 */ 5588 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK)) 5589 flags = FREEZE_ABORT; 5590 5591 start_freeze_handling(dd->pport, flags); 5592 } 5593 5594 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) { 5595 if (reg & (1ull << i)) 5596 incr_cntr64(&dd->rcv_err_status_cnt[i]); 5597 } 5598 } 5599 5600 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5601 { 5602 char buf[96]; 5603 int i = 0; 5604 5605 dd_dev_info(dd, "Misc Error: %s", 5606 misc_err_status_string(buf, sizeof(buf), reg)); 5607 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) { 5608 if (reg & (1ull << i)) 5609 incr_cntr64(&dd->misc_err_status_cnt[i]); 5610 } 5611 } 5612 5613 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5614 { 5615 char buf[96]; 5616 int i = 0; 5617 5618 dd_dev_info(dd, "PIO Error: %s\n", 5619 pio_err_status_string(buf, sizeof(buf), reg)); 5620 5621 if (reg & ALL_PIO_FREEZE_ERR) 5622 start_freeze_handling(dd->pport, 0); 5623 5624 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) { 5625 if (reg & (1ull << i)) 5626 incr_cntr64(&dd->send_pio_err_status_cnt[i]); 5627 } 5628 } 5629 5630 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5631 { 5632 char buf[96]; 5633 int i = 0; 5634 5635 dd_dev_info(dd, "SDMA Error: %s\n", 5636 sdma_err_status_string(buf, sizeof(buf), reg)); 5637 5638 if (reg & ALL_SDMA_FREEZE_ERR) 5639 start_freeze_handling(dd->pport, 0); 5640 5641 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) { 5642 if (reg & (1ull << i)) 5643 incr_cntr64(&dd->send_dma_err_status_cnt[i]); 5644 } 5645 } 5646 5647 static inline void __count_port_discards(struct hfi1_pportdata *ppd) 5648 { 5649 incr_cntr64(&ppd->port_xmit_discards); 5650 } 5651 5652 static void count_port_inactive(struct hfi1_devdata *dd) 5653 { 5654 __count_port_discards(dd->pport); 5655 } 5656 5657 /* 5658 * We have had a "disallowed packet" error during egress. 
Determine the 5659 * integrity check which failed, and update relevant error counter, etc. 5660 * 5661 * Note that the SEND_EGRESS_ERR_INFO register has only a single 5662 * bit of state per integrity check, and so we can miss the reason for an 5663 * egress error if more than one packet fails the same integrity check 5664 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO. 5665 */ 5666 static void handle_send_egress_err_info(struct hfi1_devdata *dd, 5667 int vl) 5668 { 5669 struct hfi1_pportdata *ppd = dd->pport; 5670 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */ 5671 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO); 5672 char buf[96]; 5673 5674 /* clear down all observed info as quickly as possible after read */ 5675 write_csr(dd, SEND_EGRESS_ERR_INFO, info); 5676 5677 dd_dev_info(dd, 5678 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n", 5679 info, egress_err_info_string(buf, sizeof(buf), info), src); 5680 5681 /* Eventually add other counters for each bit */ 5682 if (info & PORT_DISCARD_EGRESS_ERRS) { 5683 int weight, i; 5684 5685 /* 5686 * Count all applicable bits as individual errors and 5687 * attribute them to the packet that triggered this handler. 5688 * This may not be completely accurate due to limitations 5689 * on the available hardware error information. There is 5690 * a single information register and any number of error 5691 * packets may have occurred and contributed to it before 5692 * this routine is called. This means that: 5693 * a) If multiple packets with the same error occur before 5694 * this routine is called, earlier packets are missed. 5695 * There is only a single bit for each error type. 5696 * b) Errors may not be attributed to the correct VL. 5697 * The driver is attributing all bits in the info register 5698 * to the packet that triggered this call, but bits 5699 * could be an accumulation of different packets with 5700 * different VLs. 5701 * c) A single error packet may have multiple counts attached 5702 * to it. There is no way for the driver to know if 5703 * multiple bits set in the info register are due to a 5704 * single packet or multiple packets. The driver assumes 5705 * multiple packets. 5706 */ 5707 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS); 5708 for (i = 0; i < weight; i++) { 5709 __count_port_discards(ppd); 5710 if (vl >= 0 && vl < TXE_NUM_DATA_VL) 5711 incr_cntr64(&ppd->port_xmit_discards_vl[vl]); 5712 else if (vl == 15) 5713 incr_cntr64(&ppd->port_xmit_discards_vl 5714 [C_VL_15]); 5715 } 5716 } 5717 } 5718 5719 /* 5720 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS 5721 * register. Does it represent a 'port inactive' error? 5722 */ 5723 static inline int port_inactive_err(u64 posn) 5724 { 5725 return (posn >= SEES(TX_LINKDOWN) && 5726 posn <= SEES(TX_INCORRECT_LINK_STATE)); 5727 } 5728 5729 /* 5730 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS 5731 * register. Does it represent a 'disallowed packet' error? 5732 */ 5733 static inline int disallowed_pkt_err(int posn) 5734 { 5735 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) && 5736 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET)); 5737 } 5738 5739 /* 5740 * Input value is a bit position of one of the SDMA engine disallowed 5741 * packet errors. Return which engine. Use of this must be guarded by 5742 * disallowed_pkt_err(). 5743 */ 5744 static inline int disallowed_pkt_engine(int posn) 5745 { 5746 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); 5747 } 5748 5749 /* 5750 * Translate an SDMA engine to a VL. 
Return -1 if the translation cannot 5751 * be done. 5752 */ 5753 static int engine_to_vl(struct hfi1_devdata *dd, int engine) 5754 { 5755 struct sdma_vl_map *m; 5756 int vl; 5757 5758 /* range check */ 5759 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES) 5760 return -1; 5761 5762 rcu_read_lock(); 5763 m = rcu_dereference(dd->sdma_map); 5764 vl = m->engine_to_vl[engine]; 5765 rcu_read_unlock(); 5766 5767 return vl; 5768 } 5769 5770 /* 5771 * Translate the send context (software index) into a VL. Return -1 if the 5772 * translation cannot be done. 5773 */ 5774 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index) 5775 { 5776 struct send_context_info *sci; 5777 struct send_context *sc; 5778 int i; 5779 5780 sci = &dd->send_contexts[sw_index]; 5781 5782 /* there is no information for user (PSM) and ack contexts */ 5783 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) 5784 return -1; 5785 5786 sc = sci->sc; 5787 if (!sc) 5788 return -1; 5789 if (dd->vld[15].sc == sc) 5790 return 15; 5791 for (i = 0; i < num_vls; i++) 5792 if (dd->vld[i].sc == sc) 5793 return i; 5794 5795 return -1; 5796 } 5797 5798 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5799 { 5800 u64 reg_copy = reg, handled = 0; 5801 char buf[96]; 5802 int i = 0; 5803 5804 if (reg & ALL_TXE_EGRESS_FREEZE_ERR) 5805 start_freeze_handling(dd->pport, 0); 5806 else if (is_ax(dd) && 5807 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) && 5808 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) 5809 start_freeze_handling(dd->pport, 0); 5810 5811 while (reg_copy) { 5812 int posn = fls64(reg_copy); 5813 /* fls64() returns a 1-based offset, we want it zero based */ 5814 int shift = posn - 1; 5815 u64 mask = 1ULL << shift; 5816 5817 if (port_inactive_err(shift)) { 5818 count_port_inactive(dd); 5819 handled |= mask; 5820 } else if (disallowed_pkt_err(shift)) { 5821 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift)); 5822 5823 handle_send_egress_err_info(dd, vl); 5824 handled |= mask; 5825 } 5826 reg_copy &= ~mask; 5827 } 5828 5829 reg &= ~handled; 5830 5831 if (reg) 5832 dd_dev_info(dd, "Egress Error: %s\n", 5833 egress_err_status_string(buf, sizeof(buf), reg)); 5834 5835 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) { 5836 if (reg & (1ull << i)) 5837 incr_cntr64(&dd->send_egress_err_status_cnt[i]); 5838 } 5839 } 5840 5841 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 5842 { 5843 char buf[96]; 5844 int i = 0; 5845 5846 dd_dev_info(dd, "Send Error: %s\n", 5847 send_err_status_string(buf, sizeof(buf), reg)); 5848 5849 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) { 5850 if (reg & (1ull << i)) 5851 incr_cntr64(&dd->send_err_status_cnt[i]); 5852 } 5853 } 5854 5855 /* 5856 * The maximum number of times the error clear down will loop before 5857 * blocking a repeating error. This value is arbitrary. 5858 */ 5859 #define MAX_CLEAR_COUNT 20 5860 5861 /* 5862 * Clear and handle an error register. All error interrupts are funneled 5863 * through here to have a central location to correctly handle single- 5864 * or multi-shot errors. 5865 * 5866 * For non per-context registers, call this routine with a context value 5867 * of 0 so the per-context offset is zero. 5868 * 5869 * If the handler loops too many times, assume that something is wrong 5870 * and can't be fixed, so mask the error bits.
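 *
 * A typical caller sketch for a non per-context register (this mirrors
 * is_misc_err_int() below):
 *
 *	const struct err_reg_info *eri = &misc_errs[source];
 *
 *	if (eri->handler)
 *		interrupt_clear_down(dd, 0, eri);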
5871 */ 5872 static void interrupt_clear_down(struct hfi1_devdata *dd, 5873 u32 context, 5874 const struct err_reg_info *eri) 5875 { 5876 u64 reg; 5877 u32 count; 5878 5879 /* read in a loop until no more errors are seen */ 5880 count = 0; 5881 while (1) { 5882 reg = read_kctxt_csr(dd, context, eri->status); 5883 if (reg == 0) 5884 break; 5885 write_kctxt_csr(dd, context, eri->clear, reg); 5886 if (likely(eri->handler)) 5887 eri->handler(dd, context, reg); 5888 count++; 5889 if (count > MAX_CLEAR_COUNT) { 5890 u64 mask; 5891 5892 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", 5893 eri->desc, reg); 5894 /* 5895 * Read-modify-write so any other masked bits 5896 * remain masked. 5897 */ 5898 mask = read_kctxt_csr(dd, context, eri->mask); 5899 mask &= ~reg; 5900 write_kctxt_csr(dd, context, eri->mask, mask); 5901 break; 5902 } 5903 } 5904 } 5905 5906 /* 5907 * CCE block "misc" interrupt. Source is < 16. 5908 */ 5909 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source) 5910 { 5911 const struct err_reg_info *eri = &misc_errs[source]; 5912 5913 if (eri->handler) { 5914 interrupt_clear_down(dd, 0, eri); 5915 } else { 5916 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", 5917 source); 5918 } 5919 } 5920 5921 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags) 5922 { 5923 return flag_string(buf, buf_len, flags, 5924 sc_err_status_flags, 5925 ARRAY_SIZE(sc_err_status_flags)); 5926 } 5927 5928 /* 5929 * Send context error interrupt. Source (hw_context) is < 160. 5930 * 5931 * All send context errors cause the send context to halt. The normal 5932 * clear-down mechanism cannot be used because we cannot clear the 5933 * error bits until several other long-running items are done first. 5934 * This is OK because with the context halted, nothing else is going 5935 * to happen on it anyway. 5936 */ 5937 static void is_sendctxt_err_int(struct hfi1_devdata *dd, 5938 unsigned int hw_context) 5939 { 5940 struct send_context_info *sci; 5941 struct send_context *sc; 5942 char flags[96]; 5943 u64 status; 5944 u32 sw_index; 5945 int i = 0; 5946 unsigned long irq_flags; 5947 5948 sw_index = dd->hw_to_sw[hw_context]; 5949 if (sw_index >= dd->num_send_contexts) { 5950 dd_dev_err(dd, 5951 "out of range sw index %u for send context %u\n", 5952 sw_index, hw_context); 5953 return; 5954 } 5955 sci = &dd->send_contexts[sw_index]; 5956 spin_lock_irqsave(&dd->sc_lock, irq_flags); 5957 sc = sci->sc; 5958 if (!sc) { 5959 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, 5960 sw_index, hw_context); 5961 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); 5962 return; 5963 } 5964 5965 /* tell the software that a halt has begun */ 5966 sc_stop(sc, SCF_HALTED); 5967 5968 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); 5969 5970 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, 5971 send_context_err_status_string(flags, sizeof(flags), 5972 status)); 5973 5974 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK) 5975 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index)); 5976 5977 /* 5978 * Automatically restart halted kernel contexts out of interrupt 5979 * context. User contexts must ask the driver to restart the context. 5980 */ 5981 if (sc->type != SC_USER) 5982 queue_work(dd->pport->hfi1_wq, &sc->halt_work); 5983 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); 5984 5985 /* 5986 * Update the counters for the corresponding status bits. 
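 * (Illustrative: a status value of 0x5 has bits 0 and 2 set, so the
 * loop below increments exactly those two per-bit counters.)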
5987 * Note that these particular counters are aggregated over all 5988 * 160 contexts. 5989 */ 5990 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) { 5991 if (status & (1ull << i)) 5992 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); 5993 } 5994 } 5995 5996 static void handle_sdma_eng_err(struct hfi1_devdata *dd, 5997 unsigned int source, u64 status) 5998 { 5999 struct sdma_engine *sde; 6000 int i = 0; 6001 6002 sde = &dd->per_sdma[source]; 6003 #ifdef CONFIG_SDMA_VERBOSITY 6004 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, 6005 slashstrip(__FILE__), __LINE__, __func__); 6006 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", 6007 sde->this_idx, source, (unsigned long long)status); 6008 #endif 6009 sde->err_cnt++; 6010 sdma_engine_error(sde, status); 6011 6012 /* 6013 * Update the counters for the corresponding status bits. 6014 * Note that these particular counters are aggregated over 6015 * all 16 DMA engines. 6016 */ 6017 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) { 6018 if (status & (1ull << i)) 6019 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); 6020 } 6021 } 6022 6023 /* 6024 * CCE block SDMA error interrupt. Source is < 16. 6025 */ 6026 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source) 6027 { 6028 #ifdef CONFIG_SDMA_VERBOSITY 6029 struct sdma_engine *sde = &dd->per_sdma[source]; 6030 6031 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, 6032 slashstrip(__FILE__), __LINE__, __func__); 6033 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, 6034 source); 6035 sdma_dumpstate(sde); 6036 #endif 6037 interrupt_clear_down(dd, source, &sdma_eng_err); 6038 } 6039 6040 /* 6041 * CCE block "various" interrupt. Source is < 8. 6042 */ 6043 static void is_various_int(struct hfi1_devdata *dd, unsigned int source) 6044 { 6045 const struct err_reg_info *eri = &various_err[source]; 6046 6047 /* 6048 * TCritInt cannot go through interrupt_clear_down() 6049 * because it is not a second tier interrupt. The handler 6050 * should be called directly. 6051 */ 6052 if (source == TCRIT_INT_SOURCE) 6053 handle_temp_err(dd); 6054 else if (eri->handler) 6055 interrupt_clear_down(dd, 0, eri); 6056 else 6057 dd_dev_info(dd, 6058 "%s: Unimplemented/reserved interrupt %d\n", 6059 __func__, source); 6060 } 6061 6062 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg) 6063 { 6064 /* src_ctx is always zero */ 6065 struct hfi1_pportdata *ppd = dd->pport; 6066 unsigned long flags; 6067 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); 6068 6069 if (reg & QSFP_HFI0_MODPRST_N) { 6070 if (!qsfp_mod_present(ppd)) { 6071 dd_dev_info(dd, "%s: QSFP module removed\n", 6072 __func__); 6073 6074 ppd->driver_link_ready = 0; 6075 /* 6076 * Cable removed, reset all our information about the 6077 * cache and cable capabilities 6078 */ 6079 6080 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 6081 /* 6082 * We don't set cache_refresh_required here as we expect 6083 * an interrupt when a cable is inserted 6084 */ 6085 ppd->qsfp_info.cache_valid = 0; 6086 ppd->qsfp_info.reset_needed = 0; 6087 ppd->qsfp_info.limiting_active = 0; 6088 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, 6089 flags); 6090 /* Invert the ModPresent pin now to detect plug-in */ 6091 write_csr(dd, dd->hfi1_id ? 
ASIC_QSFP2_INVERT : 6092 ASIC_QSFP1_INVERT, qsfp_int_mgmt); 6093 6094 if ((ppd->offline_disabled_reason > 6095 HFI1_ODR_MASK( 6096 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) || 6097 (ppd->offline_disabled_reason == 6098 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))) 6099 ppd->offline_disabled_reason = 6100 HFI1_ODR_MASK( 6101 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED); 6102 6103 if (ppd->host_link_state == HLS_DN_POLL) { 6104 /* 6105 * The link is still in POLL. This means 6106 * that the normal link down processing 6107 * will not happen. We have to do it here 6108 * before turning the DC off. 6109 */ 6110 queue_work(ppd->link_wq, &ppd->link_down_work); 6111 } 6112 } else { 6113 dd_dev_info(dd, "%s: QSFP module inserted\n", 6114 __func__); 6115 6116 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 6117 ppd->qsfp_info.cache_valid = 0; 6118 ppd->qsfp_info.cache_refresh_required = 1; 6119 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, 6120 flags); 6121 6122 /* 6123 * Stop inversion of ModPresent pin to detect 6124 * removal of the cable 6125 */ 6126 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N; 6127 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : 6128 ASIC_QSFP1_INVERT, qsfp_int_mgmt); 6129 6130 ppd->offline_disabled_reason = 6131 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 6132 } 6133 } 6134 6135 if (reg & QSFP_HFI0_INT_N) { 6136 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n", 6137 __func__); 6138 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 6139 ppd->qsfp_info.check_interrupt_flags = 1; 6140 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); 6141 } 6142 6143 /* Schedule the QSFP work only if there is a cable attached. */ 6144 if (qsfp_mod_present(ppd)) 6145 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work); 6146 } 6147 6148 static int request_host_lcb_access(struct hfi1_devdata *dd) 6149 { 6150 int ret; 6151 6152 ret = do_8051_command(dd, HCMD_MISC, 6153 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << 6154 LOAD_DATA_FIELD_ID_SHIFT, NULL); 6155 if (ret != HCMD_SUCCESS) { 6156 dd_dev_err(dd, "%s: command failed with error %d\n", 6157 __func__, ret); 6158 } 6159 return ret == HCMD_SUCCESS ? 0 : -EBUSY; 6160 } 6161 6162 static int request_8051_lcb_access(struct hfi1_devdata *dd) 6163 { 6164 int ret; 6165 6166 ret = do_8051_command(dd, HCMD_MISC, 6167 (u64)HCMD_MISC_GRANT_LCB_ACCESS << 6168 LOAD_DATA_FIELD_ID_SHIFT, NULL); 6169 if (ret != HCMD_SUCCESS) { 6170 dd_dev_err(dd, "%s: command failed with error %d\n", 6171 __func__, ret); 6172 } 6173 return ret == HCMD_SUCCESS ? 0 : -EBUSY; 6174 } 6175 6176 /* 6177 * Set the LCB selector - allow host access. The DCC selector always 6178 * points to the host. 6179 */ 6180 static inline void set_host_lcb_access(struct hfi1_devdata *dd) 6181 { 6182 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, 6183 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK | 6184 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK); 6185 } 6186 6187 /* 6188 * Clear the LCB selector - allow 8051 access. The DCC selector always 6189 * points to the host. 6190 */ 6191 static inline void set_8051_lcb_access(struct hfi1_devdata *dd) 6192 { 6193 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL, 6194 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK); 6195 } 6196 6197 /* 6198 * Acquire LCB access from the 8051. If the host already has access, 6199 * just increment a counter. Otherwise, inform the 8051 that the 6200 * host is taking access. 
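 *
 * A typical usage sketch (illustrative only; any host-side LCB CSR
 * access follows the same acquire/release pattern):
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}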
6201 * 6202 * Returns: 6203 * 0 on success 6204 * -EBUSY if the 8051 has control and cannot be disturbed 6205 * -errno if unable to acquire access from the 8051 6206 */ 6207 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok) 6208 { 6209 struct hfi1_pportdata *ppd = dd->pport; 6210 int ret = 0; 6211 6212 /* 6213 * Use the host link state lock so the operation of this routine 6214 * { link state check, selector change, count increment } can occur 6215 * as a unit against a link state change. Otherwise there is a 6216 * race between the state change and the count increment. 6217 */ 6218 if (sleep_ok) { 6219 mutex_lock(&ppd->hls_lock); 6220 } else { 6221 while (!mutex_trylock(&ppd->hls_lock)) 6222 udelay(1); 6223 } 6224 6225 /* this access is valid only when the link is up */ 6226 if (ppd->host_link_state & HLS_DOWN) { 6227 dd_dev_info(dd, "%s: link state %s not up\n", 6228 __func__, link_state_name(ppd->host_link_state)); 6229 ret = -EBUSY; 6230 goto done; 6231 } 6232 6233 if (dd->lcb_access_count == 0) { 6234 ret = request_host_lcb_access(dd); 6235 if (ret) { 6236 dd_dev_err(dd, 6237 "%s: unable to acquire LCB access, err %d\n", 6238 __func__, ret); 6239 goto done; 6240 } 6241 set_host_lcb_access(dd); 6242 } 6243 dd->lcb_access_count++; 6244 done: 6245 mutex_unlock(&ppd->hls_lock); 6246 return ret; 6247 } 6248 6249 /* 6250 * Release LCB access by decrementing the use count. If the count is moving 6251 * from 1 to 0, inform 8051 that it has control back. 6252 * 6253 * Returns: 6254 * 0 on success 6255 * -errno if unable to release access to the 8051 6256 */ 6257 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok) 6258 { 6259 int ret = 0; 6260 6261 /* 6262 * Use the host link state lock because the acquire needed it. 6263 * Here, we only need to keep { selector change, count decrement } 6264 * as a unit. 6265 */ 6266 if (sleep_ok) { 6267 mutex_lock(&dd->pport->hls_lock); 6268 } else { 6269 while (!mutex_trylock(&dd->pport->hls_lock)) 6270 udelay(1); 6271 } 6272 6273 if (dd->lcb_access_count == 0) { 6274 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n", 6275 __func__); 6276 goto done; 6277 } 6278 6279 if (dd->lcb_access_count == 1) { 6280 set_8051_lcb_access(dd); 6281 ret = request_8051_lcb_access(dd); 6282 if (ret) { 6283 dd_dev_err(dd, 6284 "%s: unable to release LCB access, err %d\n", 6285 __func__, ret); 6286 /* restore host access if the grant didn't work */ 6287 set_host_lcb_access(dd); 6288 goto done; 6289 } 6290 } 6291 dd->lcb_access_count--; 6292 done: 6293 mutex_unlock(&dd->pport->hls_lock); 6294 return ret; 6295 } 6296 6297 /* 6298 * Initialize LCB access variables and state. Called during driver load, 6299 * after most of the initialization is finished. 6300 * 6301 * The DC default is LCB access on for the host. The driver defaults to 6302 * leaving access to the 8051. Assign access now - this constrains the call 6303 * to this routine to be after all LCB set-up is done. In particular, after 6304 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts() 6305 */ 6306 static void init_lcb_access(struct hfi1_devdata *dd) 6307 { 6308 dd->lcb_access_count = 0; 6309 } 6310 6311 /* 6312 * Write a response back to an 8051 request.
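 *
 * For example (as in handle_8051_request() below), an unsupported
 * request type is answered with:
 *
 *	hreq_response(dd, HREQ_NOT_SUPPORTED, 0);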
6313 */ 6314 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data) 6315 { 6316 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 6317 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK | 6318 (u64)return_code << 6319 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT | 6320 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT); 6321 } 6322 6323 /* 6324 * Handle host requests from the 8051. 6325 */ 6326 static void handle_8051_request(struct hfi1_pportdata *ppd) 6327 { 6328 struct hfi1_devdata *dd = ppd->dd; 6329 u64 reg; 6330 u16 data = 0; 6331 u8 type; 6332 6333 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1); 6334 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0) 6335 return; /* no request */ 6336 6337 /* zero out COMPLETED so the response is seen */ 6338 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0); 6339 6340 /* extract request details */ 6341 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT) 6342 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK; 6343 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT) 6344 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK; 6345 6346 switch (type) { 6347 case HREQ_LOAD_CONFIG: 6348 case HREQ_SAVE_CONFIG: 6349 case HREQ_READ_CONFIG: 6350 case HREQ_SET_TX_EQ_ABS: 6351 case HREQ_SET_TX_EQ_REL: 6352 case HREQ_ENABLE: 6353 dd_dev_info(dd, "8051 request: request 0x%x not supported\n", 6354 type); 6355 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); 6356 break; 6357 case HREQ_LCB_RESET: 6358 /* Put the LCB, RX FPE and TX FPE into reset */ 6359 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET); 6360 /* Make sure the write completed */ 6361 (void)read_csr(dd, DCC_CFG_RESET); 6362 /* Hold the reset long enough to take effect */ 6363 udelay(1); 6364 /* Take the LCB, RX FPE and TX FPE out of reset */ 6365 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); 6366 hreq_response(dd, HREQ_SUCCESS, 0); 6367 6368 break; 6369 case HREQ_CONFIG_DONE: 6370 hreq_response(dd, HREQ_SUCCESS, 0); 6371 break; 6372 6373 case HREQ_INTERFACE_TEST: 6374 hreq_response(dd, HREQ_SUCCESS, data); 6375 break; 6376 default: 6377 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type); 6378 hreq_response(dd, HREQ_NOT_SUPPORTED, 0); 6379 break; 6380 } 6381 } 6382 6383 /* 6384 * Set up allocation unit value. 6385 */ 6386 void set_up_vau(struct hfi1_devdata *dd, u8 vau) 6387 { 6388 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6389 6390 /* do not modify other values in the register */ 6391 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; 6392 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; 6393 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6394 } 6395 6396 /* 6397 * Set up initial VL15 credits of the remote. Assumes the rest of 6398 * the CM credit registers are zero from a previous global or credit reset. 6399 * Shared limit for VL15 will always be 0. 6400 */ 6401 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) 6402 { 6403 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6404 6405 /* set initial values for total and shared credit limit */ 6406 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | 6407 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); 6408 6409 /* 6410 * Set total limit to be equal to VL15 credits. 6411 * Leave shared limit at 0.
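 *
 * (Scale note: the CM encodings are compact - vau_to_au() and
 * vcu_to_cu() below expand them, e.g. a vAU encoding of v means an
 * allocation unit of 8 * 2^v bytes.)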
6412 */ 6413 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; 6414 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6415 6416 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6417 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); 6418 } 6419 6420 /* 6421 * Zero all credit details from the previous connection and 6422 * reset the CM manager's internal counters. 6423 */ 6424 void reset_link_credits(struct hfi1_devdata *dd) 6425 { 6426 int i; 6427 6428 /* remove all previous VL credit limits */ 6429 for (i = 0; i < TXE_NUM_DATA_VL; i++) 6430 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 6431 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 6432 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); 6433 /* reset the CM block */ 6434 pio_send_control(dd, PSC_CM_RESET); 6435 /* reset cached value */ 6436 dd->vl15buf_cached = 0; 6437 } 6438 6439 /* convert a vCU to a CU */ 6440 static u32 vcu_to_cu(u8 vcu) 6441 { 6442 return 1 << vcu; 6443 } 6444 6445 /* convert a CU to a vCU */ 6446 static u8 cu_to_vcu(u32 cu) 6447 { 6448 return ilog2(cu); 6449 } 6450 6451 /* convert a vAU to an AU */ 6452 static u32 vau_to_au(u8 vau) 6453 { 6454 return 8 * (1 << vau); 6455 } 6456 6457 static void set_linkup_defaults(struct hfi1_pportdata *ppd) 6458 { 6459 ppd->sm_trap_qp = 0x0; 6460 ppd->sa_qp = 0x1; 6461 } 6462 6463 /* 6464 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset. 6465 */ 6466 static void lcb_shutdown(struct hfi1_devdata *dd, int abort) 6467 { 6468 u64 reg; 6469 6470 /* clear lcb run: LCB_CFG_RUN.EN = 0 */ 6471 write_csr(dd, DC_LCB_CFG_RUN, 0); 6472 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */ 6473 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 6474 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT); 6475 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */ 6476 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); 6477 reg = read_csr(dd, DCC_CFG_RESET); 6478 write_csr(dd, DCC_CFG_RESET, reg | 6479 DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE); 6480 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */ 6481 if (!abort) { 6482 udelay(1); /* must hold for the longer of 16cclks or 20ns */ 6483 write_csr(dd, DCC_CFG_RESET, reg); 6484 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); 6485 } 6486 } 6487 6488 /* 6489 * This routine should be called after the link has been transitioned to 6490 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into 6491 * reset). 6492 * 6493 * The expectation is that the caller of this routine would have taken 6494 * care of properly transitioning the link into the correct state. 6495 * NOTE: the caller needs to acquire the dd->dc8051_lock lock 6496 * before calling this function. 6497 */ 6498 static void _dc_shutdown(struct hfi1_devdata *dd) 6499 { 6500 lockdep_assert_held(&dd->dc8051_lock); 6501 6502 if (dd->dc_shutdown) 6503 return; 6504 6505 dd->dc_shutdown = 1; 6506 /* Shutdown the LCB */ 6507 lcb_shutdown(dd, 1); 6508 /* 6509 * Going to OFFLINE would have caused the 8051 to put the 6510 * SerDes into reset already. Just need to shut down the 8051 6511 * itself. 6512 */ 6513 write_csr(dd, DC_DC8051_CFG_RST, 0x1); 6514 } 6515 6516 static void dc_shutdown(struct hfi1_devdata *dd) 6517 { 6518 mutex_lock(&dd->dc8051_lock); 6519 _dc_shutdown(dd); 6520 mutex_unlock(&dd->dc8051_lock); 6521 } 6522 6523 /* 6524 * Calling this after the DC has been brought out of reset should not 6525 * do any damage. 6526 * NOTE: the caller needs to acquire the dd->dc8051_lock lock 6527 * before calling this function.
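 *
 * The locked wrapper dc_start() below is the normal entry point;
 * callers otherwise follow this sketch:
 *
 *	mutex_lock(&dd->dc8051_lock);
 *	_dc_start(dd);
 *	mutex_unlock(&dd->dc8051_lock);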
6528 */ 6529 static void _dc_start(struct hfi1_devdata *dd) 6530 { 6531 lockdep_assert_held(&dd->dc8051_lock); 6532 6533 if (!dd->dc_shutdown) 6534 return; 6535 6536 /* Take the 8051 out of reset */ 6537 write_csr(dd, DC_DC8051_CFG_RST, 0ull); 6538 /* Wait until 8051 is ready */ 6539 if (wait_fm_ready(dd, TIMEOUT_8051_START)) 6540 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n", 6541 __func__); 6542 6543 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */ 6544 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET); 6545 /* lcb_shutdown() with abort=1 does not restore these */ 6546 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); 6547 dd->dc_shutdown = 0; 6548 } 6549 6550 static void dc_start(struct hfi1_devdata *dd) 6551 { 6552 mutex_lock(&dd->dc8051_lock); 6553 _dc_start(dd); 6554 mutex_unlock(&dd->dc8051_lock); 6555 } 6556 6557 /* 6558 * These LCB adjustments are for the Aurora SerDes core in the FPGA. 6559 */ 6560 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd) 6561 { 6562 u64 rx_radr, tx_radr; 6563 u32 version; 6564 6565 if (dd->icode != ICODE_FPGA_EMULATION) 6566 return; 6567 6568 /* 6569 * These LCB defaults on emulator _s are good, nothing to do here: 6570 * LCB_CFG_TX_FIFOS_RADR 6571 * LCB_CFG_RX_FIFOS_RADR 6572 * LCB_CFG_LN_DCLK 6573 * LCB_CFG_IGNORE_LOST_RCLK 6574 */ 6575 if (is_emulator_s(dd)) 6576 return; 6577 /* else this is _p */ 6578 6579 version = emulator_rev(dd); 6580 if (!is_ax(dd)) 6581 version = 0x2d; /* all B0 use 0x2d or higher settings */ 6582 6583 if (version <= 0x12) { 6584 /* release 0x12 and below */ 6585 6586 /* 6587 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9 6588 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9 6589 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa 6590 */ 6591 rx_radr = 6592 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6593 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6594 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6595 /* 6596 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default) 6597 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6 6598 */ 6599 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6600 } else if (version <= 0x18) { 6601 /* release 0x13 up to 0x18 */ 6602 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ 6603 rx_radr = 6604 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6605 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6606 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6607 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6608 } else if (version == 0x19) { 6609 /* release 0x19 */ 6610 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */ 6611 rx_radr = 6612 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6613 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6614 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6615 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6616 } else if (version == 0x1a) { 6617 /* release 0x1a */ 6618 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */ 6619 rx_radr = 6620 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6621 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6622 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6623 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6624 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull); 6625 } else { 6626 /* release 0x1b and higher */ 6627 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */ 6628 rx_radr = 6629 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 6630 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 6631 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT; 6632 
tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT; 6633 } 6634 6635 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr); 6636 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */ 6637 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 6638 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); 6639 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr); 6640 } 6641 6642 /* 6643 * Handle an SMA idle message 6644 * 6645 * This is a work-queue function outside of the interrupt. 6646 */ 6647 void handle_sma_message(struct work_struct *work) 6648 { 6649 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6650 sma_message_work); 6651 struct hfi1_devdata *dd = ppd->dd; 6652 u64 msg; 6653 int ret; 6654 6655 /* 6656 * msg is bytes 1-4 of the 40-bit idle message - the command code 6657 * is stripped off 6658 */ 6659 ret = read_idle_sma(dd, &msg); 6660 if (ret) 6661 return; 6662 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg); 6663 /* 6664 * React to the SMA message. Byte[1] (0 for us) is the command. 6665 */ 6666 switch (msg & 0xff) { 6667 case SMA_IDLE_ARM: 6668 /* 6669 * See OPAv1 table 9-14 - HFI and External Switch Ports Key 6670 * State Transitions 6671 * 6672 * Only expected in INIT or ARMED, discard otherwise. 6673 */ 6674 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) 6675 ppd->neighbor_normal = 1; 6676 break; 6677 case SMA_IDLE_ACTIVE: 6678 /* 6679 * See OPAv1 table 9-14 - HFI and External Switch Ports Key 6680 * State Transitions 6681 * 6682 * Can activate the node. Discard otherwise. 6683 */ 6684 if (ppd->host_link_state == HLS_UP_ARMED && 6685 ppd->is_active_optimize_enabled) { 6686 ppd->neighbor_normal = 1; 6687 ret = set_link_state(ppd, HLS_UP_ACTIVE); 6688 if (ret) 6689 dd_dev_err( 6690 dd, 6691 "%s: received Active SMA idle message, couldn't set link to Active\n", 6692 __func__); 6693 } 6694 break; 6695 default: 6696 dd_dev_err(dd, 6697 "%s: received unexpected SMA idle message 0x%llx\n", 6698 __func__, msg); 6699 break; 6700 } 6701 } 6702 6703 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear) 6704 { 6705 u64 rcvctrl; 6706 unsigned long flags; 6707 6708 spin_lock_irqsave(&dd->rcvctrl_lock, flags); 6709 rcvctrl = read_csr(dd, RCV_CTRL); 6710 rcvctrl |= add; 6711 rcvctrl &= ~clear; 6712 write_csr(dd, RCV_CTRL, rcvctrl); 6713 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); 6714 } 6715 6716 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add) 6717 { 6718 adjust_rcvctrl(dd, add, 0); 6719 } 6720 6721 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear) 6722 { 6723 adjust_rcvctrl(dd, 0, clear); 6724 } 6725 6726 /* 6727 * Called from all interrupt handlers to start handling an SPC freeze. 6728 */ 6729 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) 6730 { 6731 struct hfi1_devdata *dd = ppd->dd; 6732 struct send_context *sc; 6733 int i; 6734 int sc_flags; 6735 6736 if (flags & FREEZE_SELF) 6737 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); 6738 6739 /* enter frozen mode */ 6740 dd->flags |= HFI1_FROZEN; 6741 6742 /* notify all SDMA engines that they are going into a freeze */ 6743 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); 6744 6745 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? 6746 SCF_LINK_DOWN : 0); 6747 /* do halt pre-handling on all enabled send contexts */ 6748 for (i = 0; i < dd->num_send_contexts; i++) { 6749 sc = dd->send_contexts[i].sc; 6750 if (sc && (sc->flags & SCF_ENABLED)) 6751 sc_stop(sc, sc_flags); 6752 } 6753 6754 /* Send contexts are frozen.
Notify user space */ 6755 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT); 6756 6757 if (flags & FREEZE_ABORT) { 6758 dd_dev_err(dd, 6759 "Aborted freeze recovery. Please REBOOT system\n"); 6760 return; 6761 } 6762 /* queue non-interrupt handler */ 6763 queue_work(ppd->hfi1_wq, &ppd->freeze_work); 6764 } 6765 6766 /* 6767 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen, 6768 * depending on the "freeze" parameter. 6769 * 6770 * No need to return an error if it times out, our only option 6771 * is to proceed anyway. 6772 */ 6773 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze) 6774 { 6775 unsigned long timeout; 6776 u64 reg; 6777 6778 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT); 6779 while (1) { 6780 reg = read_csr(dd, CCE_STATUS); 6781 if (freeze) { 6782 /* waiting until all indicators are set */ 6783 if ((reg & ALL_FROZE) == ALL_FROZE) 6784 return; /* all done */ 6785 } else { 6786 /* waiting until all indicators are clear */ 6787 if ((reg & ALL_FROZE) == 0) 6788 return; /* all done */ 6789 } 6790 6791 if (time_after(jiffies, timeout)) { 6792 dd_dev_err(dd, 6793 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing", 6794 freeze ? "" : "un", reg & ALL_FROZE, 6795 freeze ? ALL_FROZE : 0ull); 6796 return; 6797 } 6798 usleep_range(80, 120); 6799 } 6800 } 6801 6802 /* 6803 * Do all freeze handling for the RXE block. 6804 */ 6805 static void rxe_freeze(struct hfi1_devdata *dd) 6806 { 6807 int i; 6808 struct hfi1_ctxtdata *rcd; 6809 6810 /* disable port */ 6811 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6812 6813 /* disable all receive contexts */ 6814 for (i = 0; i < dd->num_rcv_contexts; i++) { 6815 rcd = hfi1_rcd_get_by_index(dd, i); 6816 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); 6817 hfi1_rcd_put(rcd); 6818 } 6819 } 6820 6821 /* 6822 * Unfreeze handling for the RXE block - kernel contexts only. 6823 * This will also enable the port. User contexts will do unfreeze 6824 * handling on a per-context basis as they call into the driver. 6825 * 6826 */ 6827 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) 6828 { 6829 u32 rcvmask; 6830 u16 i; 6831 struct hfi1_ctxtdata *rcd; 6832 6833 /* enable all kernel contexts */ 6834 for (i = 0; i < dd->num_rcv_contexts; i++) { 6835 rcd = hfi1_rcd_get_by_index(dd, i); 6836 6837 /* Ensure all non-user contexts(including vnic) are enabled */ 6838 if (!rcd || 6839 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { 6840 hfi1_rcd_put(rcd); 6841 continue; 6842 } 6843 rcvmask = HFI1_RCVCTRL_CTXT_ENB; 6844 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ 6845 rcvmask |= rcd->rcvhdrtail_kvaddr ? 6846 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; 6847 hfi1_rcvctrl(dd, rcvmask, rcd); 6848 hfi1_rcd_put(rcd); 6849 } 6850 6851 /* enable port */ 6852 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6853 } 6854 6855 /* 6856 * Non-interrupt SPC freeze handling. 6857 * 6858 * This is a work-queue function outside of the triggering interrupt. 
6859 */ 6860 void handle_freeze(struct work_struct *work) 6861 { 6862 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6863 freeze_work); 6864 struct hfi1_devdata *dd = ppd->dd; 6865 6866 /* wait for freeze indicators on all affected blocks */ 6867 wait_for_freeze_status(dd, 1); 6868 6869 /* SPC is now frozen */ 6870 6871 /* do send PIO freeze steps */ 6872 pio_freeze(dd); 6873 6874 /* do send DMA freeze steps */ 6875 sdma_freeze(dd); 6876 6877 /* do send egress freeze steps - nothing to do */ 6878 6879 /* do receive freeze steps */ 6880 rxe_freeze(dd); 6881 6882 /* 6883 * Unfreeze the hardware - clear the freeze, wait for each 6884 * block's frozen bit to clear, then clear the frozen flag. 6885 */ 6886 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); 6887 wait_for_freeze_status(dd, 0); 6888 6889 if (is_ax(dd)) { 6890 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); 6891 wait_for_freeze_status(dd, 1); 6892 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK); 6893 wait_for_freeze_status(dd, 0); 6894 } 6895 6896 /* do send PIO unfreeze steps for kernel contexts */ 6897 pio_kernel_unfreeze(dd); 6898 6899 /* do send DMA unfreeze steps */ 6900 sdma_unfreeze(dd); 6901 6902 /* do send egress unfreeze steps - nothing to do */ 6903 6904 /* do receive unfreeze steps for kernel contexts */ 6905 rxe_kernel_unfreeze(dd); 6906 6907 /* 6908 * The unfreeze procedure touches global device registers when 6909 * it disables and re-enables RXE. Mark the device unfrozen 6910 * after all that is done so other parts of the driver waiting 6911 * for the device to unfreeze don't do things out of order. 6912 * 6913 * The above implies that the meaning of HFI1_FROZEN flag is 6914 * "Device has gone into freeze mode and freeze mode handling 6915 * is still in progress." 6916 * 6917 * The flag will be removed when freeze mode processing has 6918 * completed. 6919 */ 6920 dd->flags &= ~HFI1_FROZEN; 6921 wake_up(&dd->event_queue); 6922 6923 /* no longer frozen */ 6924 } 6925 6926 /** 6927 * update_xmit_counters - update PortXmitWait/PortVlXmitWait 6928 * counters. 6929 * @ppd: info of physical Hfi port 6930 * @link_width: new link width after link up or downgrade 6931 * 6932 * Update the PortXmitWait and PortVlXmitWait counters after 6933 * a link up or downgrade event to reflect a link width change. 6934 */ 6935 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width) 6936 { 6937 int i; 6938 u16 tx_width; 6939 u16 link_speed; 6940 6941 tx_width = tx_link_width(link_width); 6942 link_speed = get_link_speed(ppd->link_speed_active); 6943 6944 /* 6945 * There are C_VL_COUNT number of PortVLXmitWait counters. 6946 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter. 6947 */ 6948 for (i = 0; i < C_VL_COUNT + 1; i++) 6949 get_xmit_wait_counters(ppd, tx_width, link_speed, i); 6950 } 6951 6952 /* 6953 * Handle a link up interrupt from the 8051. 6954 * 6955 * This is a work-queue function outside of the interrupt. 6956 */ 6957 void handle_link_up(struct work_struct *work) 6958 { 6959 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6960 link_up_work); 6961 struct hfi1_devdata *dd = ppd->dd; 6962 6963 set_link_state(ppd, HLS_UP_INIT); 6964 6965 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ 6966 read_ltp_rtt(dd); 6967 /* 6968 * OPA specifies that certain counters are cleared on a transition 6969 * to link up, so do that. 6970 */ 6971 clear_linkup_counters(dd); 6972 /* 6973 * And (re)set link up default values. 
6974 */ 6975 set_linkup_defaults(ppd); 6976 6977 /* 6978 * Set VL15 credits. Use cached value from verify cap interrupt. 6979 * In case of quick linkup or simulator, vl15 value will be set by 6980 * handle_linkup_change. VerifyCap interrupt handler will not be 6981 * called in those scenarios. 6982 */ 6983 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) 6984 set_up_vl15(dd, dd->vl15buf_cached); 6985 6986 /* enforce link speed enabled */ 6987 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { 6988 /* oops - current speed is not enabled, bounce */ 6989 dd_dev_err(dd, 6990 "Link speed active 0x%x is outside enabled 0x%x, downing link\n", 6991 ppd->link_speed_active, ppd->link_speed_enabled); 6992 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, 6993 OPA_LINKDOWN_REASON_SPEED_POLICY); 6994 set_link_state(ppd, HLS_DN_OFFLINE); 6995 start_link(ppd); 6996 } 6997 } 6998 6999 /* 7000 * Several pieces of LNI information were cached for SMA in ppd. 7001 * Reset these on link down 7002 */ 7003 static void reset_neighbor_info(struct hfi1_pportdata *ppd) 7004 { 7005 ppd->neighbor_guid = 0; 7006 ppd->neighbor_port_number = 0; 7007 ppd->neighbor_type = 0; 7008 ppd->neighbor_fm_security = 0; 7009 } 7010 7011 static const char * const link_down_reason_strs[] = { 7012 [OPA_LINKDOWN_REASON_NONE] = "None", 7013 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0", 7014 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length", 7015 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long", 7016 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short", 7017 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID", 7018 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID", 7019 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2", 7020 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC", 7021 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8", 7022 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail", 7023 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10", 7024 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error", 7025 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15", 7026 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker", 7027 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14", 7028 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15", 7029 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance", 7030 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance", 7031 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance", 7032 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack", 7033 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker", 7034 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt", 7035 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit", 7036 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit", 7037 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24", 7038 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25", 7039 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26", 7040 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27", 7041 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28", 7042 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29", 7043 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30", 7044 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] = 7045 "Excessive buffer overrun", 7046 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown", 7047 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot", 7048 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown", 7049 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce", 7050 
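/*
 * Indices not initialized above default to NULL;
 * link_down_reason_str() below reports those as "(invalid)".
 */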
[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy", 7051 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy", 7052 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected", 7053 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] = 7054 "Local media not installed", 7055 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed", 7056 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config", 7057 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] = 7058 "End to end not installed", 7059 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy", 7060 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy", 7061 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy", 7062 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management", 7063 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled", 7064 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient" 7065 }; 7066 7067 /* return the neighbor link down reason string */ 7068 static const char *link_down_reason_str(u8 reason) 7069 { 7070 const char *str = NULL; 7071 7072 if (reason < ARRAY_SIZE(link_down_reason_strs)) 7073 str = link_down_reason_strs[reason]; 7074 if (!str) 7075 str = "(invalid)"; 7076 7077 return str; 7078 } 7079 7080 /* 7081 * Handle a link down interrupt from the 8051. 7082 * 7083 * This is a work-queue function outside of the interrupt. 7084 */ 7085 void handle_link_down(struct work_struct *work) 7086 { 7087 u8 lcl_reason, neigh_reason = 0; 7088 u8 link_down_reason; 7089 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7090 link_down_work); 7091 int was_up; 7092 static const char ldr_str[] = "Link down reason: "; 7093 7094 if ((ppd->host_link_state & 7095 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) && 7096 ppd->port_type == PORT_TYPE_FIXED) 7097 ppd->offline_disabled_reason = 7098 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED); 7099 7100 /* Go offline first, then deal with reading/writing through 8051 */ 7101 was_up = !!(ppd->host_link_state & HLS_UP); 7102 set_link_state(ppd, HLS_DN_OFFLINE); 7103 xchg(&ppd->is_link_down_queued, 0); 7104 7105 if (was_up) { 7106 lcl_reason = 0; 7107 /* link down reason is only valid if the link was up */ 7108 read_link_down_reason(ppd->dd, &link_down_reason); 7109 switch (link_down_reason) { 7110 case LDR_LINK_TRANSFER_ACTIVE_LOW: 7111 /* the link went down, no idle message reason */ 7112 dd_dev_info(ppd->dd, "%sUnexpected link down\n", 7113 ldr_str); 7114 break; 7115 case LDR_RECEIVED_LINKDOWN_IDLE_MSG: 7116 /* 7117 * The neighbor reason is only valid if an idle message 7118 * was received for it. 7119 */ 7120 read_planned_down_reason_code(ppd->dd, &neigh_reason); 7121 dd_dev_info(ppd->dd, 7122 "%sNeighbor link down message %d, %s\n", 7123 ldr_str, neigh_reason, 7124 link_down_reason_str(neigh_reason)); 7125 break; 7126 case LDR_RECEIVED_HOST_OFFLINE_REQ: 7127 dd_dev_info(ppd->dd, 7128 "%sHost requested link to go offline\n", 7129 ldr_str); 7130 break; 7131 default: 7132 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", 7133 ldr_str, link_down_reason); 7134 break; 7135 } 7136 7137 /* 7138 * If no reason, assume peer-initiated but missed 7139 * LinkGoingDown idle flits. 
7140 */ 7141 if (neigh_reason == 0) 7142 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN; 7143 } else { 7144 /* went down while polling or going up */ 7145 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT; 7146 } 7147 7148 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0); 7149 7150 /* inform the SMA when the link transitions from up to down */ 7151 if (was_up && ppd->local_link_down_reason.sma == 0 && 7152 ppd->neigh_link_down_reason.sma == 0) { 7153 ppd->local_link_down_reason.sma = 7154 ppd->local_link_down_reason.latest; 7155 ppd->neigh_link_down_reason.sma = 7156 ppd->neigh_link_down_reason.latest; 7157 } 7158 7159 reset_neighbor_info(ppd); 7160 7161 /* disable the port */ 7162 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 7163 7164 /* 7165 * If there is no cable attached, turn the DC off. Otherwise, 7166 * start the link bring up. 7167 */ 7168 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) 7169 dc_shutdown(ppd->dd); 7170 else 7171 start_link(ppd); 7172 } 7173 7174 void handle_link_bounce(struct work_struct *work) 7175 { 7176 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7177 link_bounce_work); 7178 7179 /* 7180 * Only do something if the link is currently up. 7181 */ 7182 if (ppd->host_link_state & HLS_UP) { 7183 set_link_state(ppd, HLS_DN_OFFLINE); 7184 start_link(ppd); 7185 } else { 7186 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", 7187 __func__, link_state_name(ppd->host_link_state)); 7188 } 7189 } 7190 7191 /* 7192 * Mask conversion: Capability exchange to Port LTP. The capability 7193 * exchange has an implicit 16b CRC that is mandatory. 7194 */ 7195 static int cap_to_port_ltp(int cap) 7196 { 7197 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */ 7198 7199 if (cap & CAP_CRC_14B) 7200 port_ltp |= PORT_LTP_CRC_MODE_14; 7201 if (cap & CAP_CRC_48B) 7202 port_ltp |= PORT_LTP_CRC_MODE_48; 7203 if (cap & CAP_CRC_12B_16B_PER_LANE) 7204 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE; 7205 7206 return port_ltp; 7207 } 7208 7209 /* 7210 * Convert an OPA Port LTP mask to capability mask 7211 */ 7212 int port_ltp_to_cap(int port_ltp) 7213 { 7214 int cap_mask = 0; 7215 7216 if (port_ltp & PORT_LTP_CRC_MODE_14) 7217 cap_mask |= CAP_CRC_14B; 7218 if (port_ltp & PORT_LTP_CRC_MODE_48) 7219 cap_mask |= CAP_CRC_48B; 7220 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE) 7221 cap_mask |= CAP_CRC_12B_16B_PER_LANE; 7222 7223 return cap_mask; 7224 } 7225 7226 /* 7227 * Convert a single DC LCB CRC mode to an OPA Port LTP mask. 7228 */ 7229 static int lcb_to_port_ltp(int lcb_crc) 7230 { 7231 int port_ltp = 0; 7232 7233 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE) 7234 port_ltp = PORT_LTP_CRC_MODE_PER_LANE; 7235 else if (lcb_crc == LCB_CRC_48B) 7236 port_ltp = PORT_LTP_CRC_MODE_48; 7237 else if (lcb_crc == LCB_CRC_14B) 7238 port_ltp = PORT_LTP_CRC_MODE_14; 7239 else 7240 port_ltp = PORT_LTP_CRC_MODE_16; 7241 7242 return port_ltp; 7243 } 7244 7245 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) 7246 { 7247 if (ppd->pkeys[2] != 0) { 7248 ppd->pkeys[2] = 0; 7249 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7250 hfi1_event_pkey_change(ppd->dd, ppd->port); 7251 } 7252 } 7253 7254 /* 7255 * Convert the given link width to the OPA link width bitmask. 7256 */ 7257 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) 7258 { 7259 switch (width) { 7260 case 0: 7261 /* 7262 * Simulator and quick linkup do not set the width. 7263 * Just set it to 4x without complaint. 
7264 */ 7265 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) 7266 return OPA_LINK_WIDTH_4X; 7267 return 0; /* no lanes up */ 7268 case 1: return OPA_LINK_WIDTH_1X; 7269 case 2: return OPA_LINK_WIDTH_2X; 7270 case 3: return OPA_LINK_WIDTH_3X; 7271 default: 7272 dd_dev_info(dd, "%s: invalid width %d, using 4\n", 7273 __func__, width); 7274 /* fall through */ 7275 case 4: return OPA_LINK_WIDTH_4X; 7276 } 7277 } 7278 7279 /* 7280 * Do a population count on the bottom nibble. 7281 */ 7282 static const u8 bit_counts[16] = { 7283 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 7284 }; 7285 7286 static inline u8 nibble_to_count(u8 nibble) 7287 { 7288 return bit_counts[nibble & 0xf]; 7289 } 7290 7291 /* 7292 * Read the active lane information from the 8051 registers and return 7293 * their widths. 7294 * 7295 * Active lane information is found in these 8051 registers: 7296 * enable_lane_tx 7297 * enable_lane_rx 7298 */ 7299 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width, 7300 u16 *rx_width) 7301 { 7302 u16 tx, rx; 7303 u8 enable_lane_rx; 7304 u8 enable_lane_tx; 7305 u8 tx_polarity_inversion; 7306 u8 rx_polarity_inversion; 7307 u8 max_rate; 7308 7309 /* read the active lanes */ 7310 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, 7311 &rx_polarity_inversion, &max_rate); 7312 read_local_lni(dd, &enable_lane_rx); 7313 7314 /* convert to counts */ 7315 tx = nibble_to_count(enable_lane_tx); 7316 rx = nibble_to_count(enable_lane_rx); 7317 7318 /* 7319 * Set link_speed_active here, overriding what was set in 7320 * handle_verify_cap(). The ASIC 8051 firmware does not correctly 7321 * set the max_rate field in handle_verify_cap until v0.19. 7322 */ 7323 if ((dd->icode == ICODE_RTL_SILICON) && 7324 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { 7325 /* max_rate: 0 = 12.5G, 1 = 25G */ 7326 switch (max_rate) { 7327 case 0: 7328 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; 7329 break; 7330 default: 7331 dd_dev_err(dd, 7332 "%s: unexpected max rate %d, using 25Gb\n", 7333 __func__, (int)max_rate); 7334 /* fall through */ 7335 case 1: 7336 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; 7337 break; 7338 } 7339 } 7340 7341 dd_dev_info(dd, 7342 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n", 7343 enable_lane_tx, tx, enable_lane_rx, rx); 7344 *tx_width = link_width_to_bits(dd, tx); 7345 *rx_width = link_width_to_bits(dd, rx); 7346 } 7347 7348 /* 7349 * Read verify_cap_local_fm_link_width[1] to obtain the link widths. 7350 * Valid after the end of VerifyCap and during LinkUp. Does not change 7351 * after link up. I.e. look elsewhere for downgrade information. 7352 * 7353 * Bits are: 7354 * + bits [7:4] contain the number of active transmitters 7355 * + bits [3:0] contain the number of active receivers 7356 * These are numbers 1 through 4 and can be different values if the 7357 * link is asymmetric. 7358 * 7359 * verify_cap_local_fm_link_width[0] retains its original value. 
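 *
 * Worked example, assuming the layout above: a widths word of 0x4300
 * decodes via "tx = widths >> 12" and "rx = (widths >> 8) & 0xf" to
 * tx = 4 and rx = 3, i.e. an asymmetric link.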
7360 */ 7361 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width, 7362 u16 *rx_width) 7363 { 7364 u16 widths, tx, rx; 7365 u8 misc_bits, local_flags; 7366 u16 active_tx, active_rx; 7367 7368 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths); 7369 tx = widths >> 12; 7370 rx = (widths >> 8) & 0xf; 7371 7372 *tx_width = link_width_to_bits(dd, tx); 7373 *rx_width = link_width_to_bits(dd, rx); 7374 7375 /* print the active widths */ 7376 get_link_widths(dd, &active_tx, &active_rx); 7377 } 7378 7379 /* 7380 * Set ppd->link_width_active and ppd->link_width_downgrade_active using 7381 * hardware information when the link first comes up. 7382 * 7383 * The link width is not available until after VerifyCap.AllFramesReceived 7384 * (the trigger for handle_verify_cap), so this is outside that routine 7385 * and should be called when the 8051 signals linkup. 7386 */ 7387 void get_linkup_link_widths(struct hfi1_pportdata *ppd) 7388 { 7389 u16 tx_width, rx_width; 7390 7391 /* get end-of-LNI link widths */ 7392 get_linkup_widths(ppd->dd, &tx_width, &rx_width); 7393 7394 /* use tx_width as the link is supposed to be symmetric on link up */ 7395 ppd->link_width_active = tx_width; 7396 /* link width downgrade active (LWD.A) starts out matching LW.A */ 7397 ppd->link_width_downgrade_tx_active = ppd->link_width_active; 7398 ppd->link_width_downgrade_rx_active = ppd->link_width_active; 7399 /* per OPA spec, on link up LWD.E resets to LWD.S */ 7400 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; 7401 /* cache the active egress rate (units [10^6 bits/sec]) */ 7402 ppd->current_egress_rate = active_egress_rate(ppd); 7403 } 7404 7405 /* 7406 * Handle a verify capabilities interrupt from the 8051. 7407 * 7408 * This is a work-queue function outside of the interrupt. 7409 */ 7410 void handle_verify_cap(struct work_struct *work) 7411 { 7412 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7413 link_vc_work); 7414 struct hfi1_devdata *dd = ppd->dd; 7415 u64 reg; 7416 u8 power_management; 7417 u8 continuous; 7418 u8 vcu; 7419 u8 vau; 7420 u8 z; 7421 u16 vl15buf; 7422 u16 link_widths; 7423 u16 crc_mask; 7424 u16 crc_val; 7425 u16 device_id; 7426 u16 active_tx, active_rx; 7427 u8 partner_supported_crc; 7428 u8 remote_tx_rate; 7429 u8 device_rev; 7430 7431 set_link_state(ppd, HLS_VERIFY_CAP); 7432 7433 lcb_shutdown(dd, 0); 7434 adjust_lcb_for_fpga_serdes(dd); 7435 7436 read_vc_remote_phy(dd, &power_management, &continuous); 7437 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf, 7438 &partner_supported_crc); 7439 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths); 7440 read_remote_device_id(dd, &device_id, &device_rev); 7441 7442 /* print the active widths */ 7443 get_link_widths(dd, &active_tx, &active_rx); 7444 dd_dev_info(dd, 7445 "Peer PHY: power management 0x%x, continuous updates 0x%x\n", 7446 (int)power_management, (int)continuous); 7447 dd_dev_info(dd, 7448 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n", 7449 (int)vau, (int)z, (int)vcu, (int)vl15buf, 7450 (int)partner_supported_crc); 7451 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n", 7452 (u32)remote_tx_rate, (u32)link_widths); 7453 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n", 7454 (u32)device_id, (u32)device_rev); 7455 /* 7456 * The peer vAU value just read is the peer receiver value. HFI does 7457 * not support a transmit vAU of 0 (AU == 8).
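 * (vAU encodes the allocation unit as 8 * 2^vAU bytes, consistent with
 * the AU values quoted in this comment: vAU 0 is an AU of 8 and vAU 1
 * an AU of 16; by the same rule our hardwired vAU of 3 is an AU of 64.)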
We advertised that 7458 * with Z=1 in the fabric capabilities sent to the peer. The peer 7459 * will see our Z=1, and, if it advertised a vAU of 0, will move its 7460 * receive to vAU of 1 (AU == 16). Do the same here. We do not care 7461 * about the peer Z value - our sent vAU is 3 (hardwired) and is not 7462 * subject to the Z value exception. 7463 */ 7464 if (vau == 0) 7465 vau = 1; 7466 set_up_vau(dd, vau); 7467 7468 /* 7469 * Set VL15 credits to 0 in global credit register. Cache remote VL15 7470 * credits value and wait for link-up interrupt to set it. 7471 */ 7472 set_up_vl15(dd, 0); 7473 dd->vl15buf_cached = vl15buf; 7474 7475 /* set up the LCB CRC mode */ 7476 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; 7477 7478 /* order is important: use the lowest bit in common */ 7479 if (crc_mask & CAP_CRC_14B) 7480 crc_val = LCB_CRC_14B; 7481 else if (crc_mask & CAP_CRC_48B) 7482 crc_val = LCB_CRC_48B; 7483 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE) 7484 crc_val = LCB_CRC_12B_16B_PER_LANE; 7485 else 7486 crc_val = LCB_CRC_16B; 7487 7488 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val); 7489 write_csr(dd, DC_LCB_CFG_CRC_MODE, 7490 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT); 7491 7492 /* set (14b only) or clear sideband credit */ 7493 reg = read_csr(dd, SEND_CM_CTRL); 7494 if (crc_val == LCB_CRC_14B && crc_14b_sideband) { 7495 write_csr(dd, SEND_CM_CTRL, 7496 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); 7497 } else { 7498 write_csr(dd, SEND_CM_CTRL, 7499 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK); 7500 } 7501 7502 ppd->link_speed_active = 0; /* invalid value */ 7503 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { 7504 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */ 7505 switch (remote_tx_rate) { 7506 case 0: 7507 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; 7508 break; 7509 case 1: 7510 ppd->link_speed_active = OPA_LINK_SPEED_25G; 7511 break; 7512 } 7513 } else { 7514 /* actual rate is highest bit of the ANDed rates */ 7515 u8 rate = remote_tx_rate & ppd->local_tx_rate; 7516 7517 if (rate & 2) 7518 ppd->link_speed_active = OPA_LINK_SPEED_25G; 7519 else if (rate & 1) 7520 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; 7521 } 7522 if (ppd->link_speed_active == 0) { 7523 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n", 7524 __func__, (int)remote_tx_rate); 7525 ppd->link_speed_active = OPA_LINK_SPEED_25G; 7526 } 7527 7528 /* 7529 * Cache the values of the supported, enabled, and active 7530 * LTP CRC modes to return in 'portinfo' queries. But the bit 7531 * flags that are returned in the portinfo query differ from 7532 * what's in the link_crc_mask, crc_sizes, and crc_val 7533 * variables. Convert these here. 7534 */ 7535 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; 7536 /* supported crc modes */ 7537 ppd->port_ltp_crc_mode |= 7538 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4; 7539 /* enabled crc modes */ 7540 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val); 7541 /* active crc mode */ 7542 7543 /* set up the remote credit return table */ 7544 assign_remote_cm_au_table(dd, vcu); 7545 7546 /* 7547 * The LCB is reset on entry to handle_verify_cap(), so this must 7548 * be applied on every link up.
7549 * 7550 * Adjust LCB error kill enable to kill the link if 7551 * these RBUF errors are seen: 7552 * REPLAY_BUF_MBE_SMASK 7553 * FLIT_INPUT_BUF_MBE_SMASK 7554 */ 7555 if (is_ax(dd)) { /* fixed in B0 */ 7556 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN); 7557 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK 7558 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK; 7559 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg); 7560 } 7561 7562 /* pull LCB fifos out of reset - all fifo clocks must be stable */ 7563 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 7564 7565 /* give 8051 access to the LCB CSRs */ 7566 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ 7567 set_8051_lcb_access(dd); 7568 7569 /* tell the 8051 to go to LinkUp */ 7570 set_link_state(ppd, HLS_GOING_UP); 7571 } 7572 7573 /** 7574 * apply_link_downgrade_policy - Apply the link width downgrade enabled 7575 * policy against the current active link widths. 7576 * @ppd: info of physical Hfi port 7577 * @refresh_widths: True indicates link downgrade event 7578 * @return: True indicates a successful link downgrade. False indicates 7579 * link downgrade event failed and the link will bounce back to 7580 * default link width. 7581 * 7582 * Called when the enabled policy changes or the active link widths 7583 * change. 7584 * Refresh_widths indicates that a link downgrade occurred. The 7585 * link_downgraded variable is set by refresh_widths and 7586 * determines the success/failure of the policy application. 7587 */ 7588 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd, 7589 bool refresh_widths) 7590 { 7591 int do_bounce = 0; 7592 int tries; 7593 u16 lwde; 7594 u16 tx, rx; 7595 bool link_downgraded = refresh_widths; 7596 7597 /* use the hls lock to avoid a race with actual link up */ 7598 tries = 0; 7599 retry: 7600 mutex_lock(&ppd->hls_lock); 7601 /* only apply if the link is up */ 7602 if (ppd->host_link_state & HLS_DOWN) { 7603 /* still going up..wait and retry */ 7604 if (ppd->host_link_state & HLS_GOING_UP) { 7605 if (++tries < 1000) { 7606 mutex_unlock(&ppd->hls_lock); 7607 usleep_range(100, 120); /* arbitrary */ 7608 goto retry; 7609 } 7610 dd_dev_err(ppd->dd, 7611 "%s: giving up waiting for link state change\n", 7612 __func__); 7613 } 7614 goto done; 7615 } 7616 7617 lwde = ppd->link_width_downgrade_enabled; 7618 7619 if (refresh_widths) { 7620 get_link_widths(ppd->dd, &tx, &rx); 7621 ppd->link_width_downgrade_tx_active = tx; 7622 ppd->link_width_downgrade_rx_active = rx; 7623 } 7624 7625 if (ppd->link_width_downgrade_tx_active == 0 || 7626 ppd->link_width_downgrade_rx_active == 0) { 7627 /* the 8051 reported a dead link as a downgrade */ 7628 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); 7629 link_downgraded = false; 7630 } else if (lwde == 0) { 7631 /* downgrade is disabled */ 7632 7633 /* bounce if not at starting active width */ 7634 if ((ppd->link_width_active != 7635 ppd->link_width_downgrade_tx_active) || 7636 (ppd->link_width_active != 7637 ppd->link_width_downgrade_rx_active)) { 7638 dd_dev_err(ppd->dd, 7639 "Link downgrade is disabled and link has downgraded, downing link\n"); 7640 dd_dev_err(ppd->dd, 7641 " original 0x%x, tx active 0x%x, rx active 0x%x\n", 7642 ppd->link_width_active, 7643 ppd->link_width_downgrade_tx_active, 7644 ppd->link_width_downgrade_rx_active); 7645 do_bounce = 1; 7646 link_downgraded = false; 7647 } 7648 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || 7649 (lwde & ppd->link_width_downgrade_rx_active) == 0) { 7650 /* Tx or Rx is outside 
the enabled policy */ 7651 dd_dev_err(ppd->dd, 7652 "Link is outside of downgrade allowed, downing link\n"); 7653 dd_dev_err(ppd->dd, 7654 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n", 7655 lwde, ppd->link_width_downgrade_tx_active, 7656 ppd->link_width_downgrade_rx_active); 7657 do_bounce = 1; 7658 link_downgraded = false; 7659 } 7660 7661 done: 7662 mutex_unlock(&ppd->hls_lock); 7663 7664 if (do_bounce) { 7665 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0, 7666 OPA_LINKDOWN_REASON_WIDTH_POLICY); 7667 set_link_state(ppd, HLS_DN_OFFLINE); 7668 start_link(ppd); 7669 } 7670 7671 return link_downgraded; 7672 } 7673 7674 /* 7675 * Handle a link downgrade interrupt from the 8051. 7676 * 7677 * This is a work-queue function outside of the interrupt. 7678 */ 7679 void handle_link_downgrade(struct work_struct *work) 7680 { 7681 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 7682 link_downgrade_work); 7683 7684 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); 7685 if (apply_link_downgrade_policy(ppd, true)) 7686 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); 7687 } 7688 7689 static char *dcc_err_string(char *buf, int buf_len, u64 flags) 7690 { 7691 return flag_string(buf, buf_len, flags, dcc_err_flags, 7692 ARRAY_SIZE(dcc_err_flags)); 7693 } 7694 7695 static char *lcb_err_string(char *buf, int buf_len, u64 flags) 7696 { 7697 return flag_string(buf, buf_len, flags, lcb_err_flags, 7698 ARRAY_SIZE(lcb_err_flags)); 7699 } 7700 7701 static char *dc8051_err_string(char *buf, int buf_len, u64 flags) 7702 { 7703 return flag_string(buf, buf_len, flags, dc8051_err_flags, 7704 ARRAY_SIZE(dc8051_err_flags)); 7705 } 7706 7707 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags) 7708 { 7709 return flag_string(buf, buf_len, flags, dc8051_info_err_flags, 7710 ARRAY_SIZE(dc8051_info_err_flags)); 7711 } 7712 7713 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags) 7714 { 7715 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags, 7716 ARRAY_SIZE(dc8051_info_host_msg_flags)); 7717 } 7718 7719 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg) 7720 { 7721 struct hfi1_pportdata *ppd = dd->pport; 7722 u64 info, err, host_msg; 7723 int queue_link_down = 0; 7724 char buf[96]; 7725 7726 /* look at the flags */ 7727 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) { 7728 /* 8051 information set by firmware */ 7729 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */ 7730 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051); 7731 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT) 7732 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK; 7733 host_msg = (info >> 7734 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT) 7735 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK; 7736 7737 /* 7738 * Handle error flags. 7739 */ 7740 if (err & FAILED_LNI) { 7741 /* 7742 * LNI error indications are cleared by the 8051 7743 * only when starting polling. Only pay attention 7744 * to them when in the states that occur during 7745 * LNI. 
7746 */ 7747 if (ppd->host_link_state 7748 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 7749 queue_link_down = 1; 7750 dd_dev_info(dd, "Link error: %s\n", 7751 dc8051_info_err_string(buf, 7752 sizeof(buf), 7753 err & 7754 FAILED_LNI)); 7755 } 7756 err &= ~(u64)FAILED_LNI; 7757 } 7758 /* unknown frames can happen during LNI, just count */ 7759 if (err & UNKNOWN_FRAME) { 7760 ppd->unknown_frame_count++; 7761 err &= ~(u64)UNKNOWN_FRAME; 7762 } 7763 if (err) { 7764 /* report remaining errors, but do not do anything */ 7765 dd_dev_err(dd, "8051 info error: %s\n", 7766 dc8051_info_err_string(buf, sizeof(buf), 7767 err)); 7768 } 7769 7770 /* 7771 * Handle host message flags. 7772 */ 7773 if (host_msg & HOST_REQ_DONE) { 7774 /* 7775 * Presently, the driver does a busy wait for 7776 * host requests to complete. This is only an 7777 * informational message. 7778 * NOTE: The 8051 clears the host message 7779 * information *on the next 8051 command*. 7780 * Therefore, when linkup is achieved, 7781 * this flag will still be set. 7782 */ 7783 host_msg &= ~(u64)HOST_REQ_DONE; 7784 } 7785 if (host_msg & BC_SMA_MSG) { 7786 queue_work(ppd->link_wq, &ppd->sma_message_work); 7787 host_msg &= ~(u64)BC_SMA_MSG; 7788 } 7789 if (host_msg & LINKUP_ACHIEVED) { 7790 dd_dev_info(dd, "8051: Link up\n"); 7791 queue_work(ppd->link_wq, &ppd->link_up_work); 7792 host_msg &= ~(u64)LINKUP_ACHIEVED; 7793 } 7794 if (host_msg & EXT_DEVICE_CFG_REQ) { 7795 handle_8051_request(ppd); 7796 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ; 7797 } 7798 if (host_msg & VERIFY_CAP_FRAME) { 7799 queue_work(ppd->link_wq, &ppd->link_vc_work); 7800 host_msg &= ~(u64)VERIFY_CAP_FRAME; 7801 } 7802 if (host_msg & LINK_GOING_DOWN) { 7803 const char *extra = ""; 7804 /* no downgrade action needed if going down */ 7805 if (host_msg & LINK_WIDTH_DOWNGRADED) { 7806 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; 7807 extra = " (ignoring downgrade)"; 7808 } 7809 dd_dev_info(dd, "8051: Link down%s\n", extra); 7810 queue_link_down = 1; 7811 host_msg &= ~(u64)LINK_GOING_DOWN; 7812 } 7813 if (host_msg & LINK_WIDTH_DOWNGRADED) { 7814 queue_work(ppd->link_wq, &ppd->link_downgrade_work); 7815 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED; 7816 } 7817 if (host_msg) { 7818 /* report remaining messages, but do not do anything */ 7819 dd_dev_info(dd, "8051 info host message: %s\n", 7820 dc8051_info_host_msg_string(buf, 7821 sizeof(buf), 7822 host_msg)); 7823 } 7824 7825 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK; 7826 } 7827 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) { 7828 /* 7829 * Lost the 8051 heartbeat. If this happens, we 7830 * receive constant interrupts about it. Disable 7831 * the interrupt after the first. 7832 */ 7833 dd_dev_err(dd, "Lost 8051 heartbeat\n"); 7834 write_csr(dd, DC_DC8051_ERR_EN, 7835 read_csr(dd, DC_DC8051_ERR_EN) & 7836 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK); 7837 7838 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK; 7839 } 7840 if (reg) { 7841 /* report the error, but do not do anything */ 7842 dd_dev_err(dd, "8051 error: %s\n", 7843 dc8051_err_string(buf, sizeof(buf), reg)); 7844 } 7845 7846 if (queue_link_down) { 7847 /* 7848 * if the link is already going down or disabled, do not 7849 * queue another. If there's a link down entry already 7850 * queued, don't queue another one. 7851 */ 7852 if ((ppd->host_link_state & 7853 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) || 7854 ppd->link_enabled == 0) { 7855 dd_dev_info(dd, "%s: not queuing link down.
host_link_state %x, link_enabled %x\n", 7856 __func__, ppd->host_link_state, 7857 ppd->link_enabled); 7858 } else { 7859 if (xchg(&ppd->is_link_down_queued, 1) == 1) 7860 dd_dev_info(dd, 7861 "%s: link down request already queued\n", 7862 __func__); 7863 else 7864 queue_work(ppd->link_wq, &ppd->link_down_work); 7865 } 7866 } 7867 } 7868 7869 static const char * const fm_config_txt[] = { 7870 [0] = 7871 "BadHeadDist: Distance violation between two head flits", 7872 [1] = 7873 "BadTailDist: Distance violation between two tail flits", 7874 [2] = 7875 "BadCtrlDist: Distance violation between two credit control flits", 7876 [3] = 7877 "BadCrdAck: Credits return for unsupported VL", 7878 [4] = 7879 "UnsupportedVLMarker: Received VL Marker", 7880 [5] = 7881 "BadPreempt: Exceeded the preemption nesting level", 7882 [6] = 7883 "BadControlFlit: Received unsupported control flit", 7884 /* no 7 */ 7885 [8] = 7886 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL", 7887 }; 7888 7889 static const char * const port_rcv_txt[] = { 7890 [1] = 7891 "BadPktLen: Illegal PktLen", 7892 [2] = 7893 "PktLenTooLong: Packet longer than PktLen", 7894 [3] = 7895 "PktLenTooShort: Packet shorter than PktLen", 7896 [4] = 7897 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)", 7898 [5] = 7899 "BadDLID: Illegal DLID (0, doesn't match HFI)", 7900 [6] = 7901 "BadL2: Illegal L2 opcode", 7902 [7] = 7903 "BadSC: Unsupported SC", 7904 [9] = 7905 "BadRC: Illegal RC", 7906 [11] = 7907 "PreemptError: Preempting with same VL", 7908 [12] = 7909 "PreemptVL15: Preempting a VL15 packet", 7910 }; 7911 7912 #define OPA_LDR_FMCONFIG_OFFSET 16 7913 #define OPA_LDR_PORTRCV_OFFSET 0 7914 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 7915 { 7916 u64 info, hdr0, hdr1; 7917 const char *extra; 7918 char buf[96]; 7919 struct hfi1_pportdata *ppd = dd->pport; 7920 u8 lcl_reason = 0; 7921 int do_bounce = 0; 7922 7923 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) { 7924 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { 7925 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE); 7926 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; 7927 /* set status bit */ 7928 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; 7929 } 7930 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK; 7931 } 7932 7933 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) { 7934 struct hfi1_pportdata *ppd = dd->pport; 7935 /* this counter saturates at (2^32) - 1 */ 7936 if (ppd->link_downed < (u32)UINT_MAX) 7937 ppd->link_downed++; 7938 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK; 7939 } 7940 7941 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) { 7942 u8 reason_valid = 1; 7943 7944 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG); 7945 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { 7946 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; 7947 /* set status bit */ 7948 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; 7949 } 7950 switch (info) { 7951 case 0: 7952 case 1: 7953 case 2: 7954 case 3: 7955 case 4: 7956 case 5: 7957 case 6: 7958 extra = fm_config_txt[info]; 7959 break; 7960 case 8: 7961 extra = fm_config_txt[info]; 7962 if (ppd->port_error_action & 7963 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) { 7964 do_bounce = 1; 7965 /* 7966 * lcl_reason cannot be derived from info 7967 * for this error 7968 */ 7969 lcl_reason = 7970 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER; 7971 } 7972 break; 7973 default: 7974 reason_valid = 0; 7975 snprintf(buf, sizeof(buf), "reserved%lld", info); 7976 extra = buf; 7977 break; 7978 
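/*
 * Past this switch, bit (OPA_LDR_FMCONFIG_OFFSET + info) of
 * port_error_action selects the bounce; e.g. a BadCrdAck error
 * (info == 3) tests bit 19 of the FM-programmed mask.
 */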
} 7979 7980 if (reason_valid && !do_bounce) { 7981 do_bounce = ppd->port_error_action & 7982 (1 << (OPA_LDR_FMCONFIG_OFFSET + info)); 7983 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST; 7984 } 7985 7986 /* just report this */ 7987 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n", 7988 extra); 7989 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK; 7990 } 7991 7992 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) { 7993 u8 reason_valid = 1; 7994 7995 info = read_csr(dd, DCC_ERR_INFO_PORTRCV); 7996 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0); 7997 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1); 7998 if (!(dd->err_info_rcvport.status_and_code & 7999 OPA_EI_STATUS_SMASK)) { 8000 dd->err_info_rcvport.status_and_code = 8001 info & OPA_EI_CODE_SMASK; 8002 /* set status bit */ 8003 dd->err_info_rcvport.status_and_code |= 8004 OPA_EI_STATUS_SMASK; 8005 /* 8006 * save first 2 flits in the packet that caused 8007 * the error 8008 */ 8009 dd->err_info_rcvport.packet_flit1 = hdr0; 8010 dd->err_info_rcvport.packet_flit2 = hdr1; 8011 } 8012 switch (info) { 8013 case 1: 8014 case 2: 8015 case 3: 8016 case 4: 8017 case 5: 8018 case 6: 8019 case 7: 8020 case 9: 8021 case 11: 8022 case 12: 8023 extra = port_rcv_txt[info]; 8024 break; 8025 default: 8026 reason_valid = 0; 8027 snprintf(buf, sizeof(buf), "reserved%lld", info); 8028 extra = buf; 8029 break; 8030 } 8031 8032 if (reason_valid && !do_bounce) { 8033 do_bounce = ppd->port_error_action & 8034 (1 << (OPA_LDR_PORTRCV_OFFSET + info)); 8035 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0; 8036 } 8037 8038 /* just report this */ 8039 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n" 8040 " hdr0 0x%llx, hdr1 0x%llx\n", 8041 extra, hdr0, hdr1); 8042 8043 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK; 8044 } 8045 8046 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) { 8047 /* informative only */ 8048 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n"); 8049 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK; 8050 } 8051 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) { 8052 /* informative only */ 8053 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n"); 8054 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK; 8055 } 8056 8057 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) 8058 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK; 8059 8060 /* report any remaining errors */ 8061 if (reg) 8062 dd_dev_info_ratelimited(dd, "DCC Error: %s\n", 8063 dcc_err_string(buf, sizeof(buf), reg)); 8064 8065 if (lcl_reason == 0) 8066 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN; 8067 8068 if (do_bounce) { 8069 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n", 8070 __func__); 8071 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason); 8072 queue_work(ppd->link_wq, &ppd->link_bounce_work); 8073 } 8074 } 8075 8076 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg) 8077 { 8078 char buf[96]; 8079 8080 dd_dev_info(dd, "LCB Error: %s\n", 8081 lcb_err_string(buf, sizeof(buf), reg)); 8082 } 8083 8084 /* 8085 * CCE block DC interrupt. Source is < 8. 8086 */ 8087 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source) 8088 { 8089 const struct err_reg_info *eri = &dc_errs[source]; 8090 8091 if (eri->handler) { 8092 interrupt_clear_down(dd, 0, eri); 8093 } else if (source == 3 /* dc_lbm_int */) { 8094 /* 8095 * This indicates that a parity error has occurred on the 8096 * address/control lines presented to the LBM. 
The error 8097 * is a single pulse, there is no associated error flag, 8098 * and it is non-maskable. This is because if a parity 8099 * error occurs on the request the request is dropped. 8100 * This should never occur, but it is nice to know if it 8101 * ever does. 8102 */ 8103 dd_dev_err(dd, "Parity error in DC LBM block\n"); 8104 } else { 8105 dd_dev_err(dd, "Invalid DC interrupt %u\n", source); 8106 } 8107 } 8108 8109 /* 8110 * TX block send credit interrupt. Source is < 160. 8111 */ 8112 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source) 8113 { 8114 sc_group_release_update(dd, source); 8115 } 8116 8117 /* 8118 * TX block SDMA interrupt. Source is < 48. 8119 * 8120 * SDMA interrupts are grouped by type: 8121 * 8122 * 0 - N-1 = SDma 8123 * N - 2N-1 = SDmaProgress 8124 * 2N - 3N-1 = SDmaIdle 8125 */ 8126 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source) 8127 { 8128 /* what interrupt */ 8129 unsigned int what = source / TXE_NUM_SDMA_ENGINES; 8130 /* which engine */ 8131 unsigned int which = source % TXE_NUM_SDMA_ENGINES; 8132 8133 #ifdef CONFIG_SDMA_VERBOSITY 8134 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which, 8135 slashstrip(__FILE__), __LINE__, __func__); 8136 sdma_dumpstate(&dd->per_sdma[which]); 8137 #endif 8138 8139 if (likely(what < 3 && which < dd->num_sdma)) { 8140 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); 8141 } else { 8142 /* should not happen */ 8143 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source); 8144 } 8145 } 8146 8147 /** 8148 * is_rcv_avail_int() - User receive context available IRQ handler 8149 * @dd: valid dd 8150 * @source: logical IRQ source (offset from IS_RCVAVAIL_START) 8151 * 8152 * RX block receive available interrupt. Source is < 160. 8153 * 8154 * This is the general interrupt handler for user (PSM) receive contexts, 8155 * and can only be used for non-threaded IRQs. 8156 */ 8157 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) 8158 { 8159 struct hfi1_ctxtdata *rcd; 8160 char *err_detail; 8161 8162 if (likely(source < dd->num_rcv_contexts)) { 8163 rcd = hfi1_rcd_get_by_index(dd, source); 8164 if (rcd) { 8165 handle_user_interrupt(rcd); 8166 hfi1_rcd_put(rcd); 8167 return; /* OK */ 8168 } 8169 /* received an interrupt, but no rcd */ 8170 err_detail = "dataless"; 8171 } else { 8172 /* received an interrupt, but are not using that context */ 8173 err_detail = "out of range"; 8174 } 8175 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", 8176 err_detail, source); 8177 } 8178 8179 /** 8180 * is_rcv_urgent_int() - User receive context urgent IRQ handler 8181 * @dd: valid dd 8182 * @source: logical IRQ source (offset from IS_RCVURGENT_START) 8183 * 8184 * RX block receive urgent interrupt. Source is < 160. 8185 * 8186 * NOTE: kernel receive contexts specifically do NOT enable this IRQ. 
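 *
 * Like the available IRQ above, this path funnels into
 * handle_user_interrupt(); only the logical source offset and the
 * log message distinguish the two.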
8187 */ 8188 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) 8189 { 8190 struct hfi1_ctxtdata *rcd; 8191 char *err_detail; 8192 8193 if (likely(source < dd->num_rcv_contexts)) { 8194 rcd = hfi1_rcd_get_by_index(dd, source); 8195 if (rcd) { 8196 handle_user_interrupt(rcd); 8197 hfi1_rcd_put(rcd); 8198 return; /* OK */ 8199 } 8200 /* received an interrupt, but no rcd */ 8201 err_detail = "dataless"; 8202 } else { 8203 /* received an interrupt, but are not using that context */ 8204 err_detail = "out of range"; 8205 } 8206 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", 8207 err_detail, source); 8208 } 8209 8210 /* 8211 * Reserved range interrupt. Should not be called in normal operation. 8212 */ 8213 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) 8214 { 8215 char name[64]; 8216 8217 dd_dev_err(dd, "unexpected %s interrupt\n", 8218 is_reserved_name(name, sizeof(name), source)); 8219 } 8220 8221 static const struct is_table is_table[] = { 8222 /* 8223 * start end 8224 * name func interrupt func 8225 */ 8226 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END, 8227 is_misc_err_name, is_misc_err_int }, 8228 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END, 8229 is_sdma_eng_err_name, is_sdma_eng_err_int }, 8230 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, 8231 is_sendctxt_err_name, is_sendctxt_err_int }, 8232 { IS_SDMA_START, IS_SDMA_IDLE_END, 8233 is_sdma_eng_name, is_sdma_eng_int }, 8234 { IS_VARIOUS_START, IS_VARIOUS_END, 8235 is_various_name, is_various_int }, 8236 { IS_DC_START, IS_DC_END, 8237 is_dc_name, is_dc_int }, 8238 { IS_RCVAVAIL_START, IS_RCVAVAIL_END, 8239 is_rcv_avail_name, is_rcv_avail_int }, 8240 { IS_RCVURGENT_START, IS_RCVURGENT_END, 8241 is_rcv_urgent_name, is_rcv_urgent_int }, 8242 { IS_SENDCREDIT_START, IS_SENDCREDIT_END, 8243 is_send_credit_name, is_send_credit_int}, 8244 { IS_RESERVED_START, IS_RESERVED_END, 8245 is_reserved_name, is_reserved_int}, 8246 }; 8247 8248 /* 8249 * Interrupt source interrupt - called when the given source has an interrupt. 8250 * Source is a bit index into an array of 64-bit integers. 8251 */ 8252 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source) 8253 { 8254 const struct is_table *entry; 8255 8256 /* avoids a double compare by walking the table in-order */ 8257 for (entry = &is_table[0]; entry->is_name; entry++) { 8258 if (source <= entry->end) { 8259 trace_hfi1_interrupt(dd, entry, source); 8260 entry->is_int(dd, source - entry->start); 8261 return; 8262 } 8263 } 8264 /* fell off the end */ 8265 dd_dev_err(dd, "invalid interrupt source %u\n", source); 8266 } 8267 8268 /** 8269 * general_interrupt() - General interrupt handler 8270 * @irq: MSIx IRQ vector 8271 * @data: hfi1 devdata 8272 * 8273 * This is able to correctly handle all non-threaded interrupts. Receive 8274 * context DATA IRQs are threaded and are not supported by this handler.
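 *
 * Registration sketch (illustrative only; the real request_irq() call
 * lives in the driver's MSI-X setup code):
 *
 *	ret = request_irq(irq, general_interrupt, 0, "hfi1", dd);
 *	if (ret)
 *		dd_dev_err(dd, "unable to request IRQ %d\n", irq);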
8275 * 8276 */ 8277 irqreturn_t general_interrupt(int irq, void *data) 8278 { 8279 struct hfi1_devdata *dd = data; 8280 u64 regs[CCE_NUM_INT_CSRS]; 8281 u32 bit; 8282 int i; 8283 irqreturn_t handled = IRQ_NONE; 8284 8285 this_cpu_inc(*dd->int_counter); 8286 8287 /* phase 1: scan and clear all handled interrupts */ 8288 for (i = 0; i < CCE_NUM_INT_CSRS; i++) { 8289 if (dd->gi_mask[i] == 0) { 8290 regs[i] = 0; /* used later */ 8291 continue; 8292 } 8293 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) & 8294 dd->gi_mask[i]; 8295 /* only clear if anything is set */ 8296 if (regs[i]) 8297 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]); 8298 } 8299 8300 /* phase 2: call the appropriate handler */ 8301 for_each_set_bit(bit, (unsigned long *)&regs[0], 8302 CCE_NUM_INT_CSRS * 64) { 8303 is_interrupt(dd, bit); 8304 handled = IRQ_HANDLED; 8305 } 8306 8307 return handled; 8308 } 8309 8310 irqreturn_t sdma_interrupt(int irq, void *data) 8311 { 8312 struct sdma_engine *sde = data; 8313 struct hfi1_devdata *dd = sde->dd; 8314 u64 status; 8315 8316 #ifdef CONFIG_SDMA_VERBOSITY 8317 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, 8318 slashstrip(__FILE__), __LINE__, __func__); 8319 sdma_dumpstate(sde); 8320 #endif 8321 8322 this_cpu_inc(*dd->int_counter); 8323 8324 /* This read_csr is really bad in the hot path */ 8325 status = read_csr(dd, 8326 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64))) 8327 & sde->imask; 8328 if (likely(status)) { 8329 /* clear the interrupt(s) */ 8330 write_csr(dd, 8331 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)), 8332 status); 8333 8334 /* handle the interrupt(s) */ 8335 sdma_engine_interrupt(sde, status); 8336 } else { 8337 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n", 8338 sde->this_idx); 8339 } 8340 return IRQ_HANDLED; 8341 } 8342 8343 /* 8344 * Clear the receive interrupt. Use a read of the interrupt clear CSR 8345 * to ensure that the write completed. This does NOT guarantee that 8346 * queued DMA writes to memory from the chip are pushed. 8347 */ 8348 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd) 8349 { 8350 struct hfi1_devdata *dd = rcd->dd; 8351 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); 8352 8353 mmiowb(); /* make sure everything before is written */ 8354 write_csr(dd, addr, rcd->imask); 8355 /* force the above write on the chip and get a value back */ 8356 (void)read_csr(dd, addr); 8357 } 8358 8359 /* force the receive interrupt */ 8360 void force_recv_intr(struct hfi1_ctxtdata *rcd) 8361 { 8362 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); 8363 } 8364 8365 /* 8366 * Return non-zero if a packet is present. 8367 * 8368 * This routine is called when rechecking for packets after the RcvAvail 8369 * interrupt has been cleared down. First, do a quick check of memory for 8370 * a packet present. If not found, use an expensive CSR read of the context 8371 * tail to determine the actual tail. The CSR read is necessary because there 8372 * is no method to push pending DMAs to memory other than an interrupt and we 8373 * are trying to determine if we need to force an interrupt.
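 *
 * The IRQ and thread handlers below use it in a clear-then-recheck
 * sequence:
 *
 *	clear_recv_intr(rcd);
 *	if (check_packet_present(rcd))
 *		force_recv_intr(rcd);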
8374 */ 8375 static inline int check_packet_present(struct hfi1_ctxtdata *rcd) 8376 { 8377 u32 tail; 8378 int present; 8379 8380 if (!rcd->rcvhdrtail_kvaddr) 8381 present = (rcd->seq_cnt == 8382 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); 8383 else /* is RDMA rtail */ 8384 present = (rcd->head != get_rcvhdrtail(rcd)); 8385 8386 if (present) 8387 return 1; 8388 8389 /* fall back to a CSR read, correct independent of DMA_RTAIL */ 8390 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); 8391 return rcd->head != tail; 8392 } 8393 8394 /* 8395 * Receive packet IRQ handler. This routine expects to be on its own IRQ. 8396 * This routine will try to handle packets immediately (latency), but if 8397 * it finds too many, it will invoke the thread handler (bandwidth). The 8398 * chip receive interrupt is *not* cleared down until this or the thread (if 8399 * invoked) is finished. The intent is to avoid extra interrupts while we 8400 * are processing packets anyway. 8401 */ 8402 irqreturn_t receive_context_interrupt(int irq, void *data) 8403 { 8404 struct hfi1_ctxtdata *rcd = data; 8405 struct hfi1_devdata *dd = rcd->dd; 8406 int disposition; 8407 int present; 8408 8409 trace_hfi1_receive_interrupt(dd, rcd); 8410 this_cpu_inc(*dd->int_counter); 8411 aspm_ctx_disable(rcd); 8412 8413 /* receive interrupt remains blocked while processing packets */ 8414 disposition = rcd->do_interrupt(rcd, 0); 8415 8416 /* 8417 * Too many packets were seen while processing packets in this 8418 * IRQ handler. Invoke the handler thread. The receive interrupt 8419 * remains blocked. 8420 */ 8421 if (disposition == RCV_PKT_LIMIT) 8422 return IRQ_WAKE_THREAD; 8423 8424 /* 8425 * The packet processor detected no more packets. Clear the receive 8426 * interrupt and recheck for a packet that may have arrived 8427 * after the previous check and interrupt clear. If a packet arrived, 8428 * force another interrupt. 8429 */ 8430 clear_recv_intr(rcd); 8431 present = check_packet_present(rcd); 8432 if (present) 8433 force_recv_intr(rcd); 8434 8435 return IRQ_HANDLED; 8436 } 8437 8438 /* 8439 * Receive packet thread handler. This expects to be invoked with the 8440 * receive interrupt still blocked. 8441 */ 8442 irqreturn_t receive_context_thread(int irq, void *data) 8443 { 8444 struct hfi1_ctxtdata *rcd = data; 8445 int present; 8446 8447 /* receive interrupt is still blocked from the IRQ handler */ 8448 (void)rcd->do_interrupt(rcd, 1); 8449 8450 /* 8451 * The packet processor will only return if it detected no more 8452 * packets. Hold IRQs here so we can safely clear the interrupt and 8453 * recheck for a packet that may have arrived after the previous 8454 * check and the interrupt clear. If a packet arrived, force another 8455 * interrupt.
8456 */ 8457 local_irq_disable(); 8458 clear_recv_intr(rcd); 8459 present = check_packet_present(rcd); 8460 if (present) 8461 force_recv_intr(rcd); 8462 local_irq_enable(); 8463 8464 return IRQ_HANDLED; 8465 } 8466 8467 /* ========================================================================= */ 8468 8469 u32 read_physical_state(struct hfi1_devdata *dd) 8470 { 8471 u64 reg; 8472 8473 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); 8474 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT) 8475 & DC_DC8051_STS_CUR_STATE_PORT_MASK; 8476 } 8477 8478 u32 read_logical_state(struct hfi1_devdata *dd) 8479 { 8480 u64 reg; 8481 8482 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); 8483 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT) 8484 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK; 8485 } 8486 8487 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) 8488 { 8489 u64 reg; 8490 8491 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); 8492 /* clear current state, set new state */ 8493 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK; 8494 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT; 8495 write_csr(dd, DCC_CFG_PORT_CONFIG, reg); 8496 } 8497 8498 /* 8499 * Use the 8051 to read a LCB CSR. 8500 */ 8501 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data) 8502 { 8503 u32 regno; 8504 int ret; 8505 8506 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 8507 if (acquire_lcb_access(dd, 0) == 0) { 8508 *data = read_csr(dd, addr); 8509 release_lcb_access(dd, 0); 8510 return 0; 8511 } 8512 return -EBUSY; 8513 } 8514 8515 /* register is an index of LCB registers: (offset - base) / 8 */ 8516 regno = (addr - DC_LCB_CFG_RUN) >> 3; 8517 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data); 8518 if (ret != HCMD_SUCCESS) 8519 return -EBUSY; 8520 return 0; 8521 } 8522 8523 /* 8524 * Provide a cache for some of the LCB registers in case the LCB is 8525 * unavailable. 8526 * (The LCB is unavailable in certain link states, for example.) 8527 */ 8528 struct lcb_datum { 8529 u32 off; 8530 u64 val; 8531 }; 8532 8533 static struct lcb_datum lcb_cache[] = { 8534 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0}, 8535 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 }, 8536 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 }, 8537 }; 8538 8539 static void update_lcb_cache(struct hfi1_devdata *dd) 8540 { 8541 int i; 8542 int ret; 8543 u64 val; 8544 8545 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { 8546 ret = read_lcb_csr(dd, lcb_cache[i].off, &val); 8547 8548 /* Update if we get good data */ 8549 if (likely(ret != -EBUSY)) 8550 lcb_cache[i].val = val; 8551 } 8552 } 8553 8554 static int read_lcb_cache(u32 off, u64 *val) 8555 { 8556 int i; 8557 8558 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) { 8559 if (lcb_cache[i].off == off) { 8560 *val = lcb_cache[i].val; 8561 return 0; 8562 } 8563 } 8564 8565 pr_warn("%s bad offset 0x%x\n", __func__, off); 8566 return -1; 8567 } 8568 8569 /* 8570 * Read an LCB CSR. Access may not be in host control, so check. 8571 * Return 0 on success, -EBUSY on failure. 
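 *
 * Typical use checks the return code before trusting the value, as
 * update_lcb_cache() above does:
 *
 *	u64 val;
 *
 *	if (!read_lcb_csr(dd, DC_LCB_ERR_INFO_SEQ_CRC_CNT, &val))
 *		... the read succeeded and val may be used ...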
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, check the cache, otherwise, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
		if (read_lcb_cache(addr, data))
			return -EBUSY;
		return 0;
	}

	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}

/*
 * Use the 8051 to write an LCB CSR.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Write an LCB CSR. Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return write_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}

/*
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	mutex_lock(&dd->dc8051_lock);

	/* We can't send any commands to the 8051 if it's in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051). (Is this too big of a hammer?)
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life. In that case, fail any subsequent commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				   "Previous 8051 host command timed out, skipping command %u\n",
				   type);
			return_code = -ENXIO;
			goto fail;
		}
		_dc_shutdown(dd);
		_dc_start(dd);
	}

	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing an LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0.
	 * Do the work of distributing the write data to where it
	 * needs to go here, rather than in the caller:
	 *
	 *	Write data
	 *	39:00 -> in_data[47:8]
	 *	47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *	63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		/* must preserve COMPLETED - it is tied to hardware */
		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
		reg |= ((((*out_data) >> 40) & 0xff) <<
				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
			| ((((*out_data) >> 48) & 0xffff) <<
				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* wait for completion, alternate: interrupt */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits are in a different register */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
					& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
					<< (48
					    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;
	/*
	 * Clear command for next user.
	 */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	mutex_unlock(&dd->dc8051_lock);
	return return_code;
}

static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}

int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
			   (int)field_id, (int)lane_id, ret);
	}
	return ret;
}

/*
 * Read the 8051 firmware "registers". Use the RAM directly. Always
 * set the result, even on error.
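 *
 * Illustrative address math (a sketch; NUM_GENERAL_FIELDS and
 * NUM_LANE_FIELDS are defined elsewhere in the driver): for
 * lane_id < 4 the byte address is
 *
 *	addr = 4 * NUM_GENERAL_FIELDS
 *	     + lane_id * 4 * NUM_LANE_FIELDS
 *	     + field_id * 4
 *
 * while lane_id >= 4 selects the general area, where
 * addr = field_id * 4. The 8-byte read below then returns the correct
 * 4-byte half via bit 2 of the address.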
8789 * Return 0 on success, -errno on failure 8790 */ 8791 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id, 8792 u32 *result) 8793 { 8794 u64 big_data; 8795 u32 addr; 8796 int ret; 8797 8798 /* address start depends on the lane_id */ 8799 if (lane_id < 4) 8800 addr = (4 * NUM_GENERAL_FIELDS) 8801 + (lane_id * 4 * NUM_LANE_FIELDS); 8802 else 8803 addr = 0; 8804 addr += field_id * 4; 8805 8806 /* read is in 8-byte chunks, hardware will truncate the address down */ 8807 ret = read_8051_data(dd, addr, 8, &big_data); 8808 8809 if (ret == 0) { 8810 /* extract the 4 bytes we want */ 8811 if (addr & 0x4) 8812 *result = (u32)(big_data >> 32); 8813 else 8814 *result = (u32)big_data; 8815 } else { 8816 *result = 0; 8817 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n", 8818 __func__, lane_id, field_id); 8819 } 8820 8821 return ret; 8822 } 8823 8824 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management, 8825 u8 continuous) 8826 { 8827 u32 frame; 8828 8829 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT 8830 | power_management << POWER_MANAGEMENT_SHIFT; 8831 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY, 8832 GENERAL_CONFIG, frame); 8833 } 8834 8835 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu, 8836 u16 vl15buf, u8 crc_sizes) 8837 { 8838 u32 frame; 8839 8840 frame = (u32)vau << VAU_SHIFT 8841 | (u32)z << Z_SHIFT 8842 | (u32)vcu << VCU_SHIFT 8843 | (u32)vl15buf << VL15BUF_SHIFT 8844 | (u32)crc_sizes << CRC_SIZES_SHIFT; 8845 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC, 8846 GENERAL_CONFIG, frame); 8847 } 8848 8849 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits, 8850 u8 *flag_bits, u16 *link_widths) 8851 { 8852 u32 frame; 8853 8854 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, 8855 &frame); 8856 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK; 8857 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK; 8858 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; 8859 } 8860 8861 static int write_vc_local_link_mode(struct hfi1_devdata *dd, 8862 u8 misc_bits, 8863 u8 flag_bits, 8864 u16 link_widths) 8865 { 8866 u32 frame; 8867 8868 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT 8869 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT 8870 | (u32)link_widths << LINK_WIDTH_SHIFT; 8871 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG, 8872 frame); 8873 } 8874 8875 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id, 8876 u8 device_rev) 8877 { 8878 u32 frame; 8879 8880 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT) 8881 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT); 8882 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame); 8883 } 8884 8885 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id, 8886 u8 *device_rev) 8887 { 8888 u32 frame; 8889 8890 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame); 8891 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK; 8892 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT) 8893 & REMOTE_DEVICE_REV_MASK; 8894 } 8895 8896 int write_host_interface_version(struct hfi1_devdata *dd, u8 version) 8897 { 8898 u32 frame; 8899 u32 mask; 8900 8901 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT); 8902 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame); 8903 /* Clear, then set field */ 8904 frame &= ~mask; 8905 frame |= ((u32)version << 
HOST_INTERFACE_VERSION_SHIFT); 8906 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, 8907 frame); 8908 } 8909 8910 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor, 8911 u8 *ver_patch) 8912 { 8913 u32 frame; 8914 8915 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame); 8916 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) & 8917 STS_FM_VERSION_MAJOR_MASK; 8918 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) & 8919 STS_FM_VERSION_MINOR_MASK; 8920 8921 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame); 8922 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) & 8923 STS_FM_VERSION_PATCH_MASK; 8924 } 8925 8926 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management, 8927 u8 *continuous) 8928 { 8929 u32 frame; 8930 8931 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame); 8932 *power_management = (frame >> POWER_MANAGEMENT_SHIFT) 8933 & POWER_MANAGEMENT_MASK; 8934 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT) 8935 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK; 8936 } 8937 8938 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z, 8939 u8 *vcu, u16 *vl15buf, u8 *crc_sizes) 8940 { 8941 u32 frame; 8942 8943 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame); 8944 *vau = (frame >> VAU_SHIFT) & VAU_MASK; 8945 *z = (frame >> Z_SHIFT) & Z_MASK; 8946 *vcu = (frame >> VCU_SHIFT) & VCU_MASK; 8947 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK; 8948 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK; 8949 } 8950 8951 static void read_vc_remote_link_width(struct hfi1_devdata *dd, 8952 u8 *remote_tx_rate, 8953 u16 *link_widths) 8954 { 8955 u32 frame; 8956 8957 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG, 8958 &frame); 8959 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT) 8960 & REMOTE_TX_RATE_MASK; 8961 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK; 8962 } 8963 8964 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx) 8965 { 8966 u32 frame; 8967 8968 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame); 8969 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK; 8970 } 8971 8972 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls) 8973 { 8974 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls); 8975 } 8976 8977 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs) 8978 { 8979 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs); 8980 } 8981 8982 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality) 8983 { 8984 u32 frame; 8985 int ret; 8986 8987 *link_quality = 0; 8988 if (dd->pport->host_link_state & HLS_UP) { 8989 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, 8990 &frame); 8991 if (ret == 0) 8992 *link_quality = (frame >> LINK_QUALITY_SHIFT) 8993 & LINK_QUALITY_MASK; 8994 } 8995 } 8996 8997 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc) 8998 { 8999 u32 frame; 9000 9001 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame); 9002 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK; 9003 } 9004 9005 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr) 9006 { 9007 u32 frame; 9008 9009 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame); 9010 *ldr = (frame & 0xff); 9011 } 9012 9013 static int read_tx_settings(struct hfi1_devdata *dd, 9014 u8 *enable_lane_tx, 9015 u8 
*tx_polarity_inversion, 9016 u8 *rx_polarity_inversion, 9017 u8 *max_rate) 9018 { 9019 u32 frame; 9020 int ret; 9021 9022 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame); 9023 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT) 9024 & ENABLE_LANE_TX_MASK; 9025 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT) 9026 & TX_POLARITY_INVERSION_MASK; 9027 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT) 9028 & RX_POLARITY_INVERSION_MASK; 9029 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK; 9030 return ret; 9031 } 9032 9033 static int write_tx_settings(struct hfi1_devdata *dd, 9034 u8 enable_lane_tx, 9035 u8 tx_polarity_inversion, 9036 u8 rx_polarity_inversion, 9037 u8 max_rate) 9038 { 9039 u32 frame; 9040 9041 /* no need to mask, all variable sizes match field widths */ 9042 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT 9043 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT 9044 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT 9045 | max_rate << MAX_RATE_SHIFT; 9046 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame); 9047 } 9048 9049 /* 9050 * Read an idle LCB message. 9051 * 9052 * Returns 0 on success, -EINVAL on error 9053 */ 9054 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out) 9055 { 9056 int ret; 9057 9058 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out); 9059 if (ret != HCMD_SUCCESS) { 9060 dd_dev_err(dd, "read idle message: type %d, err %d\n", 9061 (u32)type, ret); 9062 return -EINVAL; 9063 } 9064 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out); 9065 /* return only the payload as we already know the type */ 9066 *data_out >>= IDLE_PAYLOAD_SHIFT; 9067 return 0; 9068 } 9069 9070 /* 9071 * Read an idle SMA message. To be done in response to a notification from 9072 * the 8051. 9073 * 9074 * Returns 0 on success, -EINVAL on error 9075 */ 9076 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data) 9077 { 9078 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, 9079 data); 9080 } 9081 9082 /* 9083 * Send an idle LCB message. 9084 * 9085 * Returns 0 on success, -EINVAL on error 9086 */ 9087 static int send_idle_message(struct hfi1_devdata *dd, u64 data) 9088 { 9089 int ret; 9090 9091 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data); 9092 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL); 9093 if (ret != HCMD_SUCCESS) { 9094 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n", 9095 data, ret); 9096 return -EINVAL; 9097 } 9098 return 0; 9099 } 9100 9101 /* 9102 * Send an idle SMA message. 9103 * 9104 * Returns 0 on success, -EINVAL on error 9105 */ 9106 int send_idle_sma(struct hfi1_devdata *dd, u64 message) 9107 { 9108 u64 data; 9109 9110 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) | 9111 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT); 9112 return send_idle_message(dd, data); 9113 } 9114 9115 /* 9116 * Initialize the LCB then do a quick link up. This may or may not be 9117 * in loopback. 
9118 * 9119 * return 0 on success, -errno on error 9120 */ 9121 static int do_quick_linkup(struct hfi1_devdata *dd) 9122 { 9123 int ret; 9124 9125 lcb_shutdown(dd, 0); 9126 9127 if (loopback) { 9128 /* LCB_CFG_LOOPBACK.VAL = 2 */ 9129 /* LCB_CFG_LANE_WIDTH.VAL = 0 */ 9130 write_csr(dd, DC_LCB_CFG_LOOPBACK, 9131 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT); 9132 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); 9133 } 9134 9135 /* start the LCBs */ 9136 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */ 9137 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 9138 9139 /* simulator only loopback steps */ 9140 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 9141 /* LCB_CFG_RUN.EN = 1 */ 9142 write_csr(dd, DC_LCB_CFG_RUN, 9143 1ull << DC_LCB_CFG_RUN_EN_SHIFT); 9144 9145 ret = wait_link_transfer_active(dd, 10); 9146 if (ret) 9147 return ret; 9148 9149 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 9150 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT); 9151 } 9152 9153 if (!loopback) { 9154 /* 9155 * When doing quick linkup and not in loopback, both 9156 * sides must be done with LCB set-up before either 9157 * starts the quick linkup. Put a delay here so that 9158 * both sides can be started and have a chance to be 9159 * done with LCB set up before resuming. 9160 */ 9161 dd_dev_err(dd, 9162 "Pausing for peer to be finished with LCB set up\n"); 9163 msleep(5000); 9164 dd_dev_err(dd, "Continuing with quick linkup\n"); 9165 } 9166 9167 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */ 9168 set_8051_lcb_access(dd); 9169 9170 /* 9171 * State "quick" LinkUp request sets the physical link state to 9172 * LinkUp without a verify capability sequence. 9173 * This state is in simulator v37 and later. 9174 */ 9175 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP); 9176 if (ret != HCMD_SUCCESS) { 9177 dd_dev_err(dd, 9178 "%s: set physical link state to quick LinkUp failed with return %d\n", 9179 __func__, ret); 9180 9181 set_host_lcb_access(dd); 9182 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ 9183 9184 if (ret >= 0) 9185 ret = -EINVAL; 9186 return ret; 9187 } 9188 9189 return 0; /* success */ 9190 } 9191 9192 /* 9193 * Do all special steps to set up loopback. 9194 */ 9195 static int init_loopback(struct hfi1_devdata *dd) 9196 { 9197 dd_dev_info(dd, "Entering loopback mode\n"); 9198 9199 /* all loopbacks should disable self GUID check */ 9200 write_csr(dd, DC_DC8051_CFG_MODE, 9201 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK)); 9202 9203 /* 9204 * The simulator has only one loopback option - LCB. Switch 9205 * to that option, which includes quick link up. 9206 * 9207 * Accept all valid loopback values. 
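 *
 * In other words (summarizing the check below), under the functional
 * simulator any of LOOPBACK_SERDES, LOOPBACK_LCB or LOOPBACK_CABLE is
 * coerced to LOOPBACK_LCB with quick_linkup set.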
9208 */ 9209 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && 9210 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB || 9211 loopback == LOOPBACK_CABLE)) { 9212 loopback = LOOPBACK_LCB; 9213 quick_linkup = 1; 9214 return 0; 9215 } 9216 9217 /* 9218 * SerDes loopback init sequence is handled in set_local_link_attributes 9219 */ 9220 if (loopback == LOOPBACK_SERDES) 9221 return 0; 9222 9223 /* LCB loopback - handled at poll time */ 9224 if (loopback == LOOPBACK_LCB) { 9225 quick_linkup = 1; /* LCB is always quick linkup */ 9226 9227 /* not supported in emulation due to emulation RTL changes */ 9228 if (dd->icode == ICODE_FPGA_EMULATION) { 9229 dd_dev_err(dd, 9230 "LCB loopback not supported in emulation\n"); 9231 return -EINVAL; 9232 } 9233 return 0; 9234 } 9235 9236 /* external cable loopback requires no extra steps */ 9237 if (loopback == LOOPBACK_CABLE) 9238 return 0; 9239 9240 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback); 9241 return -EINVAL; 9242 } 9243 9244 /* 9245 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits 9246 * used in the Verify Capability link width attribute. 9247 */ 9248 static u16 opa_to_vc_link_widths(u16 opa_widths) 9249 { 9250 int i; 9251 u16 result = 0; 9252 9253 static const struct link_bits { 9254 u16 from; 9255 u16 to; 9256 } opa_link_xlate[] = { 9257 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, 9258 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, 9259 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, 9260 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, 9261 }; 9262 9263 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) { 9264 if (opa_widths & opa_link_xlate[i].from) 9265 result |= opa_link_xlate[i].to; 9266 } 9267 return result; 9268 } 9269 9270 /* 9271 * Set link attributes before moving to polling. 9272 */ 9273 static int set_local_link_attributes(struct hfi1_pportdata *ppd) 9274 { 9275 struct hfi1_devdata *dd = ppd->dd; 9276 u8 enable_lane_tx; 9277 u8 tx_polarity_inversion; 9278 u8 rx_polarity_inversion; 9279 int ret; 9280 u32 misc_bits = 0; 9281 /* reset our fabric serdes to clear any lingering problems */ 9282 fabric_serdes_reset(dd); 9283 9284 /* set the local tx rate - need to read-modify-write */ 9285 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion, 9286 &rx_polarity_inversion, &ppd->local_tx_rate); 9287 if (ret) 9288 goto set_local_link_attributes_fail; 9289 9290 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { 9291 /* set the tx rate to the fastest enabled */ 9292 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) 9293 ppd->local_tx_rate = 1; 9294 else 9295 ppd->local_tx_rate = 0; 9296 } else { 9297 /* set the tx rate to all enabled */ 9298 ppd->local_tx_rate = 0; 9299 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) 9300 ppd->local_tx_rate |= 2; 9301 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) 9302 ppd->local_tx_rate |= 1; 9303 } 9304 9305 enable_lane_tx = 0xF; /* enable all four lanes */ 9306 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion, 9307 rx_polarity_inversion, ppd->local_tx_rate); 9308 if (ret != HCMD_SUCCESS) 9309 goto set_local_link_attributes_fail; 9310 9311 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION); 9312 if (ret != HCMD_SUCCESS) { 9313 dd_dev_err(dd, 9314 "Failed to set host interface version, return 0x%x\n", 9315 ret); 9316 goto set_local_link_attributes_fail; 9317 } 9318 9319 /* 9320 * DC supports continuous updates. 
 */
	ret = write_vc_local_phy(dd,
				 0 /* no power management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* z=1 in the next call: AU of 0 is not supported by the hardware */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/*
	 * SerDes loopback init sequence requires
	 * setting bit 0 of MISC_CONFIG_BITS
	 */
	if (loopback == LOOPBACK_SERDES)
		misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;

	/*
	 * An external device configuration request is used to reset the LCB
	 * to retry to obtain operational lanes when the first attempt is
	 * unsuccessful.
	 */
	if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
		misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;

	ret = write_vc_local_link_mode(dd, misc_bits, 0,
				       opa_to_vc_link_widths(
						ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* let peer know who we are */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		   "Failed to set local link attributes, return 0x%x\n",
		   ret);
	return ret;
}

/*
 * Call this to start the link.
 * Do not do anything if the link is disabled.
 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
 */
int start_link(struct hfi1_pportdata *ppd)
{
	/*
	 * Tune the SerDes to a ballpark setting for optimal signal and bit
	 * error rate. Needs to be done before starting the link.
	 */
	tune_serdes(ppd);

	if (!ppd->driver_link_ready) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because driver is not ready\n",
			    __func__);
		return 0;
	}

	/*
	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
	 * pkey table can be configured properly if the HFI unit is connected
	 * to switch port with MgmtAllowed=NO
	 */
	clear_full_mgmt_pkey(ppd);

	return set_link_state(ppd, HLS_DN_POLL);
}

static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/*
	 * Some QSFP cables have a quirk that asserts the IntN line as a side
	 * effect of power up on plug-in. We ignore this false positive
	 * interrupt until the module has finished powering up by waiting for
	 * a minimum timeout of the module inrush initialization time of
	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
	 * module have stabilized.
	 */
	msleep(500);

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N))
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}

static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ?
ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); 9437 if (enable) { 9438 /* 9439 * Clear the status register to avoid an immediate interrupt 9440 * when we re-enable the IntN pin 9441 */ 9442 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, 9443 QSFP_HFI0_INT_N); 9444 mask |= (u64)QSFP_HFI0_INT_N; 9445 } else { 9446 mask &= ~(u64)QSFP_HFI0_INT_N; 9447 } 9448 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); 9449 } 9450 9451 int reset_qsfp(struct hfi1_pportdata *ppd) 9452 { 9453 struct hfi1_devdata *dd = ppd->dd; 9454 u64 mask, qsfp_mask; 9455 9456 /* Disable INT_N from triggering QSFP interrupts */ 9457 set_qsfp_int_n(ppd, 0); 9458 9459 /* Reset the QSFP */ 9460 mask = (u64)QSFP_HFI0_RESET_N; 9461 9462 qsfp_mask = read_csr(dd, 9463 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); 9464 qsfp_mask &= ~mask; 9465 write_csr(dd, 9466 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); 9467 9468 udelay(10); 9469 9470 qsfp_mask |= mask; 9471 write_csr(dd, 9472 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); 9473 9474 wait_for_qsfp_init(ppd); 9475 9476 /* 9477 * Allow INT_N to trigger the QSFP interrupt to watch 9478 * for alarms and warnings 9479 */ 9480 set_qsfp_int_n(ppd, 1); 9481 9482 /* 9483 * After the reset, AOC transmitters are enabled by default. They need 9484 * to be turned off to complete the QSFP setup before they can be 9485 * enabled again. 9486 */ 9487 return set_qsfp_tx(ppd, 0); 9488 } 9489 9490 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, 9491 u8 *qsfp_interrupt_status) 9492 { 9493 struct hfi1_devdata *dd = ppd->dd; 9494 9495 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) || 9496 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING)) 9497 dd_dev_err(dd, "%s: QSFP cable temperature too high\n", 9498 __func__); 9499 9500 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) || 9501 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING)) 9502 dd_dev_err(dd, "%s: QSFP cable temperature too low\n", 9503 __func__); 9504 9505 /* 9506 * The remaining alarms/warnings don't matter if the link is down. 
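 *
 * For reference, the layout of qsfp_interrupt_status[] as handled by
 * this routine (summarizing the checks): index 0 temperature, index 1
 * supply voltage, index 2 vendor specific, indices 3-4 RX power
 * (channels 1/2 and 3/4), indices 5-6 TX bias, indices 7-8 TX power,
 * indices 9-12 reserved, indices 13-15 vendor specific.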
9507 */ 9508 if (ppd->host_link_state & HLS_DOWN) 9509 return 0; 9510 9511 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) || 9512 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING)) 9513 dd_dev_err(dd, "%s: QSFP supply voltage too high\n", 9514 __func__); 9515 9516 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) || 9517 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING)) 9518 dd_dev_err(dd, "%s: QSFP supply voltage too low\n", 9519 __func__); 9520 9521 /* Byte 2 is vendor specific */ 9522 9523 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) || 9524 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING)) 9525 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n", 9526 __func__); 9527 9528 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) || 9529 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING)) 9530 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n", 9531 __func__); 9532 9533 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) || 9534 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING)) 9535 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n", 9536 __func__); 9537 9538 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) || 9539 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING)) 9540 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n", 9541 __func__); 9542 9543 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) || 9544 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING)) 9545 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n", 9546 __func__); 9547 9548 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) || 9549 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING)) 9550 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n", 9551 __func__); 9552 9553 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) || 9554 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING)) 9555 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n", 9556 __func__); 9557 9558 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) || 9559 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING)) 9560 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n", 9561 __func__); 9562 9563 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) || 9564 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING)) 9565 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n", 9566 __func__); 9567 9568 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) || 9569 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING)) 9570 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n", 9571 __func__); 9572 9573 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) || 9574 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING)) 9575 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n", 9576 __func__); 9577 9578 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) || 9579 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING)) 9580 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n", 9581 __func__); 9582 9583 /* Bytes 9-10 and 11-12 are reserved */ 9584 /* Bytes 13-15 are vendor specific */ 9585 9586 return 0; 9587 } 9588 9589 /* This routine will only be scheduled if the QSFP module present is asserted */ 9590 void qsfp_event(struct work_struct *work) 9591 { 9592 struct qsfp_data *qd; 9593 struct hfi1_pportdata *ppd; 9594 struct hfi1_devdata *dd; 9595 9596 qd = container_of(work, struct qsfp_data, qsfp_work); 9597 ppd = qd->ppd; 9598 dd = ppd->dd; 9599 9600 /* Sanity check */ 9601 if (!qsfp_mod_present(ppd)) 9602 return; 9603 9604 if (ppd->host_link_state == HLS_DN_DISABLE) { 
9605 dd_dev_info(ppd->dd, 9606 "%s: stopping link start because link is disabled\n", 9607 __func__); 9608 return; 9609 } 9610 9611 /* 9612 * Turn DC back on after cable has been re-inserted. Up until 9613 * now, the DC has been in reset to save power. 9614 */ 9615 dc_start(dd); 9616 9617 if (qd->cache_refresh_required) { 9618 set_qsfp_int_n(ppd, 0); 9619 9620 wait_for_qsfp_init(ppd); 9621 9622 /* 9623 * Allow INT_N to trigger the QSFP interrupt to watch 9624 * for alarms and warnings 9625 */ 9626 set_qsfp_int_n(ppd, 1); 9627 9628 start_link(ppd); 9629 } 9630 9631 if (qd->check_interrupt_flags) { 9632 u8 qsfp_interrupt_status[16] = {0,}; 9633 9634 if (one_qsfp_read(ppd, dd->hfi1_id, 6, 9635 &qsfp_interrupt_status[0], 16) != 16) { 9636 dd_dev_info(dd, 9637 "%s: Failed to read status of QSFP module\n", 9638 __func__); 9639 } else { 9640 unsigned long flags; 9641 9642 handle_qsfp_error_conditions( 9643 ppd, qsfp_interrupt_status); 9644 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); 9645 ppd->qsfp_info.check_interrupt_flags = 0; 9646 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, 9647 flags); 9648 } 9649 } 9650 } 9651 9652 void init_qsfp_int(struct hfi1_devdata *dd) 9653 { 9654 struct hfi1_pportdata *ppd = dd->pport; 9655 u64 qsfp_mask; 9656 9657 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N); 9658 /* Clear current status to avoid spurious interrupts */ 9659 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, 9660 qsfp_mask); 9661 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, 9662 qsfp_mask); 9663 9664 set_qsfp_int_n(ppd, 0); 9665 9666 /* Handle active low nature of INT_N and MODPRST_N pins */ 9667 if (qsfp_mod_present(ppd)) 9668 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N; 9669 write_csr(dd, 9670 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, 9671 qsfp_mask); 9672 9673 /* Enable the appropriate QSFP IRQ source */ 9674 if (!dd->hfi1_id) 9675 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true); 9676 else 9677 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true); 9678 } 9679 9680 /* 9681 * Do a one-time initialize of the LCB block. 9682 */ 9683 static void init_lcb(struct hfi1_devdata *dd) 9684 { 9685 /* simulator does not correctly handle LCB cclk loopback, skip */ 9686 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) 9687 return; 9688 9689 /* the DC has been reset earlier in the driver load */ 9690 9691 /* set LCB for cclk loopback on the port */ 9692 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01); 9693 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00); 9694 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00); 9695 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); 9696 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08); 9697 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02); 9698 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00); 9699 } 9700 9701 /* 9702 * Perform a test read on the QSFP. Return 0 on success, -ERRNO 9703 * on error. 9704 */ 9705 static int test_qsfp_read(struct hfi1_pportdata *ppd) 9706 { 9707 int ret; 9708 u8 status; 9709 9710 /* 9711 * Report success if not a QSFP or, if it is a QSFP, but the cable is 9712 * not present 9713 */ 9714 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) 9715 return 0; 9716 9717 /* read byte 2, the status byte */ 9718 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); 9719 if (ret < 0) 9720 return ret; 9721 if (ret != 1) 9722 return -EIO; 9723 9724 return 0; /* success */ 9725 } 9726 9727 /* 9728 * Values for QSFP retry. 9729 * 9730 * Give up after 10s (20 x 500ms). 
 * The overall timeout was arrived
 * at empirically, from experience on a large cluster.
 */
#define MAX_QSFP_RETRIES 20
#define QSFP_RETRY_WAIT 500 /* msec */

/*
 * Try a QSFP read. If it fails, schedule a retry for later.
 * Called on first link activation after driver load.
 */
static void try_start_link(struct hfi1_pportdata *ppd)
{
	if (test_qsfp_read(ppd)) {
		/* read failed */
		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
			return;
		}
		dd_dev_info(ppd->dd,
			    "QSFP not responding, waiting and retrying %d\n",
			    (int)ppd->qsfp_retry_count);
		ppd->qsfp_retry_count++;
		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
		return;
	}
	ppd->qsfp_retry_count = 0;

	start_link(ppd);
}

/*
 * Workqueue function to start the link after a delay.
 */
void handle_start_link(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  start_link_work.work);
	try_start_link(ppd);
}

int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
	if (!guid) {
		if (dd->base_guid)
			guid = dd->base_guid + ppd->port - 1;
		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time init of the LCB */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	get_port_type(ppd);
	if (ppd->port_type == PORT_TYPE_QSFP) {
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
	}

	try_start_link(ppd);
	return 0;
}

void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down. First clear
	 * driver_link_ready, which indicates the driver no longer wants
	 * the link up. Then make sure the link is not automatically
	 * restarted (link_enabled). Cancel any pending restart. And
	 * finally go offline.
9820 */ 9821 ppd->driver_link_ready = 0; 9822 ppd->link_enabled = 0; 9823 9824 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ 9825 flush_delayed_work(&ppd->start_link_work); 9826 cancel_delayed_work_sync(&ppd->start_link_work); 9827 9828 ppd->offline_disabled_reason = 9829 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT); 9830 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0, 9831 OPA_LINKDOWN_REASON_REBOOT); 9832 set_link_state(ppd, HLS_DN_OFFLINE); 9833 9834 /* disable the port */ 9835 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 9836 } 9837 9838 static inline int init_cpu_counters(struct hfi1_devdata *dd) 9839 { 9840 struct hfi1_pportdata *ppd; 9841 int i; 9842 9843 ppd = (struct hfi1_pportdata *)(dd + 1); 9844 for (i = 0; i < dd->num_pports; i++, ppd++) { 9845 ppd->ibport_data.rvp.rc_acks = NULL; 9846 ppd->ibport_data.rvp.rc_qacks = NULL; 9847 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); 9848 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); 9849 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); 9850 if (!ppd->ibport_data.rvp.rc_acks || 9851 !ppd->ibport_data.rvp.rc_delayed_comp || 9852 !ppd->ibport_data.rvp.rc_qacks) 9853 return -ENOMEM; 9854 } 9855 9856 return 0; 9857 } 9858 9859 /* 9860 * index is the index into the receive array 9861 */ 9862 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index, 9863 u32 type, unsigned long pa, u16 order) 9864 { 9865 u64 reg; 9866 9867 if (!(dd->flags & HFI1_PRESENT)) 9868 goto done; 9869 9870 if (type == PT_INVALID || type == PT_INVALID_FLUSH) { 9871 pa = 0; 9872 order = 0; 9873 } else if (type > PT_INVALID) { 9874 dd_dev_err(dd, 9875 "unexpected receive array type %u for index %u, not handled\n", 9876 type, index); 9877 goto done; 9878 } 9879 trace_hfi1_put_tid(dd, index, type, pa, order); 9880 9881 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */ 9882 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK 9883 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT 9884 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK) 9885 << RCV_ARRAY_RT_ADDR_SHIFT; 9886 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); 9887 writeq(reg, dd->rcvarray_wc + (index * 8)); 9888 9889 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3) 9890 /* 9891 * Eager entries are written and flushed 9892 * 9893 * Expected entries are flushed every 4 writes 9894 */ 9895 flush_wc(); 9896 done: 9897 return; 9898 } 9899 9900 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd) 9901 { 9902 struct hfi1_devdata *dd = rcd->dd; 9903 u32 i; 9904 9905 /* this could be optimized */ 9906 for (i = rcd->eager_base; i < rcd->eager_base + 9907 rcd->egrbufs.alloced; i++) 9908 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); 9909 9910 for (i = rcd->expected_base; 9911 i < rcd->expected_base + rcd->expected_count; i++) 9912 hfi1_put_tid(dd, i, PT_INVALID, 0, 0); 9913 } 9914 9915 static const char * const ib_cfg_name_strings[] = { 9916 "HFI1_IB_CFG_LIDLMC", 9917 "HFI1_IB_CFG_LWID_DG_ENB", 9918 "HFI1_IB_CFG_LWID_ENB", 9919 "HFI1_IB_CFG_LWID", 9920 "HFI1_IB_CFG_SPD_ENB", 9921 "HFI1_IB_CFG_SPD", 9922 "HFI1_IB_CFG_RXPOL_ENB", 9923 "HFI1_IB_CFG_LREV_ENB", 9924 "HFI1_IB_CFG_LINKLATENCY", 9925 "HFI1_IB_CFG_HRTBT", 9926 "HFI1_IB_CFG_OP_VLS", 9927 "HFI1_IB_CFG_VL_HIGH_CAP", 9928 "HFI1_IB_CFG_VL_LOW_CAP", 9929 "HFI1_IB_CFG_OVERRUN_THRESH", 9930 "HFI1_IB_CFG_PHYERR_THRESH", 9931 "HFI1_IB_CFG_LINKDEFAULT", 9932 "HFI1_IB_CFG_PKEYS", 9933 "HFI1_IB_CFG_MTU", 9934 "HFI1_IB_CFG_LSTATE", 9935 "HFI1_IB_CFG_VL_HIGH_LIMIT", 9936 "HFI1_IB_CFG_PMA_TICKS", 9937 "HFI1_IB_CFG_PORT" 9938 
};

static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}

int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active Link-width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current Link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->actual_vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = HLS_DEFAULT;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(
				dd,
				"%s: which %s: not implemented\n",
				__func__,
				ib_cfg_name(which));
		break;
	}

	return val;
}

/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device. This count includes the ICRC which is
 * not part of the packet held in memory but is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all contexts will have the same value. Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}

/*
 * Set Send Length
 * @ppd: per port data
 *
 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
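 *
 * Worked example (illustrative numbers only; a 32-DW receive header
 * entry size is assumed, not read from any configuration): max_hb =
 * (32 - 2 + 1) << 2 = 124 bytes, so a VL with an 8192-byte MTU gets a
 * length check of (8192 + 124) >> 2 = 2079 DWs.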
10045 */ 10046 static void set_send_length(struct hfi1_pportdata *ppd) 10047 { 10048 struct hfi1_devdata *dd = ppd->dd; 10049 u32 max_hb = lrh_max_header_bytes(dd), dcmtu; 10050 u32 maxvlmtu = dd->vld[15].mtu; 10051 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) 10052 & SEND_LEN_CHECK1_LEN_VL15_MASK) << 10053 SEND_LEN_CHECK1_LEN_VL15_SHIFT; 10054 int i, j; 10055 u32 thres; 10056 10057 for (i = 0; i < ppd->vls_supported; i++) { 10058 if (dd->vld[i].mtu > maxvlmtu) 10059 maxvlmtu = dd->vld[i].mtu; 10060 if (i <= 3) 10061 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) 10062 & SEND_LEN_CHECK0_LEN_VL0_MASK) << 10063 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT); 10064 else 10065 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) 10066 & SEND_LEN_CHECK1_LEN_VL4_MASK) << 10067 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT); 10068 } 10069 write_csr(dd, SEND_LEN_CHECK0, len1); 10070 write_csr(dd, SEND_LEN_CHECK1, len2); 10071 /* adjust kernel credit return thresholds based on new MTUs */ 10072 /* all kernel receive contexts have the same hdrqentsize */ 10073 for (i = 0; i < ppd->vls_supported; i++) { 10074 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), 10075 sc_mtu_to_threshold(dd->vld[i].sc, 10076 dd->vld[i].mtu, 10077 dd->rcd[0]->rcvhdrqentsize)); 10078 for (j = 0; j < INIT_SC_PER_VL; j++) 10079 sc_set_cr_threshold( 10080 pio_select_send_context_vl(dd, j, i), 10081 thres); 10082 } 10083 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), 10084 sc_mtu_to_threshold(dd->vld[15].sc, 10085 dd->vld[15].mtu, 10086 dd->rcd[0]->rcvhdrqentsize)); 10087 sc_set_cr_threshold(dd->vld[15].sc, thres); 10088 10089 /* Adjust maximum MTU for the port in DC */ 10090 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 : 10091 (ilog2(maxvlmtu >> 8) + 1); 10092 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); 10093 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK; 10094 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) << 10095 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT; 10096 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); 10097 } 10098 10099 static void set_lidlmc(struct hfi1_pportdata *ppd) 10100 { 10101 int i; 10102 u64 sreg = 0; 10103 struct hfi1_devdata *dd = ppd->dd; 10104 u32 mask = ~((1U << ppd->lmc) - 1); 10105 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); 10106 u32 lid; 10107 10108 /* 10109 * Program 0 in CSR if port lid is extended. This prevents 10110 * 9B packets being sent out for large lids. 10111 */ 10112 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 
0 : ppd->lid; 10113 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 10114 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); 10115 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) 10116 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) | 10117 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK) 10118 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT); 10119 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); 10120 10121 /* 10122 * Iterate over all the send contexts and set their SLID check 10123 */ 10124 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) << 10125 SEND_CTXT_CHECK_SLID_MASK_SHIFT) | 10126 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) << 10127 SEND_CTXT_CHECK_SLID_VALUE_SHIFT); 10128 10129 for (i = 0; i < chip_send_contexts(dd); i++) { 10130 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x", 10131 i, (u32)sreg); 10132 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg); 10133 } 10134 10135 /* Now we have to do the same thing for the sdma engines */ 10136 sdma_update_lmc(dd, mask, lid); 10137 } 10138 10139 static const char *state_completed_string(u32 completed) 10140 { 10141 static const char * const state_completed[] = { 10142 "EstablishComm", 10143 "OptimizeEQ", 10144 "VerifyCap" 10145 }; 10146 10147 if (completed < ARRAY_SIZE(state_completed)) 10148 return state_completed[completed]; 10149 10150 return "unknown"; 10151 } 10152 10153 static const char all_lanes_dead_timeout_expired[] = 10154 "All lanes were inactive – was the interconnect media removed?"; 10155 static const char tx_out_of_policy[] = 10156 "Passing lanes on local port do not meet the local link width policy"; 10157 static const char no_state_complete[] = 10158 "State timeout occurred before link partner completed the state"; 10159 static const char * const state_complete_reasons[] = { 10160 [0x00] = "Reason unknown", 10161 [0x01] = "Link was halted by driver, refer to LinkDownReason", 10162 [0x02] = "Link partner reported failure", 10163 [0x10] = "Unable to achieve frame sync on any lane", 10164 [0x11] = 10165 "Unable to find a common bit rate with the link partner", 10166 [0x12] = 10167 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy", 10168 [0x13] = 10169 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy", 10170 [0x14] = no_state_complete, 10171 [0x15] = 10172 "State timeout occurred before link partner identified equalization presets", 10173 [0x16] = 10174 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy", 10175 [0x17] = tx_out_of_policy, 10176 [0x20] = all_lanes_dead_timeout_expired, 10177 [0x21] = 10178 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy", 10179 [0x22] = no_state_complete, 10180 [0x23] = 10181 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy", 10182 [0x24] = tx_out_of_policy, 10183 [0x30] = all_lanes_dead_timeout_expired, 10184 [0x31] = 10185 "State timeout occurred waiting for host to process received frames", 10186 [0x32] = no_state_complete, 10187 [0x33] = 10188 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy", 10189 [0x34] = tx_out_of_policy, 10190 [0x35] = "Negotiated link width is mutually exclusive", 10191 [0x36] = 10192 "Timed out before receiving verifycap frames in VerifyCap.Exchange", 10193 [0x37] = "Unable to resolve secure data exchange", 10194 }; 10195 10196 static const char 
*state_complete_reason_code_string(struct hfi1_pportdata *ppd,
				   u32 code)
{
	const char *str = NULL;

	if (code < ARRAY_SIZE(state_complete_reasons))
		str = state_complete_reasons[code];

	if (str)
		return str;
	return "Reserved";
}

/* describe the given last state complete frame */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
				  const char *prefix)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 success;
	u32 state;
	u32 reason;
	u32 lanes;

	/*
	 * Decode frame:
	 *  [ 0: 0] - success
	 *  [ 3: 1] - state
	 *  [ 7: 4] - next state timeout
	 *  [15: 8] - reason code
	 *  [31:16] - lanes
	 */
	success = frame & 0x1;
	state = (frame >> 1) & 0x7;
	reason = (frame >> 8) & 0xff;
	lanes = (frame >> 16) & 0xffff;

	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
		   prefix, frame);
	dd_dev_err(dd, " last reported state: %s (0x%x)\n",
		   state_completed_string(state), state);
	dd_dev_err(dd, " state successfully completed: %s\n",
		   success ? "yes" : "no");
	dd_dev_err(dd, " fail reason 0x%x: %s\n",
		   reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
}

/*
 * Read the last state complete frames and explain them. This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI). That is, anywhere between polling and link up.
 */
static void check_lni_states(struct hfi1_pportdata *ppd)
{
	u32 last_local_state;
	u32 last_remote_state;

	read_last_local_state(ppd->dd, &last_local_state);
	read_last_remote_state(ppd->dd, &last_remote_state);

	/*
	 * Don't report anything if there is nothing to report. A value of
	 * 0 means the link was taken down while polling and there was no
	 * training in-process.
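	 *
	 * A non-zero frame decodes as shown in decode_state_complete()
	 * above. Illustrative value (made up, not captured from hardware):
	 * frame 0x000f0205 decodes to success = 1, state = 2 (VerifyCap),
	 * reason = 0x02 ("Link partner reported failure") and passing lane
	 * mask 0x000f.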
10260 */ 10261 if (last_local_state == 0 && last_remote_state == 0) 10262 return; 10263 10264 decode_state_complete(ppd, last_local_state, "transmitted"); 10265 decode_state_complete(ppd, last_remote_state, "received"); 10266 } 10267 10268 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */ 10269 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) 10270 { 10271 u64 reg; 10272 unsigned long timeout; 10273 10274 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */ 10275 timeout = jiffies + msecs_to_jiffies(wait_ms); 10276 while (1) { 10277 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); 10278 if (reg) 10279 break; 10280 if (time_after(jiffies, timeout)) { 10281 dd_dev_err(dd, 10282 "timeout waiting for LINK_TRANSFER_ACTIVE\n"); 10283 return -ETIMEDOUT; 10284 } 10285 udelay(2); 10286 } 10287 return 0; 10288 } 10289 10290 /* called when the logical link state is not down as it should be */ 10291 static void force_logical_link_state_down(struct hfi1_pportdata *ppd) 10292 { 10293 struct hfi1_devdata *dd = ppd->dd; 10294 10295 /* 10296 * Bring link up in LCB loopback 10297 */ 10298 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); 10299 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 10300 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); 10301 10302 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); 10303 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0); 10304 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); 10305 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2); 10306 10307 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 10308 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET); 10309 udelay(3); 10310 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1); 10311 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); 10312 10313 wait_link_transfer_active(dd, 100); 10314 10315 /* 10316 * Bring the link down again. 10317 */ 10318 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); 10319 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0); 10320 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0); 10321 10322 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); 10323 } 10324 10325 /* 10326 * Helper for set_link_state(). Do not call except from that routine. 10327 * Expects ppd->hls_mutex to be held. 10328 * 10329 * @rem_reason value to be sent to the neighbor 10330 * 10331 * LinkDownReasons only set if transition succeeds. 
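 *
 * Outline of the transition (summarizing the code below): request
 * PLS_OFFLINE from the 8051, wait for the offline.quiet substate, take
 * back LCB access, force the logical state down if needed, then wait
 * for the 8051 to become ready for host requests before reporting
 * HLS_DN_OFFLINE.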
10332 */ 10333 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) 10334 { 10335 struct hfi1_devdata *dd = ppd->dd; 10336 u32 previous_state; 10337 int offline_state_ret; 10338 int ret; 10339 10340 update_lcb_cache(dd); 10341 10342 previous_state = ppd->host_link_state; 10343 ppd->host_link_state = HLS_GOING_OFFLINE; 10344 10345 /* start offline transition */ 10346 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE); 10347 10348 if (ret != HCMD_SUCCESS) { 10349 dd_dev_err(dd, 10350 "Failed to transition to Offline link state, return %d\n", 10351 ret); 10352 return -EINVAL; 10353 } 10354 if (ppd->offline_disabled_reason == 10355 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) 10356 ppd->offline_disabled_reason = 10357 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 10358 10359 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); 10360 if (offline_state_ret < 0) 10361 return offline_state_ret; 10362 10363 /* Disabling AOC transmitters */ 10364 if (ppd->port_type == PORT_TYPE_QSFP && 10365 ppd->qsfp_info.limiting_active && 10366 qsfp_mod_present(ppd)) { 10367 int ret; 10368 10369 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); 10370 if (ret == 0) { 10371 set_qsfp_tx(ppd, 0); 10372 release_chip_resource(dd, qsfp_resource(dd)); 10373 } else { 10374 /* not fatal, but should warn */ 10375 dd_dev_err(dd, 10376 "Unable to acquire lock to turn off QSFP TX\n"); 10377 } 10378 } 10379 10380 /* 10381 * Wait for the offline.Quiet transition if it hasn't happened yet. It 10382 * can take a while for the link to go down. 10383 */ 10384 if (offline_state_ret != PLS_OFFLINE_QUIET) { 10385 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); 10386 if (ret < 0) 10387 return ret; 10388 } 10389 10390 /* 10391 * Now in charge of LCB - must be after the physical state is 10392 * offline.quiet and before host_link_state is changed. 10393 */ 10394 set_host_lcb_access(dd); 10395 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ 10396 10397 /* make sure the logical state is also down */ 10398 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); 10399 if (ret) 10400 force_logical_link_state_down(ppd); 10401 10402 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ 10403 update_statusp(ppd, IB_PORT_DOWN); 10404 10405 /* 10406 * The LNI has a mandatory wait time after the physical state 10407 * moves to Offline.Quiet. The wait time may be different 10408 * depending on how the link went down. The 8051 firmware 10409 * will observe the needed wait time and only move to ready 10410 * when that is completed. The largest of the quiet timeouts 10411 * is 6s, so wait that long and then at least 0.5s more for 10412 * other transitions, and another 0.5s for a buffer. 10413 */ 10414 ret = wait_fm_ready(dd, 7000); 10415 if (ret) { 10416 dd_dev_err(dd, 10417 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n"); 10418 /* state is really offline, so make it so */ 10419 ppd->host_link_state = HLS_DN_OFFLINE; 10420 return ret; 10421 } 10422 10423 /* 10424 * The state is now offline and the 8051 is ready to accept host 10425 * requests. 
10426 * - change our state 10427 * - notify others if we were previously in a linkup state 10428 */ 10429 ppd->host_link_state = HLS_DN_OFFLINE; 10430 if (previous_state & HLS_UP) { 10431 /* went down while link was up */ 10432 handle_linkup_change(dd, 0); 10433 } else if (previous_state 10434 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 10435 /* went down while attempting link up */ 10436 check_lni_states(ppd); 10437 10438 /* The QSFP doesn't need to be reset on LNI failure */ 10439 ppd->qsfp_info.reset_needed = 0; 10440 } 10441 10442 /* the active link width (downgrade) is 0 on link down */ 10443 ppd->link_width_active = 0; 10444 ppd->link_width_downgrade_tx_active = 0; 10445 ppd->link_width_downgrade_rx_active = 0; 10446 ppd->current_egress_rate = 0; 10447 return 0; 10448 } 10449 10450 /* return the link state name */ 10451 static const char *link_state_name(u32 state) 10452 { 10453 const char *name; 10454 int n = ilog2(state); 10455 static const char * const names[] = { 10456 [__HLS_UP_INIT_BP] = "INIT", 10457 [__HLS_UP_ARMED_BP] = "ARMED", 10458 [__HLS_UP_ACTIVE_BP] = "ACTIVE", 10459 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF", 10460 [__HLS_DN_POLL_BP] = "POLL", 10461 [__HLS_DN_DISABLE_BP] = "DISABLE", 10462 [__HLS_DN_OFFLINE_BP] = "OFFLINE", 10463 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP", 10464 [__HLS_GOING_UP_BP] = "GOING_UP", 10465 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE", 10466 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN" 10467 }; 10468 10469 name = n < ARRAY_SIZE(names) ? names[n] : NULL; 10470 return name ? name : "unknown"; 10471 } 10472 10473 /* return the link state reason name */ 10474 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state) 10475 { 10476 if (state == HLS_UP_INIT) { 10477 switch (ppd->linkinit_reason) { 10478 case OPA_LINKINIT_REASON_LINKUP: 10479 return "(LINKUP)"; 10480 case OPA_LINKINIT_REASON_FLAPPING: 10481 return "(FLAPPING)"; 10482 case OPA_LINKINIT_OUTSIDE_POLICY: 10483 return "(OUTSIDE_POLICY)"; 10484 case OPA_LINKINIT_QUARANTINED: 10485 return "(QUARANTINED)"; 10486 case OPA_LINKINIT_INSUFIC_CAPABILITY: 10487 return "(INSUFIC_CAPABILITY)"; 10488 default: 10489 break; 10490 } 10491 } 10492 return ""; 10493 } 10494 10495 /* 10496 * driver_pstate - convert the driver's notion of a port's 10497 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*). 10498 * Return -1 (converted to a u32) to indicate error. 10499 */ 10500 u32 driver_pstate(struct hfi1_pportdata *ppd) 10501 { 10502 switch (ppd->host_link_state) { 10503 case HLS_UP_INIT: 10504 case HLS_UP_ARMED: 10505 case HLS_UP_ACTIVE: 10506 return IB_PORTPHYSSTATE_LINKUP; 10507 case HLS_DN_POLL: 10508 return IB_PORTPHYSSTATE_POLLING; 10509 case HLS_DN_DISABLE: 10510 return IB_PORTPHYSSTATE_DISABLED; 10511 case HLS_DN_OFFLINE: 10512 return OPA_PORTPHYSSTATE_OFFLINE; 10513 case HLS_VERIFY_CAP: 10514 return IB_PORTPHYSSTATE_TRAINING; 10515 case HLS_GOING_UP: 10516 return IB_PORTPHYSSTATE_TRAINING; 10517 case HLS_GOING_OFFLINE: 10518 return OPA_PORTPHYSSTATE_OFFLINE; 10519 case HLS_LINK_COOLDOWN: 10520 return OPA_PORTPHYSSTATE_OFFLINE; 10521 case HLS_DN_DOWNDEF: 10522 default: 10523 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", 10524 ppd->host_link_state); 10525 return -1; 10526 } 10527 } 10528 10529 /* 10530 * driver_lstate - convert the driver's notion of a port's 10531 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1 10532 * (converted to a u32) to indicate error. 
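 *
 * A caller checks for failure against the u32-converted value, e.g.
 * (sketch only):
 *
 *      u32 lstate = driver_lstate(ppd);
 *
 *      if (lstate == (u32)-1)
 *              return;         // invalid host_link_state, already logged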
10533 */ 10534 u32 driver_lstate(struct hfi1_pportdata *ppd) 10535 { 10536 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) 10537 return IB_PORT_DOWN; 10538 10539 switch (ppd->host_link_state & HLS_UP) { 10540 case HLS_UP_INIT: 10541 return IB_PORT_INIT; 10542 case HLS_UP_ARMED: 10543 return IB_PORT_ARMED; 10544 case HLS_UP_ACTIVE: 10545 return IB_PORT_ACTIVE; 10546 default: 10547 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", 10548 ppd->host_link_state); 10549 return -1; 10550 } 10551 } 10552 10553 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason, 10554 u8 neigh_reason, u8 rem_reason) 10555 { 10556 if (ppd->local_link_down_reason.latest == 0 && 10557 ppd->neigh_link_down_reason.latest == 0) { 10558 ppd->local_link_down_reason.latest = lcl_reason; 10559 ppd->neigh_link_down_reason.latest = neigh_reason; 10560 ppd->remote_link_down_reason = rem_reason; 10561 } 10562 } 10563 10564 /** 10565 * data_vls_operational() - Verify if data VL BCT credits and MTU 10566 * are both set. 10567 * @ppd: pointer to hfi1_pportdata structure 10568 * 10569 * Return: true - Ok, false -otherwise. 10570 */ 10571 static inline bool data_vls_operational(struct hfi1_pportdata *ppd) 10572 { 10573 int i; 10574 u64 reg; 10575 10576 if (!ppd->actual_vls_operational) 10577 return false; 10578 10579 for (i = 0; i < ppd->vls_supported; i++) { 10580 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); 10581 if ((reg && !ppd->dd->vld[i].mtu) || 10582 (!reg && ppd->dd->vld[i].mtu)) 10583 return false; 10584 } 10585 10586 return true; 10587 } 10588 10589 /* 10590 * Change the physical and/or logical link state. 10591 * 10592 * Do not call this routine while inside an interrupt. It contains 10593 * calls to routines that can take multiple seconds to finish. 10594 * 10595 * Returns 0 on success, -errno on failure. 10596 */ 10597 int set_link_state(struct hfi1_pportdata *ppd, u32 state) 10598 { 10599 struct hfi1_devdata *dd = ppd->dd; 10600 struct ib_event event = {.device = NULL}; 10601 int ret1, ret = 0; 10602 int orig_new_state, poll_bounce; 10603 10604 mutex_lock(&ppd->hls_lock); 10605 10606 orig_new_state = state; 10607 if (state == HLS_DN_DOWNDEF) 10608 state = HLS_DEFAULT; 10609 10610 /* interpret poll -> poll as a link bounce */ 10611 poll_bounce = ppd->host_link_state == HLS_DN_POLL && 10612 state == HLS_DN_POLL; 10613 10614 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__, 10615 link_state_name(ppd->host_link_state), 10616 link_state_name(orig_new_state), 10617 poll_bounce ? "(bounce) " : "", 10618 link_state_reason_name(ppd, state)); 10619 10620 /* 10621 * If we're going to a (HLS_*) link state that implies the logical 10622 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then 10623 * reset is_sm_config_started to 0. 10624 */ 10625 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE))) 10626 ppd->is_sm_config_started = 0; 10627 10628 /* 10629 * Do nothing if the states match. Let a poll to poll link bounce 10630 * go through. 10631 */ 10632 if (ppd->host_link_state == state && !poll_bounce) 10633 goto done; 10634 10635 switch (state) { 10636 case HLS_UP_INIT: 10637 if (ppd->host_link_state == HLS_DN_POLL && 10638 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { 10639 /* 10640 * Quick link up jumps from polling to here. 10641 * 10642 * Whether in normal or loopback mode, the 10643 * simulator jumps from polling to link up. 10644 * Accept that here. 
10645 			 */
10646 			/* OK */
10647 		} else if (ppd->host_link_state != HLS_GOING_UP) {
10648 			goto unexpected;
10649 		}
10650 
10651 		/*
10652 		 * Wait for Link_Up physical state.
10653 		 * Physical and Logical states should already have
10654 		 * transitioned to LinkUp and LinkInit respectively.
10655 		 */
10656 		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10657 		if (ret) {
10658 			dd_dev_err(dd,
10659 				   "%s: physical state did not change to LINK-UP\n",
10660 				   __func__);
10661 			break;
10662 		}
10663 
10664 		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10665 		if (ret) {
10666 			dd_dev_err(dd,
10667 				   "%s: logical state did not change to INIT\n",
10668 				   __func__);
10669 			break;
10670 		}
10671 
10672 		/* clear old transient LINKINIT_REASON code */
10673 		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10674 			ppd->linkinit_reason =
10675 				OPA_LINKINIT_REASON_LINKUP;
10676 
10677 		/* enable the port */
10678 		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10679 
10680 		handle_linkup_change(dd, 1);
10681 		pio_kernel_linkup(dd);
10682 
10683 		/*
10684 		 * After link up, a new link width will have been set.
10685 		 * Update the xmit counters with regard to the new
10686 		 * link width.
10687 		 */
10688 		update_xmit_counters(ppd, ppd->link_width_active);
10689 
10690 		ppd->host_link_state = HLS_UP_INIT;
10691 		update_statusp(ppd, IB_PORT_INIT);
10692 		break;
10693 	case HLS_UP_ARMED:
10694 		if (ppd->host_link_state != HLS_UP_INIT)
10695 			goto unexpected;
10696 
10697 		if (!data_vls_operational(ppd)) {
10698 			dd_dev_err(dd,
10699 				   "%s: Invalid data VL credits or mtu\n",
10700 				   __func__);
10701 			ret = -EINVAL;
10702 			break;
10703 		}
10704 
10705 		set_logical_state(dd, LSTATE_ARMED);
10706 		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10707 		if (ret) {
10708 			dd_dev_err(dd,
10709 				   "%s: logical state did not change to ARMED\n",
10710 				   __func__);
10711 			break;
10712 		}
10713 		ppd->host_link_state = HLS_UP_ARMED;
10714 		update_statusp(ppd, IB_PORT_ARMED);
10715 		/*
10716 		 * The simulator does not currently implement SMA messages,
10717 		 * so neighbor_normal is not set. Set it here when we first
10718 		 * move to Armed.
10719 		 */
10720 		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10721 			ppd->neighbor_normal = 1;
10722 		break;
10723 	case HLS_UP_ACTIVE:
10724 		if (ppd->host_link_state != HLS_UP_ARMED)
10725 			goto unexpected;
10726 
10727 		set_logical_state(dd, LSTATE_ACTIVE);
10728 		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10729 		if (ret) {
10730 			dd_dev_err(dd,
10731 				   "%s: logical state did not change to ACTIVE\n",
10732 				   __func__);
10733 		} else {
10734 			/* tell all engines to go running */
10735 			sdma_all_running(dd);
10736 			ppd->host_link_state = HLS_UP_ACTIVE;
10737 			update_statusp(ppd, IB_PORT_ACTIVE);
10738 
10739 			/* Signal the IB layer that the port has gone active */
10740 			event.device = &dd->verbs_dev.rdi.ibdev;
10741 			event.element.port_num = ppd->port;
10742 			event.event = IB_EVENT_PORT_ACTIVE;
10743 		}
10744 		break;
10745 	case HLS_DN_POLL:
10746 		if ((ppd->host_link_state == HLS_DN_DISABLE ||
10747 		     ppd->host_link_state == HLS_DN_OFFLINE) &&
10748 		    dd->dc_shutdown)
10749 			dc_start(dd);
10750 		/* Hand LED control to the DC */
10751 		write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10752 
10753 		if (ppd->host_link_state != HLS_DN_OFFLINE) {
10754 			u8 tmp = ppd->link_enabled;
10755 
10756 			ret = goto_offline(ppd, ppd->remote_link_down_reason);
10757 			if (ret) {
10758 				ppd->link_enabled = tmp;
10759 				break;
10760 			}
10761 			ppd->remote_link_down_reason = 0;
10762 
10763 			if (ppd->driver_link_ready)
10764 				ppd->link_enabled = 1;
10765 		}
10766 
10767 		set_all_slowpath(ppd->dd);
10768 		ret = set_local_link_attributes(ppd);
10769 		if (ret)
10770 			break;
10771 
10772 		ppd->port_error_action = 0;
10773 		ppd->host_link_state = HLS_DN_POLL;
10774 
10775 		if (quick_linkup) {
10776 			/* quick linkup does not go into polling */
10777 			ret = do_quick_linkup(dd);
10778 		} else {
10779 			ret1 = set_physical_link_state(dd, PLS_POLLING);
10780 			if (ret1 != HCMD_SUCCESS) {
10781 				dd_dev_err(dd,
10782 					   "Failed to transition to Polling link state, return 0x%x\n",
10783 					   ret1);
10784 				ret = -EINVAL;
10785 			}
10786 		}
10787 		ppd->offline_disabled_reason =
10788 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10789 		/*
10790 		 * If an error occurred above, go back to offline. The
10791 		 * caller may reschedule another attempt.
10792 */ 10793 if (ret) 10794 goto_offline(ppd, 0); 10795 else 10796 log_physical_state(ppd, PLS_POLLING); 10797 break; 10798 case HLS_DN_DISABLE: 10799 /* link is disabled */ 10800 ppd->link_enabled = 0; 10801 10802 /* allow any state to transition to disabled */ 10803 10804 /* must transition to offline first */ 10805 if (ppd->host_link_state != HLS_DN_OFFLINE) { 10806 ret = goto_offline(ppd, ppd->remote_link_down_reason); 10807 if (ret) 10808 break; 10809 ppd->remote_link_down_reason = 0; 10810 } 10811 10812 if (!dd->dc_shutdown) { 10813 ret1 = set_physical_link_state(dd, PLS_DISABLED); 10814 if (ret1 != HCMD_SUCCESS) { 10815 dd_dev_err(dd, 10816 "Failed to transition to Disabled link state, return 0x%x\n", 10817 ret1); 10818 ret = -EINVAL; 10819 break; 10820 } 10821 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000); 10822 if (ret) { 10823 dd_dev_err(dd, 10824 "%s: physical state did not change to DISABLED\n", 10825 __func__); 10826 break; 10827 } 10828 dc_shutdown(dd); 10829 } 10830 ppd->host_link_state = HLS_DN_DISABLE; 10831 break; 10832 case HLS_DN_OFFLINE: 10833 if (ppd->host_link_state == HLS_DN_DISABLE) 10834 dc_start(dd); 10835 10836 /* allow any state to transition to offline */ 10837 ret = goto_offline(ppd, ppd->remote_link_down_reason); 10838 if (!ret) 10839 ppd->remote_link_down_reason = 0; 10840 break; 10841 case HLS_VERIFY_CAP: 10842 if (ppd->host_link_state != HLS_DN_POLL) 10843 goto unexpected; 10844 ppd->host_link_state = HLS_VERIFY_CAP; 10845 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP); 10846 break; 10847 case HLS_GOING_UP: 10848 if (ppd->host_link_state != HLS_VERIFY_CAP) 10849 goto unexpected; 10850 10851 ret1 = set_physical_link_state(dd, PLS_LINKUP); 10852 if (ret1 != HCMD_SUCCESS) { 10853 dd_dev_err(dd, 10854 "Failed to transition to link up state, return 0x%x\n", 10855 ret1); 10856 ret = -EINVAL; 10857 break; 10858 } 10859 ppd->host_link_state = HLS_GOING_UP; 10860 break; 10861 10862 case HLS_GOING_OFFLINE: /* transient within goto_offline() */ 10863 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */ 10864 default: 10865 dd_dev_info(dd, "%s: state 0x%x: not supported\n", 10866 __func__, state); 10867 ret = -EINVAL; 10868 break; 10869 } 10870 10871 goto done; 10872 10873 unexpected: 10874 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n", 10875 __func__, link_state_name(ppd->host_link_state), 10876 link_state_name(state)); 10877 ret = -EINVAL; 10878 10879 done: 10880 mutex_unlock(&ppd->hls_lock); 10881 10882 if (event.device) 10883 ib_dispatch_event(&event); 10884 10885 return ret; 10886 } 10887 10888 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val) 10889 { 10890 u64 reg; 10891 int ret = 0; 10892 10893 switch (which) { 10894 case HFI1_IB_CFG_LIDLMC: 10895 set_lidlmc(ppd); 10896 break; 10897 case HFI1_IB_CFG_VL_HIGH_LIMIT: 10898 /* 10899 * The VL Arbitrator high limit is sent in units of 4k 10900 * bytes, while HFI stores it in units of 64 bytes. 
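 *
 * For example, an incoming val of 2 (2 * 4 KB = 8 KB) is written to
 * the CSR as 2 * (4096 / 64) = 128 units of 64 bytes.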
10901 */ 10902 val *= 4096 / 64; 10903 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK) 10904 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT; 10905 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); 10906 break; 10907 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */ 10908 /* HFI only supports POLL as the default link down state */ 10909 if (val != HLS_DN_POLL) 10910 ret = -EINVAL; 10911 break; 10912 case HFI1_IB_CFG_OP_VLS: 10913 if (ppd->vls_operational != val) { 10914 ppd->vls_operational = val; 10915 if (!ppd->port) 10916 ret = -EINVAL; 10917 } 10918 break; 10919 /* 10920 * For link width, link width downgrade, and speed enable, always AND 10921 * the setting with what is actually supported. This has two benefits. 10922 * First, enabled can't have unsupported values, no matter what the 10923 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean 10924 * "fill in with your supported value" have all the bits in the 10925 * field set, so simply ANDing with supported has the desired result. 10926 */ 10927 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ 10928 ppd->link_width_enabled = val & ppd->link_width_supported; 10929 break; 10930 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */ 10931 ppd->link_width_downgrade_enabled = 10932 val & ppd->link_width_downgrade_supported; 10933 break; 10934 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */ 10935 ppd->link_speed_enabled = val & ppd->link_speed_supported; 10936 break; 10937 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */ 10938 /* 10939 * HFI does not follow IB specs, save this value 10940 * so we can report it, if asked. 10941 */ 10942 ppd->overrun_threshold = val; 10943 break; 10944 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */ 10945 /* 10946 * HFI does not follow IB specs, save this value 10947 * so we can report it, if asked. 10948 */ 10949 ppd->phy_error_threshold = val; 10950 break; 10951 10952 case HFI1_IB_CFG_MTU: 10953 set_send_length(ppd); 10954 break; 10955 10956 case HFI1_IB_CFG_PKEYS: 10957 if (HFI1_CAP_IS_KSET(PKEY_CHECK)) 10958 set_partition_keys(ppd); 10959 break; 10960 10961 default: 10962 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) 10963 dd_dev_info(ppd->dd, 10964 "%s: which %s, val 0x%x: not implemented\n", 10965 __func__, ib_cfg_name(which), val); 10966 break; 10967 } 10968 return ret; 10969 } 10970 10971 /* begin functions related to vl arbitration table caching */ 10972 static void init_vl_arb_caches(struct hfi1_pportdata *ppd) 10973 { 10974 int i; 10975 10976 BUILD_BUG_ON(VL_ARB_TABLE_SIZE != 10977 VL_ARB_LOW_PRIO_TABLE_SIZE); 10978 BUILD_BUG_ON(VL_ARB_TABLE_SIZE != 10979 VL_ARB_HIGH_PRIO_TABLE_SIZE); 10980 10981 /* 10982 * Note that we always return values directly from the 10983 * 'vl_arb_cache' (and do no CSR reads) in response to a 10984 * 'Get(VLArbTable)'. This is obviously correct after a 10985 * 'Set(VLArbTable)', since the cache will then be up to 10986 * date. But it's also correct prior to any 'Set(VLArbTable)' 10987 * since then both the cache, and the relevant h/w registers 10988 * will be zeroed. 10989 */ 10990 10991 for (i = 0; i < MAX_PRIO_TABLE; i++) 10992 spin_lock_init(&ppd->vl_arb_cache[i].lock); 10993 } 10994 10995 /* 10996 * vl_arb_lock_cache 10997 * 10998 * All other vl_arb_* functions should be called only after locking 10999 * the cache. 
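 *
 * Typical usage, as in fm_get_table() below (sketch):
 *
 *      vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *      vl_arb_get_cache(vlc, t);
 *      vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);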
11000 */ 11001 static inline struct vl_arb_cache * 11002 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx) 11003 { 11004 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE) 11005 return NULL; 11006 spin_lock(&ppd->vl_arb_cache[idx].lock); 11007 return &ppd->vl_arb_cache[idx]; 11008 } 11009 11010 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx) 11011 { 11012 spin_unlock(&ppd->vl_arb_cache[idx].lock); 11013 } 11014 11015 static void vl_arb_get_cache(struct vl_arb_cache *cache, 11016 struct ib_vl_weight_elem *vl) 11017 { 11018 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); 11019 } 11020 11021 static void vl_arb_set_cache(struct vl_arb_cache *cache, 11022 struct ib_vl_weight_elem *vl) 11023 { 11024 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); 11025 } 11026 11027 static int vl_arb_match_cache(struct vl_arb_cache *cache, 11028 struct ib_vl_weight_elem *vl) 11029 { 11030 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); 11031 } 11032 11033 /* end functions related to vl arbitration table caching */ 11034 11035 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target, 11036 u32 size, struct ib_vl_weight_elem *vl) 11037 { 11038 struct hfi1_devdata *dd = ppd->dd; 11039 u64 reg; 11040 unsigned int i, is_up = 0; 11041 int drain, ret = 0; 11042 11043 mutex_lock(&ppd->hls_lock); 11044 11045 if (ppd->host_link_state & HLS_UP) 11046 is_up = 1; 11047 11048 drain = !is_ax(dd) && is_up; 11049 11050 if (drain) 11051 /* 11052 * Before adjusting VL arbitration weights, empty per-VL 11053 * FIFOs, otherwise a packet whose VL weight is being 11054 * set to 0 could get stuck in a FIFO with no chance to 11055 * egress. 11056 */ 11057 ret = stop_drain_data_vls(dd); 11058 11059 if (ret) { 11060 dd_dev_err( 11061 dd, 11062 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n", 11063 __func__); 11064 goto err; 11065 } 11066 11067 for (i = 0; i < size; i++, vl++) { 11068 /* 11069 * NOTE: The low priority shift and mask are used here, but 11070 * they are the same for both the low and high registers. 11071 */ 11072 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) 11073 << SEND_LOW_PRIORITY_LIST_VL_SHIFT) 11074 | (((u64)vl->weight 11075 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK) 11076 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT); 11077 write_csr(dd, target + (i * 8), reg); 11078 } 11079 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE); 11080 11081 if (drain) 11082 open_fill_data_vls(dd); /* reopen all VLs */ 11083 11084 err: 11085 mutex_unlock(&ppd->hls_lock); 11086 11087 return ret; 11088 } 11089 11090 /* 11091 * Read one credit merge VL register. 11092 */ 11093 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr, 11094 struct vl_limit *vll) 11095 { 11096 u64 reg = read_csr(dd, csr); 11097 11098 vll->dedicated = cpu_to_be16( 11099 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT) 11100 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK); 11101 vll->shared = cpu_to_be16( 11102 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT) 11103 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK); 11104 } 11105 11106 /* 11107 * Read the current credit merge limits. 
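 *
 * Returns sizeof(struct buffer_control); the total-credit limit is
 * optional. set_buffer_control() below fetches both (sketch):
 *
 *      get_buffer_control(dd, &cur_bc, &cur_total);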
11108  */
11109 static int get_buffer_control(struct hfi1_devdata *dd,
11110 			      struct buffer_control *bc, u16 *overall_limit)
11111 {
11112 	u64 reg;
11113 	int i;
11114 
11115 	/* not all entries are filled in */
11116 	memset(bc, 0, sizeof(*bc));
11117 
11118 	/* OPA and HFI have a 1-1 mapping */
11119 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
11120 		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11121 
11122 	/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11123 	read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11124 
11125 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11126 	bc->overall_shared_limit = cpu_to_be16(
11127 		(reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11128 		& SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11129 	if (overall_limit)
11130 		*overall_limit = (reg
11131 			>> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11132 			& SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11133 	return sizeof(struct buffer_control);
11134 }
11135 
11136 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11137 {
11138 	u64 reg;
11139 	int i;
11140 
11141 	/* each register contains 16 SC->VLnt mappings, 4 bits each */
11142 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11143 	for (i = 0; i < sizeof(u64); i++) {
11144 		u8 byte = *(((u8 *)&reg) + i);
11145 
11146 		dp->vlnt[2 * i] = byte & 0xf;
11147 		dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11148 	}
11149 
11150 	reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11151 	for (i = 0; i < sizeof(u64); i++) {
11152 		u8 byte = *(((u8 *)&reg) + i);
11153 
11154 		dp->vlnt[16 + (2 * i)] = byte & 0xf;
11155 		dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11156 	}
11157 	return sizeof(struct sc2vlnt);
11158 }
11159 
11160 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11161 			      struct ib_vl_weight_elem *vl)
11162 {
11163 	unsigned int i;
11164 
11165 	for (i = 0; i < nelems; i++, vl++) {
11166 		vl->vl = 0xf;
11167 		vl->weight = 0;
11168 	}
11169 }
11170 
11171 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11172 {
11173 	write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11174 		  DC_SC_VL_VAL(15_0,
11175 			       0, dp->vlnt[0] & 0xf,
11176 			       1, dp->vlnt[1] & 0xf,
11177 			       2, dp->vlnt[2] & 0xf,
11178 			       3, dp->vlnt[3] & 0xf,
11179 			       4, dp->vlnt[4] & 0xf,
11180 			       5, dp->vlnt[5] & 0xf,
11181 			       6, dp->vlnt[6] & 0xf,
11182 			       7, dp->vlnt[7] & 0xf,
11183 			       8, dp->vlnt[8] & 0xf,
11184 			       9, dp->vlnt[9] & 0xf,
11185 			       10, dp->vlnt[10] & 0xf,
11186 			       11, dp->vlnt[11] & 0xf,
11187 			       12, dp->vlnt[12] & 0xf,
11188 			       13, dp->vlnt[13] & 0xf,
11189 			       14, dp->vlnt[14] & 0xf,
11190 			       15, dp->vlnt[15] & 0xf));
11191 	write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11192 		  DC_SC_VL_VAL(31_16,
11193 			       16, dp->vlnt[16] & 0xf,
11194 			       17, dp->vlnt[17] & 0xf,
11195 			       18, dp->vlnt[18] & 0xf,
11196 			       19, dp->vlnt[19] & 0xf,
11197 			       20, dp->vlnt[20] & 0xf,
11198 			       21, dp->vlnt[21] & 0xf,
11199 			       22, dp->vlnt[22] & 0xf,
11200 			       23, dp->vlnt[23] & 0xf,
11201 			       24, dp->vlnt[24] & 0xf,
11202 			       25, dp->vlnt[25] & 0xf,
11203 			       26, dp->vlnt[26] & 0xf,
11204 			       27, dp->vlnt[27] & 0xf,
11205 			       28, dp->vlnt[28] & 0xf,
11206 			       29, dp->vlnt[29] & 0xf,
11207 			       30, dp->vlnt[30] & 0xf,
11208 			       31, dp->vlnt[31] & 0xf));
11209 }
11210 
11211 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11212 			u16 limit)
11213 {
11214 	if (limit != 0)
11215 		dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11216 			    what, (int)limit, idx);
11217 }
11218 
11219 /* change only the shared limit portion of SendCmGlobalCredit */
11220 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11221 {
11222 	u64 reg;
11223 
11224 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11225 	reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11226 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11227 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11228 }
11229 
11230 /* change only the total credit limit portion of SendCmGlobalCredit */
11231 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11232 {
11233 	u64 reg;
11234 
11235 	reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11236 	reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11237 	reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11238 	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11239 }
11240 
11241 /* set the given per-VL shared limit */
11242 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11243 {
11244 	u64 reg;
11245 	u32 addr;
11246 
11247 	if (vl < TXE_NUM_DATA_VL)
11248 		addr = SEND_CM_CREDIT_VL + (8 * vl);
11249 	else
11250 		addr = SEND_CM_CREDIT_VL15;
11251 
11252 	reg = read_csr(dd, addr);
11253 	reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11254 	reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11255 	write_csr(dd, addr, reg);
11256 }
11257 
11258 /* set the given per-VL dedicated limit */
11259 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11260 {
11261 	u64 reg;
11262 	u32 addr;
11263 
11264 	if (vl < TXE_NUM_DATA_VL)
11265 		addr = SEND_CM_CREDIT_VL + (8 * vl);
11266 	else
11267 		addr = SEND_CM_CREDIT_VL15;
11268 
11269 	reg = read_csr(dd, addr);
11270 	reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11271 	reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11272 	write_csr(dd, addr, reg);
11273 }
11274 
11275 /* spin until the given per-VL status mask bits clear */
11276 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11277 				     const char *which)
11278 {
11279 	unsigned long timeout;
11280 	u64 reg;
11281 
11282 	timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11283 	while (1) {
11284 		reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11285 
11286 		if (reg == 0)
11287 			return; /* success */
11288 		if (time_after(jiffies, timeout))
11289 			break; /* timed out */
11290 		udelay(1);
11291 	}
11292 
11293 	dd_dev_err(dd,
11294 		   "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11295 		   which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11296 	/*
11297 	 * If this occurs, it is likely there was a credit loss on the link.
11298 	 * The only recovery from that is a link bounce.
11299 	 */
11300 	dd_dev_err(dd,
11301 		   "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11302 }
11303 
11304 /*
11305  * The number of credits on the VLs may be changed while everything
11306  * is "live", but the following algorithm must be followed due to
11307  * how the hardware is actually implemented. In particular,
11308  * Return_Credit_Status[] is the only correct status check.
11309  *
11310  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11311  *     set Global_Shared_Credit_Limit = 0
11312  *     use_all_vl = 1
11313  * mask0 = all VLs that are changing either dedicated or shared limits
11314  * set Shared_Limit[mask0] = 0
11315  * spin until Return_Credit_Status[use_all_vl ?
all VL : mask0] == 0 11316 * if (changing any dedicated limit) 11317 * mask1 = all VLs that are lowering dedicated limits 11318 * lower Dedicated_Limit[mask1] 11319 * spin until Return_Credit_Status[mask1] == 0 11320 * raise Dedicated_Limits 11321 * raise Shared_Limits 11322 * raise Global_Shared_Credit_Limit 11323 * 11324 * lower = if the new limit is lower, set the limit to the new value 11325 * raise = if the new limit is higher than the current value (may be changed 11326 * earlier in the algorithm), set the new limit to the new value 11327 */ 11328 int set_buffer_control(struct hfi1_pportdata *ppd, 11329 struct buffer_control *new_bc) 11330 { 11331 struct hfi1_devdata *dd = ppd->dd; 11332 u64 changing_mask, ld_mask, stat_mask; 11333 int change_count; 11334 int i, use_all_mask; 11335 int this_shared_changing; 11336 int vl_count = 0, ret; 11337 /* 11338 * A0: add the variable any_shared_limit_changing below and in the 11339 * algorithm above. If removing A0 support, it can be removed. 11340 */ 11341 int any_shared_limit_changing; 11342 struct buffer_control cur_bc; 11343 u8 changing[OPA_MAX_VLS]; 11344 u8 lowering_dedicated[OPA_MAX_VLS]; 11345 u16 cur_total; 11346 u32 new_total = 0; 11347 const u64 all_mask = 11348 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK 11349 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK 11350 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK 11351 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK 11352 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK 11353 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK 11354 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK 11355 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK 11356 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK; 11357 11358 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15) 11359 #define NUM_USABLE_VLS 16 /* look at VL15 and less */ 11360 11361 /* find the new total credits, do sanity check on unused VLs */ 11362 for (i = 0; i < OPA_MAX_VLS; i++) { 11363 if (valid_vl(i)) { 11364 new_total += be16_to_cpu(new_bc->vl[i].dedicated); 11365 continue; 11366 } 11367 nonzero_msg(dd, i, "dedicated", 11368 be16_to_cpu(new_bc->vl[i].dedicated)); 11369 nonzero_msg(dd, i, "shared", 11370 be16_to_cpu(new_bc->vl[i].shared)); 11371 new_bc->vl[i].dedicated = 0; 11372 new_bc->vl[i].shared = 0; 11373 } 11374 new_total += be16_to_cpu(new_bc->overall_shared_limit); 11375 11376 /* fetch the current values */ 11377 get_buffer_control(dd, &cur_bc, &cur_total); 11378 11379 /* 11380 * Create the masks we will use. 
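 *
 * changing_mask marks every VL whose dedicated or shared limit moves;
 * ld_mask marks only the VLs whose dedicated limit is being lowered.
 * Both are built from the per-VL Return_Credit_Status bit positions.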
11381 */ 11382 memset(changing, 0, sizeof(changing)); 11383 memset(lowering_dedicated, 0, sizeof(lowering_dedicated)); 11384 /* 11385 * NOTE: Assumes that the individual VL bits are adjacent and in 11386 * increasing order 11387 */ 11388 stat_mask = 11389 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK; 11390 changing_mask = 0; 11391 ld_mask = 0; 11392 change_count = 0; 11393 any_shared_limit_changing = 0; 11394 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) { 11395 if (!valid_vl(i)) 11396 continue; 11397 this_shared_changing = new_bc->vl[i].shared 11398 != cur_bc.vl[i].shared; 11399 if (this_shared_changing) 11400 any_shared_limit_changing = 1; 11401 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || 11402 this_shared_changing) { 11403 changing[i] = 1; 11404 changing_mask |= stat_mask; 11405 change_count++; 11406 } 11407 if (be16_to_cpu(new_bc->vl[i].dedicated) < 11408 be16_to_cpu(cur_bc.vl[i].dedicated)) { 11409 lowering_dedicated[i] = 1; 11410 ld_mask |= stat_mask; 11411 } 11412 } 11413 11414 /* bracket the credit change with a total adjustment */ 11415 if (new_total > cur_total) 11416 set_global_limit(dd, new_total); 11417 11418 /* 11419 * Start the credit change algorithm. 11420 */ 11421 use_all_mask = 0; 11422 if ((be16_to_cpu(new_bc->overall_shared_limit) < 11423 be16_to_cpu(cur_bc.overall_shared_limit)) || 11424 (is_ax(dd) && any_shared_limit_changing)) { 11425 set_global_shared(dd, 0); 11426 cur_bc.overall_shared_limit = 0; 11427 use_all_mask = 1; 11428 } 11429 11430 for (i = 0; i < NUM_USABLE_VLS; i++) { 11431 if (!valid_vl(i)) 11432 continue; 11433 11434 if (changing[i]) { 11435 set_vl_shared(dd, i, 0); 11436 cur_bc.vl[i].shared = 0; 11437 } 11438 } 11439 11440 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask, 11441 "shared"); 11442 11443 if (change_count > 0) { 11444 for (i = 0; i < NUM_USABLE_VLS; i++) { 11445 if (!valid_vl(i)) 11446 continue; 11447 11448 if (lowering_dedicated[i]) { 11449 set_vl_dedicated(dd, i, 11450 be16_to_cpu(new_bc-> 11451 vl[i].dedicated)); 11452 cur_bc.vl[i].dedicated = 11453 new_bc->vl[i].dedicated; 11454 } 11455 } 11456 11457 wait_for_vl_status_clear(dd, ld_mask, "dedicated"); 11458 11459 /* now raise all dedicated that are going up */ 11460 for (i = 0; i < NUM_USABLE_VLS; i++) { 11461 if (!valid_vl(i)) 11462 continue; 11463 11464 if (be16_to_cpu(new_bc->vl[i].dedicated) > 11465 be16_to_cpu(cur_bc.vl[i].dedicated)) 11466 set_vl_dedicated(dd, i, 11467 be16_to_cpu(new_bc-> 11468 vl[i].dedicated)); 11469 } 11470 } 11471 11472 /* next raise all shared that are going up */ 11473 for (i = 0; i < NUM_USABLE_VLS; i++) { 11474 if (!valid_vl(i)) 11475 continue; 11476 11477 if (be16_to_cpu(new_bc->vl[i].shared) > 11478 be16_to_cpu(cur_bc.vl[i].shared)) 11479 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); 11480 } 11481 11482 /* finally raise the global shared */ 11483 if (be16_to_cpu(new_bc->overall_shared_limit) > 11484 be16_to_cpu(cur_bc.overall_shared_limit)) 11485 set_global_shared(dd, 11486 be16_to_cpu(new_bc->overall_shared_limit)); 11487 11488 /* bracket the credit change with a total adjustment */ 11489 if (new_total < cur_total) 11490 set_global_limit(dd, new_total); 11491 11492 /* 11493 * Determine the actual number of operational VLS using the number of 11494 * dedicated and shared credits for each VL. 
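 * A VL counts as operational if it is left with any dedicated or
 * shared credits at all; the SDMA and PIO maps are then rebuilt
 * accordingly.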
11495 */ 11496 if (change_count > 0) { 11497 for (i = 0; i < TXE_NUM_DATA_VL; i++) 11498 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || 11499 be16_to_cpu(new_bc->vl[i].shared) > 0) 11500 vl_count++; 11501 ppd->actual_vls_operational = vl_count; 11502 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? 11503 ppd->actual_vls_operational : 11504 ppd->vls_operational, 11505 NULL); 11506 if (ret == 0) 11507 ret = pio_map_init(dd, ppd->port - 1, vl_count ? 11508 ppd->actual_vls_operational : 11509 ppd->vls_operational, NULL); 11510 if (ret) 11511 return ret; 11512 } 11513 return 0; 11514 } 11515 11516 /* 11517 * Read the given fabric manager table. Return the size of the 11518 * table (in bytes) on success, and a negative error code on 11519 * failure. 11520 */ 11521 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t) 11522 11523 { 11524 int size; 11525 struct vl_arb_cache *vlc; 11526 11527 switch (which) { 11528 case FM_TBL_VL_HIGH_ARB: 11529 size = 256; 11530 /* 11531 * OPA specifies 128 elements (of 2 bytes each), though 11532 * HFI supports only 16 elements in h/w. 11533 */ 11534 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); 11535 vl_arb_get_cache(vlc, t); 11536 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11537 break; 11538 case FM_TBL_VL_LOW_ARB: 11539 size = 256; 11540 /* 11541 * OPA specifies 128 elements (of 2 bytes each), though 11542 * HFI supports only 16 elements in h/w. 11543 */ 11544 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); 11545 vl_arb_get_cache(vlc, t); 11546 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11547 break; 11548 case FM_TBL_BUFFER_CONTROL: 11549 size = get_buffer_control(ppd->dd, t, NULL); 11550 break; 11551 case FM_TBL_SC2VLNT: 11552 size = get_sc2vlnt(ppd->dd, t); 11553 break; 11554 case FM_TBL_VL_PREEMPT_ELEMS: 11555 size = 256; 11556 /* OPA specifies 128 elements, of 2 bytes each */ 11557 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); 11558 break; 11559 case FM_TBL_VL_PREEMPT_MATRIX: 11560 size = 256; 11561 /* 11562 * OPA specifies that this is the same size as the VL 11563 * arbitration tables (i.e., 256 bytes). 11564 */ 11565 break; 11566 default: 11567 return -EINVAL; 11568 } 11569 return size; 11570 } 11571 11572 /* 11573 * Write the given fabric manager table. 11574 */ 11575 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t) 11576 { 11577 int ret = 0; 11578 struct vl_arb_cache *vlc; 11579 11580 switch (which) { 11581 case FM_TBL_VL_HIGH_ARB: 11582 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); 11583 if (vl_arb_match_cache(vlc, t)) { 11584 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11585 break; 11586 } 11587 vl_arb_set_cache(vlc, t); 11588 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11589 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST, 11590 VL_ARB_HIGH_PRIO_TABLE_SIZE, t); 11591 break; 11592 case FM_TBL_VL_LOW_ARB: 11593 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); 11594 if (vl_arb_match_cache(vlc, t)) { 11595 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11596 break; 11597 } 11598 vl_arb_set_cache(vlc, t); 11599 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11600 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST, 11601 VL_ARB_LOW_PRIO_TABLE_SIZE, t); 11602 break; 11603 case FM_TBL_BUFFER_CONTROL: 11604 ret = set_buffer_control(ppd, t); 11605 break; 11606 case FM_TBL_SC2VLNT: 11607 set_sc2vlnt(ppd->dd, t); 11608 break; 11609 default: 11610 ret = -EINVAL; 11611 } 11612 return ret; 11613 } 11614 11615 /* 11616 * Disable all data VLs. 11617 * 11618 * Return 0 if disabled, non-zero if the VLs cannot be disabled. 
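 * (A0 hardware cannot disable the data VLs, hence the unconditional
 * non-zero return on is_ax() parts; open_fill_data_vls() mirrors
 * the same check.)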
11619  */
11620 static int disable_data_vls(struct hfi1_devdata *dd)
11621 {
11622 	if (is_ax(dd))
11623 		return 1;
11624 
11625 	pio_send_control(dd, PSC_DATA_VL_DISABLE);
11626 
11627 	return 0;
11628 }
11629 
11630 /*
11631  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11632  * Just re-enables all data VLs (the "fill" part happens
11633  * automatically - the name was chosen for symmetry with
11634  * stop_drain_data_vls()).
11635  *
11636  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11637  */
11638 int open_fill_data_vls(struct hfi1_devdata *dd)
11639 {
11640 	if (is_ax(dd))
11641 		return 1;
11642 
11643 	pio_send_control(dd, PSC_DATA_VL_ENABLE);
11644 
11645 	return 0;
11646 }
11647 
11648 /*
11649  * drain_data_vls() - assumes that disable_data_vls() has been called,
11650  * then waits for the occupancy (of per-VL FIFOs) of all contexts and
11651  * SDMA engines to drop to 0.
11652  */
11653 static void drain_data_vls(struct hfi1_devdata *dd)
11654 {
11655 	sc_wait(dd);
11656 	sdma_wait(dd);
11657 	pause_for_credit_return(dd);
11658 }
11659 
11660 /*
11661  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11662  *
11663  * Use open_fill_data_vls() to resume using data VLs. This pair is
11664  * meant to be used like this:
11665  *
11666  * stop_drain_data_vls(dd);
11667  * // do things with per-VL resources
11668  * open_fill_data_vls(dd);
11669  */
11670 int stop_drain_data_vls(struct hfi1_devdata *dd)
11671 {
11672 	int ret;
11673 
11674 	ret = disable_data_vls(dd);
11675 	if (ret == 0)
11676 		drain_data_vls(dd);
11677 
11678 	return ret;
11679 }
11680 
11681 /*
11682  * Convert a nanosecond time to a cclock count. No matter how slow
11683  * the cclock, a non-zero ns will always have a non-zero result.
11684  */
11685 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11686 {
11687 	u32 cclocks;
11688 
11689 	if (dd->icode == ICODE_FPGA_EMULATION)
11690 		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11691 	else /* simulation pretends to be ASIC */
11692 		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11693 	if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11694 		cclocks = 1;
11695 	return cclocks;
11696 }
11697 
11698 /*
11699  * Convert a cclock count to nanoseconds. No matter how slow
11700  * the cclock, a non-zero cclocks will always have a non-zero result.
11701  */
11702 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11703 {
11704 	u32 ns;
11705 
11706 	if (dd->icode == ICODE_FPGA_EMULATION)
11707 		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11708 	else /* simulation pretends to be ASIC */
11709 		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11710 	if (cclocks && !ns)
11711 		ns = 1;
11712 	return ns;
11713 }
11714 
11715 /*
11716  * Dynamically adjust the receive interrupt timeout for a context based on
11717  * incoming packet rate.
11718  *
11719  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11720  */
11721 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11722 {
11723 	struct hfi1_devdata *dd = rcd->dd;
11724 	u32 timeout = rcd->rcvavail_timeout;
11725 
11726 	/*
11727 	 * This algorithm doubles or halves the timeout depending on whether
11728 	 * the number of packets received in this interrupt was less than or
11729 	 * greater than or equal to the interrupt count.
11730 	 *
11731 	 * The calculations below do not allow a steady state to be achieved.
11732 	 * Only at the endpoints is it possible to have an unchanging
11733 	 * timeout.
11734 	 */
11735 	if (npkts < rcv_intr_count) {
11736 		/*
11737 		 * Not enough packets arrived before the timeout, adjust
11738 		 * timeout downward.
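		 * For example, with the default rcv_intr_count of 16, an
		 * interrupt that handled only 5 packets halves the timeout
		 * (bounded below at 1).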
11739 */ 11740 if (timeout < 2) /* already at minimum? */ 11741 return; 11742 timeout >>= 1; 11743 } else { 11744 /* 11745 * More than enough packets arrived before the timeout, adjust 11746 * timeout upward. 11747 */ 11748 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ 11749 return; 11750 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); 11751 } 11752 11753 rcd->rcvavail_timeout = timeout; 11754 /* 11755 * timeout cannot be larger than rcv_intr_timeout_csr which has already 11756 * been verified to be in range 11757 */ 11758 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, 11759 (u64)timeout << 11760 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); 11761 } 11762 11763 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, 11764 u32 intr_adjust, u32 npkts) 11765 { 11766 struct hfi1_devdata *dd = rcd->dd; 11767 u64 reg; 11768 u32 ctxt = rcd->ctxt; 11769 11770 /* 11771 * Need to write timeout register before updating RcvHdrHead to ensure 11772 * that a new value is used when the HW decides to restart counting. 11773 */ 11774 if (intr_adjust) 11775 adjust_rcv_timeout(rcd, npkts); 11776 if (updegr) { 11777 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK) 11778 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT; 11779 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); 11780 } 11781 mmiowb(); 11782 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) | 11783 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK) 11784 << RCV_HDR_HEAD_HEAD_SHIFT); 11785 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); 11786 mmiowb(); 11787 } 11788 11789 u32 hdrqempty(struct hfi1_ctxtdata *rcd) 11790 { 11791 u32 head, tail; 11792 11793 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) 11794 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT; 11795 11796 if (rcd->rcvhdrtail_kvaddr) 11797 tail = get_rcvhdrtail(rcd); 11798 else 11799 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); 11800 11801 return head == tail; 11802 } 11803 11804 /* 11805 * Context Control and Receive Array encoding for buffer size: 11806 * 0x0 invalid 11807 * 0x1 4 KB 11808 * 0x2 8 KB 11809 * 0x3 16 KB 11810 * 0x4 32 KB 11811 * 0x5 64 KB 11812 * 0x6 128 KB 11813 * 0x7 256 KB 11814 * 0x8 512 KB (Receive Array only) 11815 * 0x9 1 MB (Receive Array only) 11816 * 0xa 2 MB (Receive Array only) 11817 * 11818 * 0xB-0xF - reserved (Receive Array only) 11819 * 11820 * 11821 * This routine assumes that the value has already been sanity checked. 
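 *
 * E.g. encoded_size(64 * 1024) yields 0x5, while an unlisted size
 * falls back to the minimum encoding 0x1 (4 KB).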
11822 */ 11823 static u32 encoded_size(u32 size) 11824 { 11825 switch (size) { 11826 case 4 * 1024: return 0x1; 11827 case 8 * 1024: return 0x2; 11828 case 16 * 1024: return 0x3; 11829 case 32 * 1024: return 0x4; 11830 case 64 * 1024: return 0x5; 11831 case 128 * 1024: return 0x6; 11832 case 256 * 1024: return 0x7; 11833 case 512 * 1024: return 0x8; 11834 case 1 * 1024 * 1024: return 0x9; 11835 case 2 * 1024 * 1024: return 0xa; 11836 } 11837 return 0x1; /* if invalid, go with the minimum size */ 11838 } 11839 11840 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, 11841 struct hfi1_ctxtdata *rcd) 11842 { 11843 u64 rcvctrl, reg; 11844 int did_enable = 0; 11845 u16 ctxt; 11846 11847 if (!rcd) 11848 return; 11849 11850 ctxt = rcd->ctxt; 11851 11852 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op); 11853 11854 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); 11855 /* if the context already enabled, don't do the extra steps */ 11856 if ((op & HFI1_RCVCTRL_CTXT_ENB) && 11857 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) { 11858 /* reset the tail and hdr addresses, and sequence count */ 11859 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, 11860 rcd->rcvhdrq_dma); 11861 if (rcd->rcvhdrtail_kvaddr) 11862 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 11863 rcd->rcvhdrqtailaddr_dma); 11864 rcd->seq_cnt = 1; 11865 11866 /* reset the cached receive header queue head value */ 11867 rcd->head = 0; 11868 11869 /* 11870 * Zero the receive header queue so we don't get false 11871 * positives when checking the sequence number. The 11872 * sequence numbers could land exactly on the same spot. 11873 * E.g. a rcd restart before the receive header wrapped. 11874 */ 11875 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); 11876 11877 /* starting timeout */ 11878 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; 11879 11880 /* enable the context */ 11881 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK; 11882 11883 /* clean the egr buffer size first */ 11884 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; 11885 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) 11886 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK) 11887 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT; 11888 11889 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ 11890 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); 11891 did_enable = 1; 11892 11893 /* zero RcvEgrIndexHead */ 11894 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); 11895 11896 /* set eager count and base index */ 11897 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) 11898 & RCV_EGR_CTRL_EGR_CNT_MASK) 11899 << RCV_EGR_CTRL_EGR_CNT_SHIFT) | 11900 (((rcd->eager_base >> RCV_SHIFT) 11901 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK) 11902 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT); 11903 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); 11904 11905 /* 11906 * Set TID (expected) count and base index. 11907 * rcd->expected_count is set to individual RcvArray entries, 11908 * not pairs, and the CSR takes a pair-count in groups of 11909 * four, so divide by 8. 
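		 * For example, an expected_count of 512 RcvArray entries
		 * (256 pairs) is written to the CSR as 512 / 8 = 64.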
11910 */ 11911 reg = (((rcd->expected_count >> RCV_SHIFT) 11912 & RCV_TID_CTRL_TID_PAIR_CNT_MASK) 11913 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) | 11914 (((rcd->expected_base >> RCV_SHIFT) 11915 & RCV_TID_CTRL_TID_BASE_INDEX_MASK) 11916 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT); 11917 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); 11918 if (ctxt == HFI1_CTRL_CTXT) 11919 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT); 11920 } 11921 if (op & HFI1_RCVCTRL_CTXT_DIS) { 11922 write_csr(dd, RCV_VL15, 0); 11923 /* 11924 * When receive context is being disabled turn on tail 11925 * update with a dummy tail address and then disable 11926 * receive context. 11927 */ 11928 if (dd->rcvhdrtail_dummy_dma) { 11929 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 11930 dd->rcvhdrtail_dummy_dma); 11931 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */ 11932 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; 11933 } 11934 11935 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK; 11936 } 11937 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) { 11938 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, 11939 IS_RCVAVAIL_START + rcd->ctxt, true); 11940 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; 11941 } 11942 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) { 11943 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, 11944 IS_RCVAVAIL_START + rcd->ctxt, false); 11945 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; 11946 } 11947 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr) 11948 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; 11949 if (op & HFI1_RCVCTRL_TAILUPD_DIS) { 11950 /* See comment on RcvCtxtCtrl.TailUpd above */ 11951 if (!(op & HFI1_RCVCTRL_CTXT_DIS)) 11952 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK; 11953 } 11954 if (op & HFI1_RCVCTRL_TIDFLOW_ENB) 11955 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; 11956 if (op & HFI1_RCVCTRL_TIDFLOW_DIS) 11957 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK; 11958 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) { 11959 /* 11960 * In one-packet-per-eager mode, the size comes from 11961 * the RcvArray entry. 
11962 */ 11963 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK; 11964 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; 11965 } 11966 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS) 11967 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK; 11968 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB) 11969 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; 11970 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS) 11971 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK; 11972 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB) 11973 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; 11974 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS) 11975 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK; 11976 if (op & HFI1_RCVCTRL_URGENT_ENB) 11977 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, 11978 IS_RCVURGENT_START + rcd->ctxt, true); 11979 if (op & HFI1_RCVCTRL_URGENT_DIS) 11980 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, 11981 IS_RCVURGENT_START + rcd->ctxt, false); 11982 11983 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl); 11984 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); 11985 11986 /* work around sticky RcvCtxtStatus.BlockedRHQFull */ 11987 if (did_enable && 11988 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) { 11989 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); 11990 if (reg != 0) { 11991 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n", 11992 ctxt, reg); 11993 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); 11994 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); 11995 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); 11996 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); 11997 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); 11998 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n", 11999 ctxt, reg, reg == 0 ? "not" : "still"); 12000 } 12001 } 12002 12003 if (did_enable) { 12004 /* 12005 * The interrupt timeout and count must be set after 12006 * the context is enabled to take effect. 12007 */ 12008 /* set interrupt timeout */ 12009 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, 12010 (u64)rcd->rcvavail_timeout << 12011 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT); 12012 12013 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */ 12014 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT; 12015 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); 12016 } 12017 12018 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS)) 12019 /* 12020 * If the context has been disabled and the Tail Update has 12021 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address 12022 * so it doesn't contain an address that is invalid. 12023 */ 12024 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 12025 dd->rcvhdrtail_dummy_dma); 12026 } 12027 12028 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp) 12029 { 12030 int ret; 12031 u64 val = 0; 12032 12033 if (namep) { 12034 ret = dd->cntrnameslen; 12035 *namep = dd->cntrnames; 12036 } else { 12037 const struct cntr_entry *entry; 12038 int i, j; 12039 12040 ret = (dd->ndevcntrs) * sizeof(u64); 12041 12042 /* Get the start of the block of counters */ 12043 *cntrp = dd->cntrs; 12044 12045 /* 12046 * Now go and fill in each counter in the block. 
12047 */ 12048 for (i = 0; i < DEV_CNTR_LAST; i++) { 12049 entry = &dev_cntrs[i]; 12050 hfi1_cdbg(CNTR, "reading %s", entry->name); 12051 if (entry->flags & CNTR_DISABLED) { 12052 /* Nothing */ 12053 hfi1_cdbg(CNTR, "\tDisabled\n"); 12054 } else { 12055 if (entry->flags & CNTR_VL) { 12056 hfi1_cdbg(CNTR, "\tPer VL\n"); 12057 for (j = 0; j < C_VL_COUNT; j++) { 12058 val = entry->rw_cntr(entry, 12059 dd, j, 12060 CNTR_MODE_R, 12061 0); 12062 hfi1_cdbg( 12063 CNTR, 12064 "\t\tRead 0x%llx for %d\n", 12065 val, j); 12066 dd->cntrs[entry->offset + j] = 12067 val; 12068 } 12069 } else if (entry->flags & CNTR_SDMA) { 12070 hfi1_cdbg(CNTR, 12071 "\t Per SDMA Engine\n"); 12072 for (j = 0; j < chip_sdma_engines(dd); 12073 j++) { 12074 val = 12075 entry->rw_cntr(entry, dd, j, 12076 CNTR_MODE_R, 0); 12077 hfi1_cdbg(CNTR, 12078 "\t\tRead 0x%llx for %d\n", 12079 val, j); 12080 dd->cntrs[entry->offset + j] = 12081 val; 12082 } 12083 } else { 12084 val = entry->rw_cntr(entry, dd, 12085 CNTR_INVALID_VL, 12086 CNTR_MODE_R, 0); 12087 dd->cntrs[entry->offset] = val; 12088 hfi1_cdbg(CNTR, "\tRead 0x%llx", val); 12089 } 12090 } 12091 } 12092 } 12093 return ret; 12094 } 12095 12096 /* 12097 * Used by sysfs to create files for hfi stats to read 12098 */ 12099 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp) 12100 { 12101 int ret; 12102 u64 val = 0; 12103 12104 if (namep) { 12105 ret = ppd->dd->portcntrnameslen; 12106 *namep = ppd->dd->portcntrnames; 12107 } else { 12108 const struct cntr_entry *entry; 12109 int i, j; 12110 12111 ret = ppd->dd->nportcntrs * sizeof(u64); 12112 *cntrp = ppd->cntrs; 12113 12114 for (i = 0; i < PORT_CNTR_LAST; i++) { 12115 entry = &port_cntrs[i]; 12116 hfi1_cdbg(CNTR, "reading %s", entry->name); 12117 if (entry->flags & CNTR_DISABLED) { 12118 /* Nothing */ 12119 hfi1_cdbg(CNTR, "\tDisabled\n"); 12120 continue; 12121 } 12122 12123 if (entry->flags & CNTR_VL) { 12124 hfi1_cdbg(CNTR, "\tPer VL"); 12125 for (j = 0; j < C_VL_COUNT; j++) { 12126 val = entry->rw_cntr(entry, ppd, j, 12127 CNTR_MODE_R, 12128 0); 12129 hfi1_cdbg( 12130 CNTR, 12131 "\t\tRead 0x%llx for %d", 12132 val, j); 12133 ppd->cntrs[entry->offset + j] = val; 12134 } 12135 } else { 12136 val = entry->rw_cntr(entry, ppd, 12137 CNTR_INVALID_VL, 12138 CNTR_MODE_R, 12139 0); 12140 ppd->cntrs[entry->offset] = val; 12141 hfi1_cdbg(CNTR, "\tRead 0x%llx", val); 12142 } 12143 } 12144 } 12145 return ret; 12146 } 12147 12148 static void free_cntrs(struct hfi1_devdata *dd) 12149 { 12150 struct hfi1_pportdata *ppd; 12151 int i; 12152 12153 if (dd->synth_stats_timer.function) 12154 del_timer_sync(&dd->synth_stats_timer); 12155 ppd = (struct hfi1_pportdata *)(dd + 1); 12156 for (i = 0; i < dd->num_pports; i++, ppd++) { 12157 kfree(ppd->cntrs); 12158 kfree(ppd->scntrs); 12159 free_percpu(ppd->ibport_data.rvp.rc_acks); 12160 free_percpu(ppd->ibport_data.rvp.rc_qacks); 12161 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); 12162 ppd->cntrs = NULL; 12163 ppd->scntrs = NULL; 12164 ppd->ibport_data.rvp.rc_acks = NULL; 12165 ppd->ibport_data.rvp.rc_qacks = NULL; 12166 ppd->ibport_data.rvp.rc_delayed_comp = NULL; 12167 } 12168 kfree(dd->portcntrnames); 12169 dd->portcntrnames = NULL; 12170 kfree(dd->cntrs); 12171 dd->cntrs = NULL; 12172 kfree(dd->scntrs); 12173 dd->scntrs = NULL; 12174 kfree(dd->cntrnames); 12175 dd->cntrnames = NULL; 12176 if (dd->update_cntr_wq) { 12177 destroy_workqueue(dd->update_cntr_wq); 12178 dd->update_cntr_wq = NULL; 12179 } 12180 } 12181 12182 static u64 read_dev_port_cntr(struct 
hfi1_devdata *dd, struct cntr_entry *entry, 12183 u64 *psval, void *context, int vl) 12184 { 12185 u64 val; 12186 u64 sval = *psval; 12187 12188 if (entry->flags & CNTR_DISABLED) { 12189 dd_dev_err(dd, "Counter %s not enabled", entry->name); 12190 return 0; 12191 } 12192 12193 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); 12194 12195 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); 12196 12197 /* If its a synthetic counter there is more work we need to do */ 12198 if (entry->flags & CNTR_SYNTH) { 12199 if (sval == CNTR_MAX) { 12200 /* No need to read already saturated */ 12201 return CNTR_MAX; 12202 } 12203 12204 if (entry->flags & CNTR_32BIT) { 12205 /* 32bit counters can wrap multiple times */ 12206 u64 upper = sval >> 32; 12207 u64 lower = (sval << 32) >> 32; 12208 12209 if (lower > val) { /* hw wrapped */ 12210 if (upper == CNTR_32BIT_MAX) 12211 val = CNTR_MAX; 12212 else 12213 upper++; 12214 } 12215 12216 if (val != CNTR_MAX) 12217 val = (upper << 32) | val; 12218 12219 } else { 12220 /* If we rolled we are saturated */ 12221 if ((val < sval) || (val > CNTR_MAX)) 12222 val = CNTR_MAX; 12223 } 12224 } 12225 12226 *psval = val; 12227 12228 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val); 12229 12230 return val; 12231 } 12232 12233 static u64 write_dev_port_cntr(struct hfi1_devdata *dd, 12234 struct cntr_entry *entry, 12235 u64 *psval, void *context, int vl, u64 data) 12236 { 12237 u64 val; 12238 12239 if (entry->flags & CNTR_DISABLED) { 12240 dd_dev_err(dd, "Counter %s not enabled", entry->name); 12241 return 0; 12242 } 12243 12244 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); 12245 12246 if (entry->flags & CNTR_SYNTH) { 12247 *psval = data; 12248 if (entry->flags & CNTR_32BIT) { 12249 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, 12250 (data << 32) >> 32); 12251 val = data; /* return the full 64bit value */ 12252 } else { 12253 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, 12254 data); 12255 } 12256 } else { 12257 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); 12258 } 12259 12260 *psval = val; 12261 12262 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val); 12263 12264 return val; 12265 } 12266 12267 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl) 12268 { 12269 struct cntr_entry *entry; 12270 u64 *sval; 12271 12272 entry = &dev_cntrs[index]; 12273 sval = dd->scntrs + entry->offset; 12274 12275 if (vl != CNTR_INVALID_VL) 12276 sval += vl; 12277 12278 return read_dev_port_cntr(dd, entry, sval, dd, vl); 12279 } 12280 12281 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data) 12282 { 12283 struct cntr_entry *entry; 12284 u64 *sval; 12285 12286 entry = &dev_cntrs[index]; 12287 sval = dd->scntrs + entry->offset; 12288 12289 if (vl != CNTR_INVALID_VL) 12290 sval += vl; 12291 12292 return write_dev_port_cntr(dd, entry, sval, dd, vl, data); 12293 } 12294 12295 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl) 12296 { 12297 struct cntr_entry *entry; 12298 u64 *sval; 12299 12300 entry = &port_cntrs[index]; 12301 sval = ppd->scntrs + entry->offset; 12302 12303 if (vl != CNTR_INVALID_VL) 12304 sval += vl; 12305 12306 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && 12307 (index <= C_RCV_HDR_OVF_LAST)) { 12308 /* We do not want to bother for disabled contexts */ 12309 return 0; 12310 } 12311 12312 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); 12313 } 12314 12315 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data) 12316 { 
12317 struct cntr_entry *entry; 12318 u64 *sval; 12319 12320 entry = &port_cntrs[index]; 12321 sval = ppd->scntrs + entry->offset; 12322 12323 if (vl != CNTR_INVALID_VL) 12324 sval += vl; 12325 12326 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && 12327 (index <= C_RCV_HDR_OVF_LAST)) { 12328 /* We do not want to bother for disabled contexts */ 12329 return 0; 12330 } 12331 12332 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); 12333 } 12334 12335 static void do_update_synth_timer(struct work_struct *work) 12336 { 12337 u64 cur_tx; 12338 u64 cur_rx; 12339 u64 total_flits; 12340 u8 update = 0; 12341 int i, j, vl; 12342 struct hfi1_pportdata *ppd; 12343 struct cntr_entry *entry; 12344 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata, 12345 update_cntr_work); 12346 12347 /* 12348 * Rather than keep beating on the CSRs pick a minimal set that we can 12349 * check to watch for potential roll over. We can do this by looking at 12350 * the number of flits sent/recv. If the total flits exceeds 32bits then 12351 * we have to iterate all the counters and update. 12352 */ 12353 entry = &dev_cntrs[C_DC_RCV_FLITS]; 12354 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); 12355 12356 entry = &dev_cntrs[C_DC_XMIT_FLITS]; 12357 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); 12358 12359 hfi1_cdbg( 12360 CNTR, 12361 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n", 12362 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); 12363 12364 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { 12365 /* 12366 * May not be strictly necessary to update but it won't hurt and 12367 * simplifies the logic here. 12368 */ 12369 update = 1; 12370 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating", 12371 dd->unit); 12372 } else { 12373 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); 12374 hfi1_cdbg(CNTR, 12375 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit, 12376 total_flits, (u64)CNTR_32BIT_MAX); 12377 if (total_flits >= CNTR_32BIT_MAX) { 12378 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating", 12379 dd->unit); 12380 update = 1; 12381 } 12382 } 12383 12384 if (update) { 12385 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); 12386 for (i = 0; i < DEV_CNTR_LAST; i++) { 12387 entry = &dev_cntrs[i]; 12388 if (entry->flags & CNTR_VL) { 12389 for (vl = 0; vl < C_VL_COUNT; vl++) 12390 read_dev_cntr(dd, i, vl); 12391 } else { 12392 read_dev_cntr(dd, i, CNTR_INVALID_VL); 12393 } 12394 } 12395 ppd = (struct hfi1_pportdata *)(dd + 1); 12396 for (i = 0; i < dd->num_pports; i++, ppd++) { 12397 for (j = 0; j < PORT_CNTR_LAST; j++) { 12398 entry = &port_cntrs[j]; 12399 if (entry->flags & CNTR_VL) { 12400 for (vl = 0; vl < C_VL_COUNT; vl++) 12401 read_port_cntr(ppd, j, vl); 12402 } else { 12403 read_port_cntr(ppd, j, CNTR_INVALID_VL); 12404 } 12405 } 12406 } 12407 12408 /* 12409 * We want the value in the register. The goal is to keep track 12410 * of the number of "ticks" not the counter value. In other 12411 * words if the register rolls we want to notice it and go ahead 12412 * and force an update. 
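 * As an illustration (hypothetical numbers): if dd->last_tx was
 * 0xffffff00 and C_DC_XMIT_FLITS now reads 0x40, the cur_tx < last_tx
 * tripwire above fires, a wrap is assumed, and every synthetic counter
 * is refreshed.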
12413 */ 12414 entry = &dev_cntrs[C_DC_XMIT_FLITS]; 12415 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, 12416 CNTR_MODE_R, 0); 12417 12418 entry = &dev_cntrs[C_DC_RCV_FLITS]; 12419 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, 12420 CNTR_MODE_R, 0); 12421 12422 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx", 12423 dd->unit, dd->last_tx, dd->last_rx); 12424 12425 } else { 12426 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); 12427 } 12428 } 12429 12430 static void update_synth_timer(struct timer_list *t) 12431 { 12432 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer); 12433 12434 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); 12435 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12436 } 12437 12438 #define C_MAX_NAME 16 /* 15 chars + one for \0 */ 12439 static int init_cntrs(struct hfi1_devdata *dd) 12440 { 12441 int i, rcv_ctxts, j; 12442 size_t sz; 12443 char *p; 12444 char name[C_MAX_NAME]; 12445 struct hfi1_pportdata *ppd; 12446 const char *bit_type_32 = ",32"; 12447 const int bit_type_32_sz = strlen(bit_type_32); 12448 u32 sdma_engines = chip_sdma_engines(dd); 12449 12450 /* set up the stats timer; the add_timer is done at the end */ 12451 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); 12452 12453 /***********************/ 12454 /* per device counters */ 12455 /***********************/ 12456 12457 /* size names and determine how many we have */ 12458 dd->ndevcntrs = 0; 12459 sz = 0; 12460 12461 for (i = 0; i < DEV_CNTR_LAST; i++) { 12462 if (dev_cntrs[i].flags & CNTR_DISABLED) { 12463 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name); 12464 continue; 12465 } 12466 12467 if (dev_cntrs[i].flags & CNTR_VL) { 12468 dev_cntrs[i].offset = dd->ndevcntrs; 12469 for (j = 0; j < C_VL_COUNT; j++) { 12470 snprintf(name, C_MAX_NAME, "%s%d", 12471 dev_cntrs[i].name, vl_from_idx(j)); 12472 sz += strlen(name); 12473 /* Add ",32" for 32-bit counters */ 12474 if (dev_cntrs[i].flags & CNTR_32BIT) 12475 sz += bit_type_32_sz; 12476 sz++; 12477 dd->ndevcntrs++; 12478 } 12479 } else if (dev_cntrs[i].flags & CNTR_SDMA) { 12480 dev_cntrs[i].offset = dd->ndevcntrs; 12481 for (j = 0; j < sdma_engines; 12482 j++) { 12483 snprintf(name, C_MAX_NAME, "%s%d", 12484 dev_cntrs[i].name, j); 12485 sz += strlen(name); 12486 /* Add ",32" for 32-bit counters */ 12487 if (dev_cntrs[i].flags & CNTR_32BIT) 12488 sz += bit_type_32_sz; 12489 sz++; 12490 dd->ndevcntrs++; 12491 } 12492 } else { /* +1 for newline.
*/ 12493 sz += strlen(dev_cntrs[i].name) + 1; 12494 /* Add ",32" for 32-bit counters */ 12495 if (dev_cntrs[i].flags & CNTR_32BIT) 12496 sz += bit_type_32_sz; 12497 dev_cntrs[i].offset = dd->ndevcntrs; 12498 dd->ndevcntrs++; 12499 } 12500 } 12501 12502 /* allocate space for the counter values */ 12503 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), 12504 GFP_KERNEL); 12505 if (!dd->cntrs) 12506 goto bail; 12507 12508 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); 12509 if (!dd->scntrs) 12510 goto bail; 12511 12512 /* allocate space for the counter names */ 12513 dd->cntrnameslen = sz; 12514 dd->cntrnames = kmalloc(sz, GFP_KERNEL); 12515 if (!dd->cntrnames) 12516 goto bail; 12517 12518 /* fill in the names */ 12519 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { 12520 if (dev_cntrs[i].flags & CNTR_DISABLED) { 12521 /* Nothing */ 12522 } else if (dev_cntrs[i].flags & CNTR_VL) { 12523 for (j = 0; j < C_VL_COUNT; j++) { 12524 snprintf(name, C_MAX_NAME, "%s%d", 12525 dev_cntrs[i].name, 12526 vl_from_idx(j)); 12527 memcpy(p, name, strlen(name)); 12528 p += strlen(name); 12529 12530 /* Counter is 32 bits */ 12531 if (dev_cntrs[i].flags & CNTR_32BIT) { 12532 memcpy(p, bit_type_32, bit_type_32_sz); 12533 p += bit_type_32_sz; 12534 } 12535 12536 *p++ = '\n'; 12537 } 12538 } else if (dev_cntrs[i].flags & CNTR_SDMA) { 12539 for (j = 0; j < sdma_engines; j++) { 12540 snprintf(name, C_MAX_NAME, "%s%d", 12541 dev_cntrs[i].name, j); 12542 memcpy(p, name, strlen(name)); 12543 p += strlen(name); 12544 12545 /* Counter is 32 bits */ 12546 if (dev_cntrs[i].flags & CNTR_32BIT) { 12547 memcpy(p, bit_type_32, bit_type_32_sz); 12548 p += bit_type_32_sz; 12549 } 12550 12551 *p++ = '\n'; 12552 } 12553 } else { 12554 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); 12555 p += strlen(dev_cntrs[i].name); 12556 12557 /* Counter is 32 bits */ 12558 if (dev_cntrs[i].flags & CNTR_32BIT) { 12559 memcpy(p, bit_type_32, bit_type_32_sz); 12560 p += bit_type_32_sz; 12561 } 12562 12563 *p++ = '\n'; 12564 } 12565 } 12566 12567 /*********************/ 12568 /* per port counters */ 12569 /*********************/ 12570 12571 /* 12572 * Go through the counters for the overflows and disable the ones we 12573 * don't need. This varies based on platform so we need to do it 12574 * dynamically here. 
12575 */ 12576 rcv_ctxts = dd->num_rcv_contexts; 12577 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts; 12578 i <= C_RCV_HDR_OVF_LAST; i++) { 12579 port_cntrs[i].flags |= CNTR_DISABLED; 12580 } 12581 12582 /* size port counter names and determine how many we have*/ 12583 sz = 0; 12584 dd->nportcntrs = 0; 12585 for (i = 0; i < PORT_CNTR_LAST; i++) { 12586 if (port_cntrs[i].flags & CNTR_DISABLED) { 12587 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name); 12588 continue; 12589 } 12590 12591 if (port_cntrs[i].flags & CNTR_VL) { 12592 port_cntrs[i].offset = dd->nportcntrs; 12593 for (j = 0; j < C_VL_COUNT; j++) { 12594 snprintf(name, C_MAX_NAME, "%s%d", 12595 port_cntrs[i].name, vl_from_idx(j)); 12596 sz += strlen(name); 12597 /* Add ",32" for 32-bit counters */ 12598 if (port_cntrs[i].flags & CNTR_32BIT) 12599 sz += bit_type_32_sz; 12600 sz++; 12601 dd->nportcntrs++; 12602 } 12603 } else { 12604 /* +1 for newline */ 12605 sz += strlen(port_cntrs[i].name) + 1; 12606 /* Add ",32" for 32-bit counters */ 12607 if (port_cntrs[i].flags & CNTR_32BIT) 12608 sz += bit_type_32_sz; 12609 port_cntrs[i].offset = dd->nportcntrs; 12610 dd->nportcntrs++; 12611 } 12612 } 12613 12614 /* allocate space for the counter names */ 12615 dd->portcntrnameslen = sz; 12616 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); 12617 if (!dd->portcntrnames) 12618 goto bail; 12619 12620 /* fill in port cntr names */ 12621 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { 12622 if (port_cntrs[i].flags & CNTR_DISABLED) 12623 continue; 12624 12625 if (port_cntrs[i].flags & CNTR_VL) { 12626 for (j = 0; j < C_VL_COUNT; j++) { 12627 snprintf(name, C_MAX_NAME, "%s%d", 12628 port_cntrs[i].name, vl_from_idx(j)); 12629 memcpy(p, name, strlen(name)); 12630 p += strlen(name); 12631 12632 /* Counter is 32 bits */ 12633 if (port_cntrs[i].flags & CNTR_32BIT) { 12634 memcpy(p, bit_type_32, bit_type_32_sz); 12635 p += bit_type_32_sz; 12636 } 12637 12638 *p++ = '\n'; 12639 } 12640 } else { 12641 memcpy(p, port_cntrs[i].name, 12642 strlen(port_cntrs[i].name)); 12643 p += strlen(port_cntrs[i].name); 12644 12645 /* Counter is 32 bits */ 12646 if (port_cntrs[i].flags & CNTR_32BIT) { 12647 memcpy(p, bit_type_32, bit_type_32_sz); 12648 p += bit_type_32_sz; 12649 } 12650 12651 *p++ = '\n'; 12652 } 12653 } 12654 12655 /* allocate per port storage for counter values */ 12656 ppd = (struct hfi1_pportdata *)(dd + 1); 12657 for (i = 0; i < dd->num_pports; i++, ppd++) { 12658 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); 12659 if (!ppd->cntrs) 12660 goto bail; 12661 12662 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); 12663 if (!ppd->scntrs) 12664 goto bail; 12665 } 12666 12667 /* CPU counters need to be allocated and zeroed */ 12668 if (init_cpu_counters(dd)) 12669 goto bail; 12670 12671 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", 12672 WQ_MEM_RECLAIM, dd->unit); 12673 if (!dd->update_cntr_wq) 12674 goto bail; 12675 12676 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); 12677 12678 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12679 return 0; 12680 bail: 12681 free_cntrs(dd); 12682 return -ENOMEM; 12683 } 12684 12685 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate) 12686 { 12687 switch (chip_lstate) { 12688 default: 12689 dd_dev_err(dd, 12690 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n", 12691 chip_lstate); 12692 /* fall through */ 12693 case LSTATE_DOWN: 12694 return IB_PORT_DOWN; 12695 case LSTATE_INIT: 12696 return IB_PORT_INIT; 
12697 case LSTATE_ARMED: 12698 return IB_PORT_ARMED; 12699 case LSTATE_ACTIVE: 12700 return IB_PORT_ACTIVE; 12701 } 12702 } 12703 12704 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate) 12705 { 12706 /* look at the HFI meta-states only */ 12707 switch (chip_pstate & 0xf0) { 12708 default: 12709 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n", 12710 chip_pstate); 12711 /* fall through */ 12712 case PLS_DISABLED: 12713 return IB_PORTPHYSSTATE_DISABLED; 12714 case PLS_OFFLINE: 12715 return OPA_PORTPHYSSTATE_OFFLINE; 12716 case PLS_POLLING: 12717 return IB_PORTPHYSSTATE_POLLING; 12718 case PLS_CONFIGPHY: 12719 return IB_PORTPHYSSTATE_TRAINING; 12720 case PLS_LINKUP: 12721 return IB_PORTPHYSSTATE_LINKUP; 12722 case PLS_PHYTEST: 12723 return IB_PORTPHYSSTATE_PHY_TEST; 12724 } 12725 } 12726 12727 /* return the OPA port logical state name */ 12728 const char *opa_lstate_name(u32 lstate) 12729 { 12730 static const char * const port_logical_names[] = { 12731 "PORT_NOP", 12732 "PORT_DOWN", 12733 "PORT_INIT", 12734 "PORT_ARMED", 12735 "PORT_ACTIVE", 12736 "PORT_ACTIVE_DEFER", 12737 }; 12738 if (lstate < ARRAY_SIZE(port_logical_names)) 12739 return port_logical_names[lstate]; 12740 return "unknown"; 12741 } 12742 12743 /* return the OPA port physical state name */ 12744 const char *opa_pstate_name(u32 pstate) 12745 { 12746 static const char * const port_physical_names[] = { 12747 "PHYS_NOP", 12748 "reserved1", 12749 "PHYS_POLL", 12750 "PHYS_DISABLED", 12751 "PHYS_TRAINING", 12752 "PHYS_LINKUP", 12753 "PHYS_LINK_ERR_RECOVER", 12754 "PHYS_PHY_TEST", 12755 "reserved8", 12756 "PHYS_OFFLINE", 12757 "PHYS_GANGED", 12758 "PHYS_TEST", 12759 }; 12760 if (pstate < ARRAY_SIZE(port_physical_names)) 12761 return port_physical_names[pstate]; 12762 return "unknown"; 12763 } 12764 12765 /** 12766 * update_statusp - Update userspace status flag 12767 * @ppd: Port data structure 12768 * @state: port state information 12769 * 12770 * Actual port status is determined by the host_link_state value 12771 * in the ppd. 12772 * 12773 * host_link_state MUST be updated before updating the user space 12774 * statusp. 12775 */ 12776 static void update_statusp(struct hfi1_pportdata *ppd, u32 state) 12777 { 12778 /* 12779 * Set port status flags in the page mapped into userspace 12780 * memory. Do it here to ensure a reliable state - this is 12781 * the only function called by all state handling code. 12782 * Always set the flags due to the fact that the cache value 12783 * might have been changed explicitly outside of this 12784 * function. 12785 */ 12786 if (ppd->statusp) { 12787 switch (state) { 12788 case IB_PORT_DOWN: 12789 case IB_PORT_INIT: 12790 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | 12791 HFI1_STATUS_IB_READY); 12792 break; 12793 case IB_PORT_ARMED: 12794 *ppd->statusp |= HFI1_STATUS_IB_CONF; 12795 break; 12796 case IB_PORT_ACTIVE: 12797 *ppd->statusp |= HFI1_STATUS_IB_READY; 12798 break; 12799 } 12800 } 12801 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", 12802 opa_lstate_name(state), state); 12803 } 12804 12805 /** 12806 * wait_logical_linkstate - wait for an IB link state change to occur 12807 * @ppd: port device 12808 * @state: the state to wait for 12809 * @msecs: the number of milliseconds to wait 12810 * 12811 * Wait up to msecs milliseconds for IB link state change to occur. 12812 * For now, take the easy polling route. 12813 * Returns 0 if state reached, otherwise -ETIMEDOUT. 
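 *
 * A minimal caller sketch (the timeout value is illustrative only):
 *
 *	if (wait_logical_linkstate(ppd, IB_PORT_INIT, 1000))
 *		dd_dev_err(ppd->dd, "link did not reach INIT\n");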
12814 */ 12815 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, 12816 int msecs) 12817 { 12818 unsigned long timeout; 12819 u32 new_state; 12820 12821 timeout = jiffies + msecs_to_jiffies(msecs); 12822 while (1) { 12823 new_state = chip_to_opa_lstate(ppd->dd, 12824 read_logical_state(ppd->dd)); 12825 if (new_state == state) 12826 break; 12827 if (time_after(jiffies, timeout)) { 12828 dd_dev_err(ppd->dd, 12829 "timeout waiting for link state 0x%x\n", 12830 state); 12831 return -ETIMEDOUT; 12832 } 12833 msleep(20); 12834 } 12835 12836 return 0; 12837 } 12838 12839 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state) 12840 { 12841 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); 12842 12843 dd_dev_info(ppd->dd, 12844 "physical state changed to %s (0x%x), phy 0x%x\n", 12845 opa_pstate_name(ib_pstate), ib_pstate, state); 12846 } 12847 12848 /* 12849 * Read the physical hardware link state and check if it matches host 12850 * drivers anticipated state. 12851 */ 12852 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state) 12853 { 12854 u32 read_state = read_physical_state(ppd->dd); 12855 12856 if (read_state == state) { 12857 log_state_transition(ppd, state); 12858 } else { 12859 dd_dev_err(ppd->dd, 12860 "anticipated phy link state 0x%x, read 0x%x\n", 12861 state, read_state); 12862 } 12863 } 12864 12865 /* 12866 * wait_physical_linkstate - wait for an physical link state change to occur 12867 * @ppd: port device 12868 * @state: the state to wait for 12869 * @msecs: the number of milliseconds to wait 12870 * 12871 * Wait up to msecs milliseconds for physical link state change to occur. 12872 * Returns 0 if state reached, otherwise -ETIMEDOUT. 12873 */ 12874 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state, 12875 int msecs) 12876 { 12877 u32 read_state; 12878 unsigned long timeout; 12879 12880 timeout = jiffies + msecs_to_jiffies(msecs); 12881 while (1) { 12882 read_state = read_physical_state(ppd->dd); 12883 if (read_state == state) 12884 break; 12885 if (time_after(jiffies, timeout)) { 12886 dd_dev_err(ppd->dd, 12887 "timeout waiting for phy link state 0x%x\n", 12888 state); 12889 return -ETIMEDOUT; 12890 } 12891 usleep_range(1950, 2050); /* sleep 2ms-ish */ 12892 } 12893 12894 log_state_transition(ppd, state); 12895 return 0; 12896 } 12897 12898 /* 12899 * wait_phys_link_offline_quiet_substates - wait for any offline substate 12900 * @ppd: port device 12901 * @msecs: the number of milliseconds to wait 12902 * 12903 * Wait up to msecs milliseconds for any offline physical link 12904 * state change to occur. 12905 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT. 12906 */ 12907 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd, 12908 int msecs) 12909 { 12910 u32 read_state; 12911 unsigned long timeout; 12912 12913 timeout = jiffies + msecs_to_jiffies(msecs); 12914 while (1) { 12915 read_state = read_physical_state(ppd->dd); 12916 if ((read_state & 0xF0) == PLS_OFFLINE) 12917 break; 12918 if (time_after(jiffies, timeout)) { 12919 dd_dev_err(ppd->dd, 12920 "timeout waiting for phy link offline.quiet substates. 
Read state 0x%x, %dms\n", 12921 read_state, msecs); 12922 return -ETIMEDOUT; 12923 } 12924 usleep_range(1950, 2050); /* sleep 2ms-ish */ 12925 } 12926 12927 log_state_transition(ppd, read_state); 12928 return read_state; 12929 } 12930 12931 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ 12932 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 12933 12934 #define SET_STATIC_RATE_CONTROL_SMASK(r) \ 12935 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 12936 12937 void hfi1_init_ctxt(struct send_context *sc) 12938 { 12939 if (sc) { 12940 struct hfi1_devdata *dd = sc->dd; 12941 u64 reg; 12942 u8 set = (sc->type == SC_USER ? 12943 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) : 12944 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)); 12945 reg = read_kctxt_csr(dd, sc->hw_context, 12946 SEND_CTXT_CHECK_ENABLE); 12947 if (set) 12948 CLEAR_STATIC_RATE_CONTROL_SMASK(reg); 12949 else 12950 SET_STATIC_RATE_CONTROL_SMASK(reg); 12951 write_kctxt_csr(dd, sc->hw_context, 12952 SEND_CTXT_CHECK_ENABLE, reg); 12953 } 12954 } 12955 12956 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp) 12957 { 12958 int ret = 0; 12959 u64 reg; 12960 12961 if (dd->icode != ICODE_RTL_SILICON) { 12962 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL)) 12963 dd_dev_info(dd, "%s: tempsense not supported by HW\n", 12964 __func__); 12965 return -EINVAL; 12966 } 12967 reg = read_csr(dd, ASIC_STS_THERM); 12968 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & 12969 ASIC_STS_THERM_CURR_TEMP_MASK); 12970 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & 12971 ASIC_STS_THERM_LO_TEMP_MASK); 12972 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & 12973 ASIC_STS_THERM_HI_TEMP_MASK); 12974 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & 12975 ASIC_STS_THERM_CRIT_TEMP_MASK); 12976 /* triggers is a 3-bit value - 1 bit per trigger. */ 12977 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); 12978 12979 return ret; 12980 } 12981 12982 /* ========================================================================= */ 12983 12984 /** 12985 * read_mod_write() - Calculate the IRQ register index and set/clear the bits 12986 * @dd: valid devdata 12987 * @src: IRQ source to determine register index from 12988 * @bits: the bits to set or clear 12989 * @set: true == set the bits, false == clear the bits 12990 * 12991 */ 12992 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, 12993 bool set) 12994 { 12995 u64 reg; 12996 u16 idx = src / BITS_PER_REGISTER; 12997 12998 spin_lock(&dd->irq_src_lock); 12999 reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); 13000 if (set) 13001 reg |= bits; 13002 else 13003 reg &= ~bits; 13004 write_csr(dd, CCE_INT_MASK + (8 * idx), reg); 13005 spin_unlock(&dd->irq_src_lock); 13006 } 13007 13008 /** 13009 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources 13010 * @dd: valid devdata 13011 * @first: first IRQ source to set/clear 13012 * @last: last IRQ source (inclusive) to set/clear 13013 * @set: true == set the bits, false == clear the bits 13014 * 13015 * If first == last, set the exact source. 13016 */ 13017 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set) 13018 { 13019 u64 bits = 0; 13020 u64 bit; 13021 u16 src; 13022 13023 if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES) 13024 return -EINVAL; 13025 13026 if (last < first) 13027 return -ERANGE; 13028 13029 for (src = first; src <= last; src++) { 13030 bit = src % BITS_PER_REGISTER; 13031 /* wrapped to next register? 
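	 * If so, flush the mask accumulated for the previous register
	 * (src - 1 still indexes it) before collecting bits for the new one.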
*/ 13032 if (!bit && bits) { 13033 read_mod_write(dd, src - 1, bits, set); 13034 bits = 0; 13035 } 13036 bits |= BIT_ULL(bit); 13037 } 13038 read_mod_write(dd, last, bits, set); 13039 13040 return 0; 13041 } 13042 13043 /* 13044 * Clear all interrupt sources on the chip. 13045 */ 13046 void clear_all_interrupts(struct hfi1_devdata *dd) 13047 { 13048 int i; 13049 13050 for (i = 0; i < CCE_NUM_INT_CSRS; i++) 13051 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0); 13052 13053 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0); 13054 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0); 13055 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0); 13056 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0); 13057 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0); 13058 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0); 13059 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0); 13060 for (i = 0; i < chip_send_contexts(dd); i++) 13061 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0); 13062 for (i = 0; i < chip_sdma_engines(dd); i++) 13063 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0); 13064 13065 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0); 13066 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0); 13067 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0); 13068 } 13069 13070 /* 13071 * Remap the interrupt source from the general handler to the given MSI-X 13072 * interrupt. 13073 */ 13074 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) 13075 { 13076 u64 reg; 13077 int m, n; 13078 13079 /* clear from the handled mask of the general interrupt */ 13080 m = isrc / 64; 13081 n = isrc % 64; 13082 if (likely(m < CCE_NUM_INT_CSRS)) { 13083 dd->gi_mask[m] &= ~((u64)1 << n); 13084 } else { 13085 dd_dev_err(dd, "remap interrupt err\n"); 13086 return; 13087 } 13088 13089 /* direct the chip source to the given MSI-X interrupt */ 13090 m = isrc / 8; 13091 n = isrc % 8; 13092 reg = read_csr(dd, CCE_INT_MAP + (8 * m)); 13093 reg &= ~((u64)0xff << (8 * n)); 13094 reg |= ((u64)msix_intr & 0xff) << (8 * n); 13095 write_csr(dd, CCE_INT_MAP + (8 * m), reg); 13096 } 13097 13098 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr) 13099 { 13100 /* 13101 * SDMA engine interrupt sources are grouped by type, rather than 13102 * by engine. Per-engine interrupts are as follows: 13103 * SDMA 13104 * SDMAProgress 13105 * SDMAIdle 13106 */ 13107 remap_intr(dd, IS_SDMA_START + engine, msix_intr); 13108 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr); 13109 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr); 13110 } 13111 13112 /* 13113 * Set the general handler to accept all interrupts and remap all 13114 * chip interrupts back to MSI-X 0.
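 * This is the safe default: it is applied before MSI-X vectors are
 * requested and whenever the interrupt routing is being rebuilt.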
13115 */ 13116 void reset_interrupts(struct hfi1_devdata *dd) 13117 { 13118 int i; 13119 13120 /* all interrupts handled by the general handler */ 13121 for (i = 0; i < CCE_NUM_INT_CSRS; i++) 13122 dd->gi_mask[i] = ~(u64)0; 13123 13124 /* all chip interrupts map to MSI-X 0 */ 13125 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) 13126 write_csr(dd, CCE_INT_MAP + (8 * i), 0); 13127 } 13128 13129 /** 13130 * set_up_interrupts() - Initialize the IRQ resources and state 13131 * @dd: valid devdata 13132 * 13133 */ 13134 static int set_up_interrupts(struct hfi1_devdata *dd) 13135 { 13136 int ret; 13137 13138 /* mask all interrupts */ 13139 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false); 13140 13141 /* clear all pending interrupts */ 13142 clear_all_interrupts(dd); 13143 13144 /* reset general handler mask, chip MSI-X mappings */ 13145 reset_interrupts(dd); 13146 13147 /* ask for MSI-X interrupts */ 13148 ret = msix_initialize(dd); 13149 if (ret) 13150 return ret; 13151 13152 ret = msix_request_irqs(dd); 13153 if (ret) 13154 msix_clean_up_interrupts(dd); 13155 13156 return ret; 13157 } 13158 13159 /* 13160 * Set up context values in dd. Sets: 13161 * 13162 * num_rcv_contexts - number of contexts being used 13163 * n_krcv_queues - number of kernel contexts 13164 * first_dyn_alloc_ctxt - first dynamically allocated context 13165 * in array of contexts 13166 * freectxts - number of free user contexts 13167 * num_send_contexts - number of PIO send contexts being used 13168 * num_vnic_contexts - number of contexts reserved for VNIC 13169 */ 13170 static int set_up_context_variables(struct hfi1_devdata *dd) 13171 { 13172 unsigned long num_kernel_contexts; 13173 u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT; 13174 int total_contexts; 13175 int ret; 13176 unsigned ngroups; 13177 int qos_rmt_count; 13178 int user_rmt_reduced; 13179 u32 n_usr_ctxts; 13180 u32 send_contexts = chip_send_contexts(dd); 13181 u32 rcv_contexts = chip_rcv_contexts(dd); 13182 13183 /* 13184 * Kernel receive contexts: 13185 * - Context 0 - control context (VL15/multicast/error) 13186 * - Context 1 - first kernel context 13187 * - Context 2 - second kernel context 13188 * ... 13189 */ 13190 if (n_krcvqs) 13191 /* 13192 * n_krcvqs is the sum of module parameter kernel receive 13193 * contexts, krcvqs[]. It does not include the control 13194 * context, so add that. 13195 */ 13196 num_kernel_contexts = n_krcvqs + 1; 13197 else 13198 num_kernel_contexts = DEFAULT_KRCVQS + 1; 13199 /* 13200 * Every kernel receive context needs an ACK send context. 13201 * one send context is allocated for each VL{0-7} and VL15 13202 */ 13203 if (num_kernel_contexts > (send_contexts - num_vls - 1)) { 13204 dd_dev_err(dd, 13205 "Reducing # kernel rcv contexts to: %d, from %lu\n", 13206 send_contexts - num_vls - 1, 13207 num_kernel_contexts); 13208 num_kernel_contexts = send_contexts - num_vls - 1; 13209 } 13210 13211 /* Accommodate VNIC contexts if possible */ 13212 if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) { 13213 dd_dev_err(dd, "No receive contexts available for VNIC\n"); 13214 num_vnic_contexts = 0; 13215 } 13216 total_contexts = num_kernel_contexts + num_vnic_contexts; 13217 13218 /* 13219 * User contexts: 13220 * - default to 1 user context per real (non-HT) CPU core if 13221 * num_user_contexts is negative 13222 */ 13223 if (num_user_contexts < 0) 13224 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask); 13225 else 13226 n_usr_ctxts = num_user_contexts; 13227 /* 13228 * Adjust the counts given a global max. 
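 * rcv_contexts is the chip-wide limit; kernel and VNIC contexts were
 * claimed first, so only the user context count is trimmed here.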
13229 */ 13230 if (total_contexts + n_usr_ctxts > rcv_contexts) { 13231 dd_dev_err(dd, 13232 "Reducing # user receive contexts to: %d, from %u\n", 13233 rcv_contexts - total_contexts, 13234 n_usr_ctxts); 13235 /* recalculate */ 13236 n_usr_ctxts = rcv_contexts - total_contexts; 13237 } 13238 13239 /* each user context requires an entry in the RMT */ 13240 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL); 13241 if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { 13242 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count; 13243 dd_dev_err(dd, 13244 "RMT size is reducing the number of user receive contexts from %u to %d\n", 13245 n_usr_ctxts, 13246 user_rmt_reduced); 13247 /* recalculate */ 13248 n_usr_ctxts = user_rmt_reduced; 13249 } 13250 13251 total_contexts += n_usr_ctxts; 13252 13253 /* the first N are kernel contexts, the rest are user/vnic contexts */ 13254 dd->num_rcv_contexts = total_contexts; 13255 dd->n_krcv_queues = num_kernel_contexts; 13256 dd->first_dyn_alloc_ctxt = num_kernel_contexts; 13257 dd->num_vnic_contexts = num_vnic_contexts; 13258 dd->num_user_contexts = n_usr_ctxts; 13259 dd->freectxts = n_usr_ctxts; 13260 dd_dev_info(dd, 13261 "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n", 13262 rcv_contexts, 13263 (int)dd->num_rcv_contexts, 13264 (int)dd->n_krcv_queues, 13265 dd->num_vnic_contexts, 13266 dd->num_user_contexts); 13267 13268 /* 13269 * Receive array allocation: 13270 * All RcvArray entries are divided into groups of 8. This 13271 * is required by the hardware and will speed up writes to 13272 * consecutive entries by using write-combining of the entire 13273 * cacheline. 13274 * 13275 * The number of groups is evenly divided among all contexts; 13276 * any leftover groups are given to the first N user 13277 * contexts. 13278 */ 13279 dd->rcv_entries.group_size = RCV_INCREMENT; 13280 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; 13281 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; 13282 dd->rcv_entries.nctxt_extra = ngroups - 13283 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); 13284 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n", 13285 dd->rcv_entries.ngroups, 13286 dd->rcv_entries.nctxt_extra); 13287 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > 13288 MAX_EAGER_ENTRIES * 2) { 13289 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / 13290 dd->rcv_entries.group_size; 13291 dd_dev_info(dd, 13292 "RcvArray group count too high, change to %u\n", 13293 dd->rcv_entries.ngroups); 13294 dd->rcv_entries.nctxt_extra = 0; 13295 } 13296 /* 13297 * PIO send contexts 13298 */ 13299 ret = init_sc_pools_and_sizes(dd); 13300 if (ret >= 0) { /* success */ 13301 dd->num_send_contexts = ret; 13302 dd_dev_info( 13303 dd, 13304 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n", 13305 send_contexts, 13306 dd->num_send_contexts, 13307 dd->sc_sizes[SC_KERNEL].count, 13308 dd->sc_sizes[SC_ACK].count, 13309 dd->sc_sizes[SC_USER].count, 13310 dd->sc_sizes[SC_VL15].count); 13311 ret = 0; /* success */ 13312 } 13313 13314 return ret; 13315 } 13316 13317 /* 13318 * Set the device/port partition key table. The MAD code 13319 * will ensure that, at least, the partial management 13320 * partition key is present in the table.
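 * Each RCV_PARTITION_KEY CSR packs four 16-bit keys, so the loop
 * below flushes one CSR write for every fourth key it folds in.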
13321 */ 13322 static void set_partition_keys(struct hfi1_pportdata *ppd) 13323 { 13324 struct hfi1_devdata *dd = ppd->dd; 13325 u64 reg = 0; 13326 int i; 13327 13328 dd_dev_info(dd, "Setting partition keys\n"); 13329 for (i = 0; i < hfi1_get_npkeys(dd); i++) { 13330 reg |= (ppd->pkeys[i] & 13331 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) << 13332 ((i % 4) * 13333 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT); 13334 /* Each register holds 4 PKey values. */ 13335 if ((i % 4) == 3) { 13336 write_csr(dd, RCV_PARTITION_KEY + 13337 ((i - 3) * 2), reg); 13338 reg = 0; 13339 } 13340 } 13341 13342 /* Always enable HW pkeys check when pkeys table is set */ 13343 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK); 13344 } 13345 13346 /* 13347 * These CSRs and memories are uninitialized on reset and must be 13348 * written before reading to set the ECC/parity bits. 13349 * 13350 * NOTE: All user context CSRs that are not mmaped write-only 13351 * (e.g. the TID flows) must be initialized even if the driver never 13352 * reads them. 13353 */ 13354 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd) 13355 { 13356 int i, j; 13357 13358 /* CceIntMap */ 13359 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) 13360 write_csr(dd, CCE_INT_MAP + (8 * i), 0); 13361 13362 /* SendCtxtCreditReturnAddr */ 13363 for (i = 0; i < chip_send_contexts(dd); i++) 13364 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); 13365 13366 /* PIO Send buffers */ 13367 /* SDMA Send buffers */ 13368 /* 13369 * These are not normally read, and (presently) have no method 13370 * to be read, so are not pre-initialized 13371 */ 13372 13373 /* RcvHdrAddr */ 13374 /* RcvHdrTailAddr */ 13375 /* RcvTidFlowTable */ 13376 for (i = 0; i < chip_rcv_contexts(dd); i++) { 13377 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); 13378 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); 13379 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) 13380 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0); 13381 } 13382 13383 /* RcvArray */ 13384 for (i = 0; i < chip_rcv_array_count(dd); i++) 13385 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0); 13386 13387 /* RcvQPMapTable */ 13388 for (i = 0; i < 32; i++) 13389 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); 13390 } 13391 13392 /* 13393 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus. 13394 */ 13395 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits, 13396 u64 ctrl_bits) 13397 { 13398 unsigned long timeout; 13399 u64 reg; 13400 13401 /* is the condition present? 
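	 * If not, there is nothing to clear.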
*/ 13402 reg = read_csr(dd, CCE_STATUS); 13403 if ((reg & status_bits) == 0) 13404 return; 13405 13406 /* clear the condition */ 13407 write_csr(dd, CCE_CTRL, ctrl_bits); 13408 13409 /* wait for the condition to clear */ 13410 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT); 13411 while (1) { 13412 reg = read_csr(dd, CCE_STATUS); 13413 if ((reg & status_bits) == 0) 13414 return; 13415 if (time_after(jiffies, timeout)) { 13416 dd_dev_err(dd, 13417 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n", 13418 status_bits, reg & status_bits); 13419 return; 13420 } 13421 udelay(1); 13422 } 13423 } 13424 13425 /* set CCE CSRs to chip reset defaults */ 13426 static void reset_cce_csrs(struct hfi1_devdata *dd) 13427 { 13428 int i; 13429 13430 /* CCE_REVISION read-only */ 13431 /* CCE_REVISION2 read-only */ 13432 /* CCE_CTRL - bits clear automatically */ 13433 /* CCE_STATUS read-only, use CceCtrl to clear */ 13434 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK); 13435 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK); 13436 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK); 13437 for (i = 0; i < CCE_NUM_SCRATCH; i++) 13438 write_csr(dd, CCE_SCRATCH + (8 * i), 0); 13439 /* CCE_ERR_STATUS read-only */ 13440 write_csr(dd, CCE_ERR_MASK, 0); 13441 write_csr(dd, CCE_ERR_CLEAR, ~0ull); 13442 /* CCE_ERR_FORCE leave alone */ 13443 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++) 13444 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0); 13445 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR); 13446 /* CCE_PCIE_CTRL leave alone */ 13447 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) { 13448 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0); 13449 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i), 13450 CCE_MSIX_TABLE_UPPER_RESETCSR); 13451 } 13452 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) { 13453 /* CCE_MSIX_PBA read-only */ 13454 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull); 13455 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull); 13456 } 13457 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++) 13458 write_csr(dd, CCE_INT_MAP + (8 * i), 0); 13459 for (i = 0; i < CCE_NUM_INT_CSRS; i++) { 13460 /* CCE_INT_STATUS read-only */ 13461 write_csr(dd, CCE_INT_MASK + (8 * i), 0); 13462 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull); 13463 /* CCE_INT_FORCE leave alone */ 13464 /* CCE_INT_BLOCKED read-only */ 13465 } 13466 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++) 13467 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0); 13468 } 13469 13470 /* set MISC CSRs to chip reset defaults */ 13471 static void reset_misc_csrs(struct hfi1_devdata *dd) 13472 { 13473 int i; 13474 13475 for (i = 0; i < 32; i++) { 13476 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0); 13477 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0); 13478 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0); 13479 } 13480 /* 13481 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can 13482 * only be written in 128-byte chunks 13483 */ 13484 /* init RSA engine to clear lingering errors */ 13485 write_csr(dd, MISC_CFG_RSA_CMD, 1); 13486 write_csr(dd, MISC_CFG_RSA_MU, 0); 13487 write_csr(dd, MISC_CFG_FW_CTRL, 0); 13488 /* MISC_STS_8051_DIGEST read-only */ 13489 /* MISC_STS_SBM_DIGEST read-only */ 13490 /* MISC_STS_PCIE_DIGEST read-only */ 13491 /* MISC_STS_FAB_DIGEST read-only */ 13492 /* MISC_ERR_STATUS read-only */ 13493 write_csr(dd, MISC_ERR_MASK, 0); 13494 write_csr(dd, MISC_ERR_CLEAR, ~0ull); 13495 /* MISC_ERR_FORCE leave alone */ 13496 } 13497 13498 /* set TXE CSRs to chip reset defaults */ 13499 static void
reset_txe_csrs(struct hfi1_devdata *dd) 13500 { 13501 int i; 13502 13503 /* 13504 * TXE Kernel CSRs 13505 */ 13506 write_csr(dd, SEND_CTRL, 0); 13507 __cm_reset(dd, 0); /* reset CM internal state */ 13508 /* SEND_CONTEXTS read-only */ 13509 /* SEND_DMA_ENGINES read-only */ 13510 /* SEND_PIO_MEM_SIZE read-only */ 13511 /* SEND_DMA_MEM_SIZE read-only */ 13512 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0); 13513 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ 13514 /* SEND_PIO_ERR_STATUS read-only */ 13515 write_csr(dd, SEND_PIO_ERR_MASK, 0); 13516 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull); 13517 /* SEND_PIO_ERR_FORCE leave alone */ 13518 /* SEND_DMA_ERR_STATUS read-only */ 13519 write_csr(dd, SEND_DMA_ERR_MASK, 0); 13520 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull); 13521 /* SEND_DMA_ERR_FORCE leave alone */ 13522 /* SEND_EGRESS_ERR_STATUS read-only */ 13523 write_csr(dd, SEND_EGRESS_ERR_MASK, 0); 13524 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull); 13525 /* SEND_EGRESS_ERR_FORCE leave alone */ 13526 write_csr(dd, SEND_BTH_QP, 0); 13527 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0); 13528 write_csr(dd, SEND_SC2VLT0, 0); 13529 write_csr(dd, SEND_SC2VLT1, 0); 13530 write_csr(dd, SEND_SC2VLT2, 0); 13531 write_csr(dd, SEND_SC2VLT3, 0); 13532 write_csr(dd, SEND_LEN_CHECK0, 0); 13533 write_csr(dd, SEND_LEN_CHECK1, 0); 13534 /* SEND_ERR_STATUS read-only */ 13535 write_csr(dd, SEND_ERR_MASK, 0); 13536 write_csr(dd, SEND_ERR_CLEAR, ~0ull); 13537 /* SEND_ERR_FORCE read-only */ 13538 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++) 13539 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); 13540 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++) 13541 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); 13542 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++) 13543 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); 13544 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++) 13545 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); 13546 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++) 13547 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); 13548 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); 13549 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); 13550 /* SEND_CM_CREDIT_USED_STATUS read-only */ 13551 write_csr(dd, SEND_CM_TIMER_CTRL, 0); 13552 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); 13553 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0); 13554 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); 13555 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); 13556 for (i = 0; i < TXE_NUM_DATA_VL; i++) 13557 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 13558 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 13559 /* SEND_CM_CREDIT_USED_VL read-only */ 13560 /* SEND_CM_CREDIT_USED_VL15 read-only */ 13561 /* SEND_EGRESS_CTXT_STATUS read-only */ 13562 /* SEND_EGRESS_SEND_DMA_STATUS read-only */ 13563 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull); 13564 /* SEND_EGRESS_ERR_INFO read-only */ 13565 /* SEND_EGRESS_ERR_SOURCE read-only */ 13566 13567 /* 13568 * TXE Per-Context CSRs 13569 */ 13570 for (i = 0; i < chip_send_contexts(dd); i++) { 13571 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); 13572 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0); 13573 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); 13574 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0); 13575 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0); 13576 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull); 13577 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0); 13578 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0); 13579 write_kctxt_csr(dd, i, 
SEND_CTXT_CHECK_JOB_KEY, 0); 13580 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0); 13581 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0); 13582 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0); 13583 } 13584 13585 /* 13586 * TXE Per-SDMA CSRs 13587 */ 13588 for (i = 0; i < chip_sdma_engines(dd); i++) { 13589 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); 13590 /* SEND_DMA_STATUS read-only */ 13591 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0); 13592 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0); 13593 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0); 13594 /* SEND_DMA_HEAD read-only */ 13595 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0); 13596 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0); 13597 /* SEND_DMA_IDLE_CNT read-only */ 13598 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0); 13599 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0); 13600 /* SEND_DMA_DESC_FETCHED_CNT read-only */ 13601 /* SEND_DMA_ENG_ERR_STATUS read-only */ 13602 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0); 13603 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull); 13604 /* SEND_DMA_ENG_ERR_FORCE leave alone */ 13605 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0); 13606 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0); 13607 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0); 13608 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0); 13609 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0); 13610 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0); 13611 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0); 13612 } 13613 } 13614 13615 /* 13616 * Expect on entry: 13617 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0 13618 */ 13619 static void init_rbufs(struct hfi1_devdata *dd) 13620 { 13621 u64 reg; 13622 int count; 13623 13624 /* 13625 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are 13626 * clear. 13627 */ 13628 count = 0; 13629 while (1) { 13630 reg = read_csr(dd, RCV_STATUS); 13631 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK 13632 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0) 13633 break; 13634 /* 13635 * Give up after 1ms - maximum wait time. 13636 * 13637 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at 13638 * 250MB/s bandwidth. Lower rate to 66% for overhead to get: 13639 * 136 KiB / (66% * 250MB/s) = 844us 13640 */ 13641 if (count++ > 500) { 13642 dd_dev_err(dd, 13643 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", 13644 __func__, reg); 13645 break; 13646 } 13647 udelay(2); /* do not busy-wait the CSR */ 13648 } 13649 13650 /* start the init - expect RcvCtrl to be 0 */ 13651 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK); 13652 13653 /* 13654 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief 13655 * period after the write before RcvStatus.RxRbufInitDone is valid. 13656 * The delay in the first run through the loop below is sufficient and 13657 * required before the first read of RcvStatus.RxRbufInitDone.
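 * The polling loop below therefore delays before its first read of
 * RcvStatus rather than after it.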
13658 */ 13659 read_csr(dd, RCV_CTRL); 13660 13661 /* wait for the init to finish */ 13662 count = 0; 13663 while (1) { 13664 /* delay is required first time through - see above */ 13665 udelay(2); /* do not busy-wait the CSR */ 13666 reg = read_csr(dd, RCV_STATUS); 13667 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK)) 13668 break; 13669 13670 /* give up after 100us - slowest possible at 33MHz is 73us */ 13671 if (count++ > 50) { 13672 dd_dev_err(dd, 13673 "%s: RcvStatus.RxRbufInit not set, continuing\n", 13674 __func__); 13675 break; 13676 } 13677 } 13678 } 13679 13680 /* set RXE CSRs to chip reset defaults */ 13681 static void reset_rxe_csrs(struct hfi1_devdata *dd) 13682 { 13683 int i, j; 13684 13685 /* 13686 * RXE Kernel CSRs 13687 */ 13688 write_csr(dd, RCV_CTRL, 0); 13689 init_rbufs(dd); 13690 /* RCV_STATUS read-only */ 13691 /* RCV_CONTEXTS read-only */ 13692 /* RCV_ARRAY_CNT read-only */ 13693 /* RCV_BUF_SIZE read-only */ 13694 write_csr(dd, RCV_BTH_QP, 0); 13695 write_csr(dd, RCV_MULTICAST, 0); 13696 write_csr(dd, RCV_BYPASS, 0); 13697 write_csr(dd, RCV_VL15, 0); 13698 /* this is a clear-down */ 13699 write_csr(dd, RCV_ERR_INFO, 13700 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK); 13701 /* RCV_ERR_STATUS read-only */ 13702 write_csr(dd, RCV_ERR_MASK, 0); 13703 write_csr(dd, RCV_ERR_CLEAR, ~0ull); 13704 /* RCV_ERR_FORCE leave alone */ 13705 for (i = 0; i < 32; i++) 13706 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0); 13707 for (i = 0; i < 4; i++) 13708 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0); 13709 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++) 13710 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0); 13711 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++) 13712 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0); 13713 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) 13714 clear_rsm_rule(dd, i); 13715 for (i = 0; i < 32; i++) 13716 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0); 13717 13718 /* 13719 * RXE Kernel and User Per-Context CSRs 13720 */ 13721 for (i = 0; i < chip_rcv_contexts(dd); i++) { 13722 /* kernel */ 13723 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); 13724 /* RCV_CTXT_STATUS read-only */ 13725 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0); 13726 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0); 13727 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0); 13728 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0); 13729 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0); 13730 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0); 13731 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0); 13732 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0); 13733 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0); 13734 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0); 13735 13736 /* user */ 13737 /* RCV_HDR_TAIL read-only */ 13738 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0); 13739 /* RCV_EGR_INDEX_TAIL read-only */ 13740 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0); 13741 /* RCV_EGR_OFFSET_TAIL read-only */ 13742 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) { 13743 write_uctxt_csr(dd, i, 13744 RCV_TID_FLOW_TABLE + (8 * j), 0); 13745 } 13746 } 13747 } 13748 13749 /* 13750 * Set sc2vl tables. 
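 * These give both the send side (HFI) and the receive side (DC) an
 * agreed SC -> VL mapping.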
13751 * 13752 * They power on to zeros, so to avoid send context errors 13753 * they need to be set: 13754 * 13755 * SC 0-7 -> VL 0-7 (respectively) 13756 * SC 15 -> VL 15 13757 * otherwise 13758 * -> VL 0 13759 */ 13760 static void init_sc2vl_tables(struct hfi1_devdata *dd) 13761 { 13762 int i; 13763 /* init per architecture spec, constrained by hardware capability */ 13764 13765 /* HFI maps sent packets */ 13766 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( 13767 0, 13768 0, 0, 1, 1, 13769 2, 2, 3, 3, 13770 4, 4, 5, 5, 13771 6, 6, 7, 7)); 13772 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( 13773 1, 13774 8, 0, 9, 0, 13775 10, 0, 11, 0, 13776 12, 0, 13, 0, 13777 14, 0, 15, 15)); 13778 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( 13779 2, 13780 16, 0, 17, 0, 13781 18, 0, 19, 0, 13782 20, 0, 21, 0, 13783 22, 0, 23, 0)); 13784 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( 13785 3, 13786 24, 0, 25, 0, 13787 26, 0, 27, 0, 13788 28, 0, 29, 0, 13789 30, 0, 31, 0)); 13790 13791 /* DC maps received packets */ 13792 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( 13793 15_0, 13794 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 13795 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15)); 13796 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( 13797 31_16, 13798 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 13799 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0)); 13800 13801 /* initialize the cached sc2vl values consistently with h/w */ 13802 for (i = 0; i < 32; i++) { 13803 if (i < 8 || i == 15) 13804 *((u8 *)(dd->sc2vl) + i) = (u8)i; 13805 else 13806 *((u8 *)(dd->sc2vl) + i) = 0; 13807 } 13808 } 13809 13810 /* 13811 * Read chip sizes and then reset parts to sane, disabled, values. We cannot 13812 * depend on the chip going through a power-on reset - a driver may be loaded 13813 * and unloaded many times. 13814 * 13815 * Do not write any CSR values to the chip in this routine - there may be 13816 * a reset following the (possible) FLR in this routine. 13817 * 13818 */ 13819 static int init_chip(struct hfi1_devdata *dd) 13820 { 13821 int i; 13822 int ret = 0; 13823 13824 /* 13825 * Put the HFI CSRs in a known state. 13826 * Combine this with a DC reset. 13827 * 13828 * Stop the device from doing anything while we do a 13829 * reset. We know there are no other active users of 13830 * the device since we are now in charge. Turn off 13831 * off all outbound and inbound traffic and make sure 13832 * the device does not generate any interrupts. 13833 */ 13834 13835 /* disable send contexts and SDMA engines */ 13836 write_csr(dd, SEND_CTRL, 0); 13837 for (i = 0; i < chip_send_contexts(dd); i++) 13838 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); 13839 for (i = 0; i < chip_sdma_engines(dd); i++) 13840 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); 13841 /* disable port (turn off RXE inbound traffic) and contexts */ 13842 write_csr(dd, RCV_CTRL, 0); 13843 for (i = 0; i < chip_rcv_contexts(dd); i++) 13844 write_csr(dd, RCV_CTXT_CTRL, 0); 13845 /* mask all interrupt sources */ 13846 for (i = 0; i < CCE_NUM_INT_CSRS; i++) 13847 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull); 13848 13849 /* 13850 * DC Reset: do a full DC reset before the register clear. 13851 * A recommended length of time to hold is one CSR read, 13852 * so reread the CceDcCtrl. Then, hold the DC in reset 13853 * across the clear. 13854 */ 13855 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK); 13856 (void)read_csr(dd, CCE_DC_CTRL); 13857 13858 if (use_flr) { 13859 /* 13860 * A FLR will reset the SPC core and part of the PCIe. 
* The parts that need to be restored have already been 13862 * saved. 13863 */ 13864 dd_dev_info(dd, "Resetting CSRs with FLR\n"); 13865 13866 /* do the FLR, the DC reset will remain */ 13867 pcie_flr(dd->pcidev); 13868 13869 /* restore command and BARs */ 13870 ret = restore_pci_variables(dd); 13871 if (ret) { 13872 dd_dev_err(dd, "%s: Could not restore PCI variables\n", 13873 __func__); 13874 return ret; 13875 } 13876 13877 if (is_ax(dd)) { 13878 dd_dev_info(dd, "Resetting CSRs with FLR\n"); 13879 pcie_flr(dd->pcidev); 13880 ret = restore_pci_variables(dd); 13881 if (ret) { 13882 dd_dev_err(dd, "%s: Could not restore PCI variables\n", 13883 __func__); 13884 return ret; 13885 } 13886 } 13887 } else { 13888 dd_dev_info(dd, "Resetting CSRs with writes\n"); 13889 reset_cce_csrs(dd); 13890 reset_txe_csrs(dd); 13891 reset_rxe_csrs(dd); 13892 reset_misc_csrs(dd); 13893 } 13894 /* clear the DC reset */ 13895 write_csr(dd, CCE_DC_CTRL, 0); 13896 13897 /* Set the LED off */ 13898 setextled(dd, 0); 13899 13900 /* 13901 * Clear the QSFP reset. 13902 * A FLR enforces a 0 on all out pins. The driver does not touch 13903 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and 13904 * holds anything plugged in constantly in reset, if it pays attention 13905 * to RESET_N. 13906 * Prime examples of this are optical cables. Set all pins high. 13907 * I2CCLK and I2CDAT will change per direction, and INT_N and 13908 * MODPRS_N are input only and their value is ignored. 13909 */ 13910 write_csr(dd, ASIC_QSFP1_OUT, 0x1f); 13911 write_csr(dd, ASIC_QSFP2_OUT, 0x1f); 13912 init_chip_resources(dd); 13913 return ret; 13914 } 13915 13916 static void init_early_variables(struct hfi1_devdata *dd) 13917 { 13918 int i; 13919 13920 /* assign link credit variables */ 13921 dd->vau = CM_VAU; 13922 dd->link_credits = CM_GLOBAL_CREDITS; 13923 if (is_ax(dd)) 13924 dd->link_credits--; 13925 dd->vcu = cu_to_vcu(hfi1_cu); 13926 /* enough room for 8 MAD packets plus header - 17K */ 13927 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); 13928 if (dd->vl15_init > dd->link_credits) 13929 dd->vl15_init = dd->link_credits; 13930 13931 write_uninitialized_csrs_and_memories(dd); 13932 13933 if (HFI1_CAP_IS_KSET(PKEY_CHECK)) 13934 for (i = 0; i < dd->num_pports; i++) { 13935 struct hfi1_pportdata *ppd = &dd->pport[i]; 13936 13937 set_partition_keys(ppd); 13938 } 13939 init_sc2vl_tables(dd); 13940 } 13941 13942 static void init_kdeth_qp(struct hfi1_devdata *dd) 13943 { 13944 /* user changed the KDETH_QP */ 13945 if (kdeth_qp != 0 && kdeth_qp >= 0xff) { 13946 /* out of range or illegal value */ 13947 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring"); 13948 kdeth_qp = 0; 13949 } 13950 if (kdeth_qp == 0) /* not set, or failed range check */ 13951 kdeth_qp = DEFAULT_KDETH_QP; 13952 13953 write_csr(dd, SEND_BTH_QP, 13954 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) << 13955 SEND_BTH_QP_KDETH_QP_SHIFT); 13956 13957 write_csr(dd, RCV_BTH_QP, 13958 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) << 13959 RCV_BTH_QP_KDETH_QP_SHIFT); 13960 } 13961 13962 /** 13963 * init_qpmap_table 13964 * @dd - device data 13965 * @first_ctxt - first context 13966 * @last_ctxt - last context 13967 * 13968 * This routine sets the qpn mapping table that 13969 * is indexed by qpn[8:1]. 13970 * 13971 * The routine will round robin the 256 settings 13972 * from first_ctxt to last_ctxt. 13973 * 13974 * The first/last looks ahead to having specialized 13975 * receive contexts for mgmt and bypass.
Normal 13976 * verbs traffic will be assumed to be on a range 13977 * of receive contexts. 13978 */ 13979 static void init_qpmap_table(struct hfi1_devdata *dd, 13980 u32 first_ctxt, 13981 u32 last_ctxt) 13982 { 13983 u64 reg = 0; 13984 u64 regno = RCV_QP_MAP_TABLE; 13985 int i; 13986 u64 ctxt = first_ctxt; 13987 13988 for (i = 0; i < 256; i++) { 13989 reg |= ctxt << (8 * (i % 8)); 13990 ctxt++; 13991 if (ctxt > last_ctxt) 13992 ctxt = first_ctxt; 13993 if (i % 8 == 7) { 13994 write_csr(dd, regno, reg); 13995 reg = 0; 13996 regno += 8; 13997 } 13998 } 13999 14000 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK 14001 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK); 14002 } 14003 14004 struct rsm_map_table { 14005 u64 map[NUM_MAP_REGS]; 14006 unsigned int used; 14007 }; 14008 14009 struct rsm_rule_data { 14010 u8 offset; 14011 u8 pkt_type; 14012 u32 field1_off; 14013 u32 field2_off; 14014 u32 index1_off; 14015 u32 index1_width; 14016 u32 index2_off; 14017 u32 index2_width; 14018 u32 mask1; 14019 u32 value1; 14020 u32 mask2; 14021 u32 value2; 14022 }; 14023 14024 /* 14025 * Return an initialized RMT map table for users to fill in. OK if it 14026 * returns NULL, indicating no table. 14027 */ 14028 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd) 14029 { 14030 struct rsm_map_table *rmt; 14031 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if A0 version */ 14032 14033 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL); 14034 if (rmt) { 14035 memset(rmt->map, rxcontext, sizeof(rmt->map)); 14036 rmt->used = 0; 14037 } 14038 14039 return rmt; 14040 } 14041 14042 /* 14043 * Write the final RMT map table to the chip and free the table. OK if 14044 * table is NULL. 14045 */ 14046 static void complete_rsm_map_table(struct hfi1_devdata *dd, 14047 struct rsm_map_table *rmt) 14048 { 14049 int i; 14050 14051 if (rmt) { 14052 /* write table to chip */ 14053 for (i = 0; i < NUM_MAP_REGS; i++) 14054 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); 14055 14056 /* enable RSM */ 14057 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); 14058 } 14059 } 14060 14061 /* 14062 * Add a receive side mapping rule. 14063 */ 14064 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index, 14065 struct rsm_rule_data *rrd) 14066 { 14067 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 14068 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT | 14069 1ull << rule_index | /* enable bit */ 14070 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT); 14071 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 14072 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | 14073 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | 14074 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | 14075 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | 14076 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | 14077 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); 14078 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 14079 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT | 14080 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT | 14081 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT | 14082 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT); 14083 } 14084 14085 /* 14086 * Clear a receive side mapping rule.
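 * Zeroing RCV_RSM_CFG drops the per-rule enable bit; the SELECT and
 * MATCH CSRs are cleared as well so no stale configuration survives a
 * later re-enable.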
14085 /*
14086 * Clear a receive side mapping rule.
14087 */
14088 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14089 {
14090 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14091 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14092 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14093 }
14094
14095 /* return the number of RSM map table entries that will be used for QOS */
14096 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14097 unsigned int *np)
14098 {
14099 int i;
14100 unsigned int m, n;
14101 u8 max_by_vl = 0;
14102
14103 /* is QOS active at all? */
14104 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14105 num_vls == 1 ||
14106 krcvqsset <= 1)
14107 goto no_qos;
14108
14109 /* determine bits for qpn */
14110 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14111 if (krcvqs[i] > max_by_vl)
14112 max_by_vl = krcvqs[i];
14113 if (max_by_vl > 32)
14114 goto no_qos;
14115 m = ilog2(__roundup_pow_of_two(max_by_vl));
14116
14117 /* determine bits for vl */
14118 n = ilog2(__roundup_pow_of_two(num_vls));
14119
14120 /* reject if too much is used */
14121 if ((m + n) > 7)
14122 goto no_qos;
14123
14124 if (mp)
14125 *mp = m;
14126 if (np)
14127 *np = n;
14128
14129 return 1 << (m + n);
14130
14131 no_qos:
14132 if (mp)
14133 *mp = 0;
14134 if (np)
14135 *np = 0;
14136 return 0;
14137 }
14138
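/*
 * Worked example for qos_rmt_entries() above (assuming the QOS
 * preconditions in the routine hold): with num_vls = 8 and every
 * krcvqs[i] = 2, max_by_vl = 2 gives m = 1, and n = ilog2(8) = 3,
 * so the routine reports 1 << (1 + 3) = 16 RSM map table entries.
 */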
14139 /**
14140 * init_qos - init RX qos
14141 * @dd - device data
14142 * @rmt - RSM map table
14143 *
14144 * This routine initializes Rule 0 and the RSM map table to implement
14145 * quality of service (qos).
14146 *
14147 * If all of the limit tests succeed, qos is applied based on the array
14148 * interpretation of krcvqs where entry 0 is VL0.
14149 *
14150 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14151 * feed both the RSM map table and the single rule.
14152 */
14153 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14154 {
14155 struct rsm_rule_data rrd;
14156 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14157 unsigned int rmt_entries;
14158 u64 reg;
14159
14160 if (!rmt)
14161 goto bail;
14162 rmt_entries = qos_rmt_entries(dd, &m, &n);
14163 if (rmt_entries == 0)
14164 goto bail;
14165 qpns_per_vl = 1 << m;
14166
14167 /* enough room in the map table? */
14168 rmt_entries = 1 << (m + n);
14169 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14170 goto bail;
14171
14172 /* add qos entries to the RSM map table */
14173 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14174 unsigned tctxt;
14175
14176 for (qpn = 0, tctxt = ctxt;
14177 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14178 unsigned idx, regoff, regidx;
14179
14180 /* generate the index the hardware will produce */
14181 idx = rmt->used + ((qpn << n) ^ i);
14182 regoff = (idx % 8) * 8;
14183 regidx = idx / 8;
14184 /* replace default with context number */
14185 reg = rmt->map[regidx];
14186 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14187 << regoff);
14188 reg |= (u64)(tctxt++) << regoff;
14189 rmt->map[regidx] = reg;
14190 if (tctxt == ctxt + krcvqs[i])
14191 tctxt = ctxt;
14192 }
14193 ctxt += krcvqs[i];
14194 }
14195
14196 rrd.offset = rmt->used;
14197 rrd.pkt_type = 2;
14198 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14199 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14200 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14201 rrd.index1_width = n;
14202 rrd.index2_off = QPN_SELECT_OFFSET;
14203 rrd.index2_width = m + n;
14204 rrd.mask1 = LRH_BTH_MASK;
14205 rrd.value1 = LRH_BTH_VALUE;
14206 rrd.mask2 = LRH_SC_MASK;
14207 rrd.value2 = LRH_SC_VALUE;
14208
14209 /* add rule 0 */
14210 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14211
14212 /* mark RSM map entries as used */
14213 rmt->used += rmt_entries;
14214 /* map everything else to the mcast/err/vl15 context */
14215 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14216 dd->qos_shift = n + 1;
14217 return;
14218 bail:
14219 dd->qos_shift = 1;
14220 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14221 }
14222
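/*
 * Worked example for the map fill in init_qos() above: with n = 3, a
 * packet on VL i = 2 with qpn = 1 produces the index
 * rmt->used + ((1 << 3) ^ 2) = rmt->used + 10, which the loop has
 * pre-loaded with one of the kernel contexts serving VL2.
 */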
14223 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14224 struct rsm_map_table *rmt)
14225 {
14226 struct rsm_rule_data rrd;
14227 u64 reg;
14228 int i, idx, regoff, regidx;
14229 u8 offset;
14230
14231 /* there needs to be enough room in the map table */
14232 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14233 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14234 return;
14235 }
14236
14237 /*
14238 * RSM will extract the destination context as an index into the
14239 * map table. The destination contexts are a sequential block
14240 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14241 * Map entries are accessed as offset + extracted value. Adjust
14242 * the added offset so this sequence can be placed anywhere in
14243 * the table - as long as the entries themselves do not wrap.
14244 * There are only enough bits in offset for the table size, so
14245 * start with that to allow for a "negative" offset.
14246 */
14247 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14248 (int)dd->first_dyn_alloc_ctxt);
14249
14250 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14251 i < dd->num_rcv_contexts; i++, idx++) {
14252 /* replace with identity mapping */
14253 regoff = (idx % 8) * 8;
14254 regidx = idx / 8;
14255 reg = rmt->map[regidx];
14256 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14257 reg |= (u64)i << regoff;
14258 rmt->map[regidx] = reg;
14259 }
14260
14261 /*
14262 * For RSM intercept of Expected FECN packets:
14263 * o packet type 0 - expected
14264 * o match on F (bit 95), using select/match 1, and
14265 * o match on SH (bit 133), using select/match 2.
14266 *
14267 * Use index 1 to extract the 8-bit receive context from DestQP
14268 * (start at bit 64). Use that as the RSM map table index.
14269 */
14270 rrd.offset = offset;
14271 rrd.pkt_type = 0;
14272 rrd.field1_off = 95;
14273 rrd.field2_off = 133;
14274 rrd.index1_off = 64;
14275 rrd.index1_width = 8;
14276 rrd.index2_off = 0;
14277 rrd.index2_width = 0;
14278 rrd.mask1 = 1;
14279 rrd.value1 = 1;
14280 rrd.mask2 = 1;
14281 rrd.value2 = 1;
14282
14283 /* add rule 1 */
14284 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14285
14286 rmt->used += dd->num_user_contexts;
14287 }
14288
14289 /* Initialize RSM for VNIC */
14290 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14291 {
14292 u8 i, j;
14293 u8 ctx_id = 0;
14294 u64 reg;
14295 u32 regoff;
14296 struct rsm_rule_data rrd;
14297
14298 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14299 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14300 dd->vnic.rmt_start);
14301 return;
14302 }
14303
14304 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14305 dd->vnic.rmt_start,
14306 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14307
14308 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14309 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14310 reg = read_csr(dd, regoff);
14311 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14312 /* Update map register with vnic context */
14313 j = (dd->vnic.rmt_start + i) % 8;
14314 reg &= ~(0xffllu << (j * 8));
14315 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14316 /* Wrap up vnic ctx index */
14317 ctx_id %= dd->vnic.num_ctxt;
14318 /* Write back map register */
14319 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14320 dev_dbg(&(dd)->pcidev->dev,
14321 "Vnic rsm map reg[%d] =0x%llx\n",
14322 regoff - RCV_RSM_MAP_TABLE, reg);
14323
14324 write_csr(dd, regoff, reg);
14325 regoff += 8;
14326 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14327 reg = read_csr(dd, regoff);
14328 }
14329 }
14330
14331 /* Add rule for vnic */
14332 rrd.offset = dd->vnic.rmt_start;
14333 rrd.pkt_type = 4;
14334 /* Match 16B packets */
14335 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14336 rrd.mask1 = L2_TYPE_MASK;
14337 rrd.value1 = L2_16B_VALUE;
14338 /* Match ETH L4 packets */
14339 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14340 rrd.mask2 = L4_16B_TYPE_MASK;
14341 rrd.value2 = L4_16B_ETH_VALUE;
14342 /* Calc context from veswid and entropy */
14343 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14344 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14345 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14346 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14347 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14348
14349 /* Enable RSM if not already enabled */
14350 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14351 }
14352
14353 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14354 {
14355 clear_rsm_rule(dd, RSM_INS_VNIC);
14356
14357 /* Disable RSM if used only by vnic */
14358 if (dd->vnic.rmt_start == 0)
14359 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14360 }
14361
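/*
 * Illustrative note for hfi1_init_vnic_rsm() above: each select field
 * is ilog2(NUM_VNIC_MAP_ENTRIES) bits wide - e.g. 3 bits if
 * NUM_VNIC_MAP_ENTRIES were 8 (an assumed value, not confirmed here) -
 * so the extracted VESWID/entropy bits stay within the map block that
 * starts at dd->vnic.rmt_start.
 */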
14362 static void init_rxe(struct hfi1_devdata *dd)
14363 {
14364 struct rsm_map_table *rmt;
14365 u64 val;
14366
14367 /* enable all receive errors */
14368 write_csr(dd, RCV_ERR_MASK, ~0ull);
14369
14370 rmt = alloc_rsm_map_table(dd);
14371 /* set up QOS, including the QPN map table */
14372 init_qos(dd, rmt);
14373 init_user_fecn_handling(dd, rmt);
14374 complete_rsm_map_table(dd, rmt);
14375 /* record number of used rsm map entries for vnic */
14376 dd->vnic.rmt_start = rmt->used;
14377 kfree(rmt);
14378
14379 /*
14380 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14381 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14382 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14383 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14384 * Max_PayLoad_Size set to its minimum of 128.
14385 *
14386 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14387 * (64 bytes). Max_Payload_Size is possibly modified upward in
14388 * tune_pcie_caps() which is called after this routine.
14389 */
14390
14391 /* Have 16 bytes (4DW) of bypass header available in header queue */
14392 val = read_csr(dd, RCV_BYPASS);
14393 val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14394 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14395 RCV_BYPASS_HDR_SIZE_SHIFT);
14396 write_csr(dd, RCV_BYPASS, val);
14397 }
14398
14399 static void init_other(struct hfi1_devdata *dd)
14400 {
14401 /* enable all CCE errors */
14402 write_csr(dd, CCE_ERR_MASK, ~0ull);
14403 /* enable *some* Misc errors */
14404 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14405 /* enable all DC errors, except LCB */
14406 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14407 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14408 }
14409
14410 /*
14411 * Fill out the given AU table using the given CU. A CU is defined in terms
14412 * of AUs. The table is an encoding: given the index, how many AUs does that
14413 * represent?
14414 *
14415 * NOTE: Assumes that the register layout is the same for the
14416 * local and remote tables.
14417 */
14418 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14419 u32 csr0to3, u32 csr4to7)
14420 {
14421 write_csr(dd, csr0to3,
14422 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14423 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14424 2ull * cu <<
14425 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14426 4ull * cu <<
14427 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14428 write_csr(dd, csr4to7,
14429 8ull * cu <<
14430 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14431 16ull * cu <<
14432 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14433 32ull * cu <<
14434 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14435 64ull * cu <<
14436 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14437 }
14438
14439 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14440 {
14441 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14442 SEND_CM_LOCAL_AU_TABLE4_TO7);
14443 }
14444
14445 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14446 {
14447 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14448 SEND_CM_REMOTE_AU_TABLE4_TO7);
14449 }
14450
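/*
 * Worked example for assign_cm_au_table() above: with cu = 1 the table
 * encodes index -> AUs as 0, 1, 2, 4, 8, 16, 32, 64; doubling cu
 * doubles entries 2 through 7 while entries 0 and 1 stay fixed.
 */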
14451 static void init_txe(struct hfi1_devdata *dd)
14452 {
14453 int i;
14454
14455 /* enable all PIO, SDMA, general, and Egress errors */
14456 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14457 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14458 write_csr(dd, SEND_ERR_MASK, ~0ull);
14459 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14460
14461 /* enable all per-context and per-SDMA engine errors */
14462 for (i = 0; i < chip_send_contexts(dd); i++)
14463 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14464 for (i = 0; i < chip_sdma_engines(dd); i++)
14465 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14466
14467 /* set the local CU to AU mapping */
14468 assign_local_cm_au_table(dd, dd->vcu);
14469
14470 /*
14471 * Set reasonable default for Credit Return Timer
14472 * Don't set on Simulator - causes it to choke.
14473 */
14474 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14475 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14476 }
14477
14478 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14479 u16 jkey)
14480 {
14481 u8 hw_ctxt;
14482 u64 reg;
14483
14484 if (!rcd || !rcd->sc)
14485 return -EINVAL;
14486
14487 hw_ctxt = rcd->sc->hw_context;
14488 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14489 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14490 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14491 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14492 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14493 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14494 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14495 /*
14496 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14497 */
14498 if (!is_ax(dd)) {
14499 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14500 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14501 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14502 }
14503
14504 /* Enable J_KEY check on receive context. */
14505 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14506 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14507 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14508 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14509
14510 return 0;
14511 }
14512
14513 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14514 {
14515 u8 hw_ctxt;
14516 u64 reg;
14517
14518 if (!rcd || !rcd->sc)
14519 return -EINVAL;
14520
14521 hw_ctxt = rcd->sc->hw_context;
14522 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14523 /*
14524 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14525 * This check would not have been enabled for A0 h/w, see
14526 * set_ctxt_jkey().
14527 */
14528 if (!is_ax(dd)) {
14529 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14530 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14531 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14532 }
14533 /* Turn off the J_KEY on the receive side */
14534 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14535
14536 return 0;
14537 }
14538
14539 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14540 u16 pkey)
14541 {
14542 u8 hw_ctxt;
14543 u64 reg;
14544
14545 if (!rcd || !rcd->sc)
14546 return -EINVAL;
14547
14548 hw_ctxt = rcd->sc->hw_context;
14549 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14550 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14551 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14552 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14553 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14554 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14555 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14556
14557 return 0;
14558 }
14559
14560 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14561 {
14562 u8 hw_ctxt;
14563 u64 reg;
14564
14565 if (!ctxt || !ctxt->sc)
14566 return -EINVAL;
14567
14568 hw_ctxt = ctxt->sc->hw_context;
14569 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14570 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14571 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14572 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14573
14574 return 0;
14575 }
14576
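/*
 * Note on the routines above: hfi1_set_ctxt_pkey() also clears
 * DISALLOW_KDETH_PACKETS, so programming a partition key is what
 * re-enables KDETH egress for the context; hfi1_clear_ctxt_pkey()
 * leaves that bit untouched and only disables the key check itself.
 */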
14577 /*
14578 * Start cleaning up the chip. Our clean up happens in multiple
14579 * stages and this is just the first.
14580 */
14581 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14582 {
14583 aspm_exit(dd);
14584 free_cntrs(dd);
14585 free_rcverr(dd);
14586 finish_chip_resources(dd);
14587 }
14588
14589 #define HFI_BASE_GUID(dev) \
14590 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14591
14592 /*
14593 * Information can be shared between the two HFIs on the same ASIC
14594 * in the same OS. This function finds the peer device and sets
14595 * up a shared structure.
14596 */
14597 static int init_asic_data(struct hfi1_devdata *dd)
14598 {
14599 unsigned long flags;
14600 struct hfi1_devdata *tmp, *peer = NULL;
14601 struct hfi1_asic_data *asic_data;
14602 int ret = 0;
14603
14604 /* pre-allocate the asic structure in case we are the first device */
14605 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14606 if (!asic_data)
14607 return -ENOMEM;
14608
14609 spin_lock_irqsave(&hfi1_devs_lock, flags);
14610 /* Find our peer device */
14611 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14612 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14613 dd->unit != tmp->unit) {
14614 peer = tmp;
14615 break;
14616 }
14617 }
14618
14619 if (peer) {
14620 /* use already allocated structure */
14621 dd->asic_data = peer->asic_data;
14622 kfree(asic_data);
14623 } else {
14624 dd->asic_data = asic_data;
14625 mutex_init(&dd->asic_data->asic_resource_mutex);
14626 }
14627 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14628 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14629
14630 /* first one through - set up i2c devices */
14631 if (!peer)
14632 ret = set_up_i2c(dd, dd->asic_data);
14633
14634 return ret;
14635 }
14636
14637 /*
14638 * Set dd->boardname. Use a generic name if a name is not returned from
14639 * EFI variable space.
14640 *
14641 * Return 0 on success, -ENOMEM if space could not be allocated.
14642 */
14643 static int obtain_boardname(struct hfi1_devdata *dd)
14644 {
14645 /* generic board description */
14646 const char generic[] =
14647 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14648 unsigned long size;
14649 int ret;
14650
14651 ret = read_hfi1_efi_var(dd, "description", &size,
14652 (void **)&dd->boardname);
14653 if (ret) {
14654 dd_dev_info(dd, "Board description not found\n");
14655 /* use generic description */
14656 dd->boardname = kstrdup(generic, GFP_KERNEL);
14657 if (!dd->boardname)
14658 return -ENOMEM;
14659 }
14660 return 0;
14661 }
14662
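/*
 * Illustrative note: HFI_BASE_GUID() masks off the single
 * GUID_HFI_INDEX_SHIFT bit, which is the bit in which the two HFIs of
 * one ASIC differ, so the peer search in init_asic_data() matches
 * exactly the other device on the same chip.
 */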
14663 /*
14664 * Check the interrupt registers to make sure that they are mapped correctly.
14665 * It is intended to help the user identify any mismapping by the VMM
14666 * when the driver is running in a VM. This function should only be
14667 * called before interrupts are set up properly.
14668 *
14669 * Return 0 on success, -EINVAL on failure.
14670 */
14671 static int check_int_registers(struct hfi1_devdata *dd)
14672 {
14673 u64 reg;
14674 u64 all_bits = ~(u64)0;
14675 u64 mask;
14676
14677 /* Clear CceIntMask[0] to avoid raising any interrupts */
14678 mask = read_csr(dd, CCE_INT_MASK);
14679 write_csr(dd, CCE_INT_MASK, 0ull);
14680 reg = read_csr(dd, CCE_INT_MASK);
14681 if (reg)
14682 goto err_exit;
14683
14684 /* Clear all interrupt status bits */
14685 write_csr(dd, CCE_INT_CLEAR, all_bits);
14686 reg = read_csr(dd, CCE_INT_STATUS);
14687 if (reg)
14688 goto err_exit;
14689
14690 /* Set all interrupt status bits */
14691 write_csr(dd, CCE_INT_FORCE, all_bits);
14692 reg = read_csr(dd, CCE_INT_STATUS);
14693 if (reg != all_bits)
14694 goto err_exit;
14695
14696 /* Restore the interrupt mask */
14697 write_csr(dd, CCE_INT_CLEAR, all_bits);
14698 write_csr(dd, CCE_INT_MASK, mask);
14699
14700 return 0;
14701 err_exit:
14702 write_csr(dd, CCE_INT_MASK, mask);
14703 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14704 return -EINVAL;
14705 }
14706
14707 /**
14708 * hfi1_init_dd() - Initialize most of the dd structure.
14709 * @dd: the hfi1 device data to initialize (the underlying pci_dev
14710 * is available as dd->pcidev)
14711 *
14712 * This is global, and is called directly at init to set up the
14713 * chip-specific function pointers for later use.
14714 */
14715 int hfi1_init_dd(struct hfi1_devdata *dd)
14716 {
14717 struct pci_dev *pdev = dd->pcidev;
14718 struct hfi1_pportdata *ppd;
14719 u64 reg;
14720 int i, ret;
14721 static const char * const inames[] = { /* implementation names */
14722 "RTL silicon",
14723 "RTL VCS simulation",
14724 "RTL FPGA emulation",
14725 "Functional simulator"
14726 };
14727 struct pci_dev *parent = pdev->bus->self;
14728 u32 sdma_engines = chip_sdma_engines(dd);
14729
14730 ppd = dd->pport;
14731 for (i = 0; i < dd->num_pports; i++, ppd++) {
14732 int vl;
14733 /* init common fields */
14734 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14735 /* DC supports 4 link widths */
14736 ppd->link_width_supported =
14737 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14738 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14739 ppd->link_width_downgrade_supported =
14740 ppd->link_width_supported;
14741 /* start out enabling only 4X */
14742 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14743 ppd->link_width_downgrade_enabled =
14744 ppd->link_width_downgrade_supported;
14745 /* link width active is 0 when link is down */
14746 /* link width downgrade active is 0 when link is down */
14747
14748 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14749 num_vls > HFI1_MAX_VLS_SUPPORTED) {
14750 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14751 num_vls, HFI1_MAX_VLS_SUPPORTED);
14752 num_vls = HFI1_MAX_VLS_SUPPORTED;
14753 }
14754 ppd->vls_supported = num_vls;
14755 ppd->vls_operational = ppd->vls_supported;
14756 /* Set the default MTU. */
14757 for (vl = 0; vl < num_vls; vl++)
14758 dd->vld[vl].mtu = hfi1_max_mtu;
14759 dd->vld[15].mtu = MAX_MAD_PACKET;
14760 /*
14761 * Set the initial values to reasonable defaults; they will be
14762 * set for real when the link is up.
14763 */
14764 ppd->overrun_threshold = 0x4;
14765 ppd->phy_error_threshold = 0xf;
14766 ppd->port_crc_mode_enabled = link_crc_mask;
14767 /* initialize supported LTP CRC mode */
14768 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14769 /* initialize enabled LTP CRC mode */
14770 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14771 /* start in offline */
14772 ppd->host_link_state = HLS_DN_OFFLINE;
14773 init_vl_arb_caches(ppd);
14774 }
14775
14776 /*
14777 * Do remaining PCIe setup and save PCIe values in dd.
14778 * Any error printing is already done by the init code.
14779 * On return, we have the chip mapped.
14780 */
14781 ret = hfi1_pcie_ddinit(dd, pdev);
14782 if (ret < 0)
14783 goto bail_free;
14784
14785 /* Save PCI space registers to rewrite after device reset */
14786 ret = save_pci_variables(dd);
14787 if (ret < 0)
14788 goto bail_cleanup;
14789
14790 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14791 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14792 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14793 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14794
14795 /*
14796 * Check interrupt registers mapping if the driver has no access to
14797 * the upstream component. In this case, it is likely that the driver
14798 * is running in a VM.
14799 */
14800 if (!parent) {
14801 ret = check_int_registers(dd);
14802 if (ret)
14803 goto bail_cleanup;
14804 }
14805
14806 /*
14807 * obtain the hardware ID - NOT related to unit, which is a
14808 * software enumeration
14809 */
14810 reg = read_csr(dd, CCE_REVISION2);
14811 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14812 & CCE_REVISION2_HFI_ID_MASK;
14813 /* the variable size will remove unwanted bits */
14814 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14815 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14816 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14817 dd->icode < ARRAY_SIZE(inames) ?
14818 inames[dd->icode] : "unknown", (int)dd->irev);
14819
14820 /* speeds the hardware can support */
14821 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14822 /* speeds allowed to run at */
14823 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14824 /* give a reasonable active value, will be set on link up */
14825 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14826
14827 /* fix up link widths for emulation _p */
14828 ppd = dd->pport;
14829 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14830 ppd->link_width_supported =
14831 ppd->link_width_enabled =
14832 ppd->link_width_downgrade_supported =
14833 ppd->link_width_downgrade_enabled =
14834 OPA_LINK_WIDTH_1X;
14835 }
14836 /* ensure num_vls isn't larger than number of sdma engines */
14837 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14838 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14839 num_vls, sdma_engines);
14840 num_vls = sdma_engines;
14841 ppd->vls_supported = sdma_engines;
14842 ppd->vls_operational = ppd->vls_supported;
14843 }
14844
14845 /*
14846 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14847 * Limit the max if larger than the field holds. If timeout is
14848 * non-zero, then the calculated field will be at least 1.
14849 *
14850 * Must be after icode is set up - the cclock rate depends
14851 * on knowing the hardware being used.
14852 */
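/*
 * Worked example (assuming, purely for illustration, a 1 ns cclock):
 * a rcv_intr_timeout of 640 ns converts to 640 cclocks and a CSR
 * reload value of 640 / 64 = 10; a small non-zero timeout that
 * computes to 0 is bumped to 1 below.
 */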
14853 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14854 if (dd->rcv_intr_timeout_csr >
14855 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14856 dd->rcv_intr_timeout_csr =
14857 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14858 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14859 dd->rcv_intr_timeout_csr = 1;
14860
14861 /* needs to be done before we look for the peer device */
14862 read_guid(dd);
14863
14864 /* set up shared ASIC data with peer device */
14865 ret = init_asic_data(dd);
14866 if (ret)
14867 goto bail_cleanup;
14868
14869 /* obtain chip sizes, reset chip CSRs */
14870 ret = init_chip(dd);
14871 if (ret)
14872 goto bail_cleanup;
14873
14874 /* read in the PCIe link speed information */
14875 ret = pcie_speeds(dd);
14876 if (ret)
14877 goto bail_cleanup;
14878
14879 /* call before get_platform_config(), after init_chip_resources() */
14880 ret = eprom_init(dd);
14881 if (ret)
14882 goto bail_free_rcverr;
14883
14884 /* Needs to be called before hfi1_firmware_init */
14885 get_platform_config(dd);
14886
14887 /* read in firmware */
14888 ret = hfi1_firmware_init(dd);
14889 if (ret)
14890 goto bail_cleanup;
14891
14892 /*
14893 * In general, the PCIe Gen3 transition must occur after the
14894 * chip has been idled (so it won't initiate any PCIe transactions
14895 * e.g. an interrupt) and before the driver changes any registers
14896 * (the transition will reset the registers).
14897 *
14898 * In particular, place this call after:
14899 * - init_chip() - the chip will not initiate any PCIe transactions
14900 * - pcie_speeds() - reads the current link speed
14901 * - hfi1_firmware_init() - the needed firmware is ready to be
14902 * downloaded
14903 */
14904 ret = do_pcie_gen3_transition(dd);
14905 if (ret)
14906 goto bail_cleanup;
14907
14908 /*
14909 * This should probably occur in hfi1_pcie_init(), but historically
14910 * occurs after the do_pcie_gen3_transition() code.
14911 */
14912 tune_pcie_caps(dd);
14913
14914 /* start setting dd values and adjusting CSRs */
14915 init_early_variables(dd);
14916
14917 parse_platform_config(dd);
14918
14919 ret = obtain_boardname(dd);
14920 if (ret)
14921 goto bail_cleanup;
14922
14923 snprintf(dd->boardversion, BOARD_VERS_MAX,
14924 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14925 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14926 (u32)dd->majrev,
14927 (u32)dd->minrev,
14928 (dd->revision >> CCE_REVISION_SW_SHIFT)
14929 & CCE_REVISION_SW_MASK);
14930
14931 ret = set_up_context_variables(dd);
14932 if (ret)
14933 goto bail_cleanup;
14934
14935 /* set initial RXE CSRs */
14936 init_rxe(dd);
14937 /* set initial TXE CSRs */
14938 init_txe(dd);
14939 /* set initial non-RXE, non-TXE CSRs */
14940 init_other(dd);
14941 /* set up KDETH QP prefix in both RX and TX CSRs */
14942 init_kdeth_qp(dd);
14943
14944 ret = hfi1_dev_affinity_init(dd);
14945 if (ret)
14946 goto bail_cleanup;
14947
14948 /* send contexts must be set up before receive contexts */
14949 ret = init_send_contexts(dd);
14950 if (ret)
14951 goto bail_cleanup;
14952
14953 ret = hfi1_create_kctxts(dd);
14954 if (ret)
14955 goto bail_cleanup;
14956
14957 /*
14958 * Initialize aspm, to be done after gen3 transition and setting up
14959 * contexts and before enabling interrupts
14960 */
14961 aspm_init(dd);
14962
14963 ret = init_pervl_scs(dd);
14964 if (ret)
14965 goto bail_cleanup;
14966
14967 /* sdma init */
14968 for (i = 0; i < dd->num_pports; ++i) {
14969 ret = sdma_init(dd, i);
14970 if (ret)
14971 goto bail_cleanup;
14972 }
14973
14974 /* use contexts created by hfi1_create_kctxts */
14975 ret = set_up_interrupts(dd);
14976 if (ret)
14977 goto bail_cleanup;
14978
14979 ret = hfi1_comp_vectors_set_up(dd);
14980 if (ret)
14981 goto bail_clear_intr;
14982
14983 /* set up LCB access - must be after set_up_interrupts() */
14984 init_lcb_access(dd);
14985
14986 /*
14987 * Serial number is created from the base guid:
14988 * [27:24] = base guid [38:35]
14989 * [23: 0] = base guid [23: 0]
14990 */
14991 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14992 (dd->base_guid & 0xFFFFFF) |
14993 ((dd->base_guid >> 11) & 0xF000000));
14994
14995 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14996 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14997 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14998
14999 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15000 if (ret)
15001 goto bail_clear_intr;
15002
15003 thermal_init(dd);
15004
15005 ret = init_cntrs(dd);
15006 if (ret)
15007 goto bail_clear_intr;
15008
15009 ret = init_rcverr(dd);
15010 if (ret)
15011 goto bail_free_cntrs;
15012
15013 init_completion(&dd->user_comp);
15014
15015 /* The user refcount starts with one to indicate an active device */
15016 atomic_set(&dd->user_refcount, 1);
15017
15018 goto bail;
15019
15020 bail_free_rcverr:
15021 free_rcverr(dd);
15022 bail_free_cntrs:
15023 free_cntrs(dd);
15024 bail_clear_intr:
15025 hfi1_comp_vectors_clean_up(dd);
15026 msix_clean_up_interrupts(dd);
15027 bail_cleanup:
15028 hfi1_pcie_ddcleanup(dd);
15029 bail_free:
15030 hfi1_free_devdata(dd);
15031 bail:
15032 return ret;
15033 }
15034
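/*
 * Worked example for the serial number built above: base guid bits
 * [23:0] are kept as-is and bits [38:35] land in bits [27:24] via the
 * ">> 11"; e.g. a base guid of 0x800123456 yields the serial string
 * "0x01123456".
 */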
15035 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15036 u32 dw_len)
15037 {
15038 u32 delta_cycles;
15039 u32 current_egress_rate = ppd->current_egress_rate;
15040 /* rates here are in units of 10^6 bits/sec */
15041
15042 if (desired_egress_rate == -1)
15043 return 0; /* shouldn't happen */
15044
15045 if (desired_egress_rate >= current_egress_rate)
15046 return 0; /* we can't help go faster, only slower */
15047
15048 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15049 egress_cycles(dw_len * 4, current_egress_rate);
15050
15051 return (u16)delta_cycles;
15052 }
15053
15054 /**
15055 * create_pbc - build a pbc for transmission
15056 * @flags: special case flags or-ed in built pbc
15057 * @srate_mbs: static rate in Mbits/sec
15058 * @vl: virtual lane
15059 * @dw_len: dword length (header words + data words + pbc words)
15060 *
15061 * Create a PBC with the given flags, rate, VL, and length.
15062 *
15063 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15064 * for verbs, which does not use this PSM feature. The lone other caller
15065 * is for the diagnostic interface which calls this if the user does not
15066 * supply their own PBC.
15067 */
15068 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15069 u32 dw_len)
15070 {
15071 u64 pbc, delay = 0;
15072
15073 if (unlikely(srate_mbs))
15074 delay = delay_cycles(ppd, srate_mbs, dw_len);
15075
15076 pbc = flags
15077 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15078 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15079 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15080 | (dw_len & PBC_LENGTH_DWS_MASK)
15081 << PBC_LENGTH_DWS_SHIFT;
15082
15083 return pbc;
15084 }
15085
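/*
 * Illustrative use of create_pbc() above: a verbs-style caller sending
 * a 32-dword packet on VL0 with no special flags and no static rate
 * pacing would build
 *
 *     pbc = create_pbc(ppd, 0, 0, 0, 32);
 *
 * a zero srate_mbs skips delay_cycles() entirely.
 */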
15103 */ 15104 static int thermal_init(struct hfi1_devdata *dd) 15105 { 15106 int ret = 0; 15107 15108 if (dd->icode != ICODE_RTL_SILICON || 15109 check_chip_resource(dd, CR_THERM_INIT, NULL)) 15110 return ret; 15111 15112 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); 15113 if (ret) { 15114 THERM_FAILURE(dd, ret, "Acquire SBus"); 15115 return ret; 15116 } 15117 15118 dd_dev_info(dd, "Initializing thermal sensor\n"); 15119 /* Disable polling of thermal readings */ 15120 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); 15121 msleep(100); 15122 /* Thermal Sensor Initialization */ 15123 /* Step 1: Reset the Thermal SBus Receiver */ 15124 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15125 RESET_SBUS_RECEIVER, 0); 15126 if (ret) { 15127 THERM_FAILURE(dd, ret, "Bus Reset"); 15128 goto done; 15129 } 15130 /* Step 2: Set Reset bit in Thermal block */ 15131 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15132 WRITE_SBUS_RECEIVER, 0x1); 15133 if (ret) { 15134 THERM_FAILURE(dd, ret, "Therm Block Reset"); 15135 goto done; 15136 } 15137 /* Step 3: Write clock divider value (100MHz -> 2MHz) */ 15138 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1, 15139 WRITE_SBUS_RECEIVER, 0x32); 15140 if (ret) { 15141 THERM_FAILURE(dd, ret, "Write Clock Div"); 15142 goto done; 15143 } 15144 /* Step 4: Select temperature mode */ 15145 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3, 15146 WRITE_SBUS_RECEIVER, 15147 SBUS_THERM_MONITOR_MODE); 15148 if (ret) { 15149 THERM_FAILURE(dd, ret, "Write Mode Sel"); 15150 goto done; 15151 } 15152 /* Step 5: De-assert block reset and start conversion */ 15153 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15154 WRITE_SBUS_RECEIVER, 0x2); 15155 if (ret) { 15156 THERM_FAILURE(dd, ret, "Write Reset Deassert"); 15157 goto done; 15158 } 15159 /* Step 5.1: Wait for first conversion (21.5ms per spec) */ 15160 msleep(22); 15161 15162 /* Enable polling of thermal readings */ 15163 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); 15164 15165 /* Set initialized flag */ 15166 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); 15167 if (ret) 15168 THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); 15169 15170 done: 15171 release_chip_resource(dd, CR_SBUS); 15172 return ret; 15173 } 15174 15175 static void handle_temp_err(struct hfi1_devdata *dd) 15176 { 15177 struct hfi1_pportdata *ppd = &dd->pport[0]; 15178 /* 15179 * Thermal Critical Interrupt 15180 * Put the device into forced freeze mode, take link down to 15181 * offline, and put DC into reset. 15182 */ 15183 dd_dev_emerg(dd, 15184 "Critical temperature reached! Forcing device into freeze mode!\n"); 15185 dd->flags |= HFI1_FORCED_FREEZE; 15186 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT); 15187 /* 15188 * Shut DC down as much and as quickly as possible. 15189 * 15190 * Step 1: Take the link down to OFFLINE. This will cause the 15191 * 8051 to put the Serdes in reset. However, we don't want to 15192 * go through the entire link state machine since we want to 15193 * shutdown ASAP. Furthermore, this is not a graceful shutdown 15194 * but rather an attempt to save the chip. 15195 * Code below is almost the same as quiet_serdes() but avoids 15196 * all the extra work and the sleeps. 15197 */ 15198 ppd->driver_link_ready = 0; 15199 ppd->link_enabled = 0; 15200 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | 15201 PLS_OFFLINE); 15202 /* 15203 * Step 2: Shutdown LCB and 8051 15204 * After shutdown, do not restore DC_CFG_RESET value. 15205 */ 15206 dc_shutdown(dd); 15207 } 15208