1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright(c) 2015 - 2020 Intel Corporation. 4 * Copyright(c) 2021 Cornelis Networks. 5 */ 6 7 /* 8 * This file contains all of the code that is specific to the HFI chip 9 */ 10 11 #include <linux/pci.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/module.h> 15 16 #include "hfi.h" 17 #include "trace.h" 18 #include "mad.h" 19 #include "pio.h" 20 #include "sdma.h" 21 #include "eprom.h" 22 #include "efivar.h" 23 #include "platform.h" 24 #include "aspm.h" 25 #include "affinity.h" 26 #include "debugfs.h" 27 #include "fault.h" 28 #include "netdev.h" 29 30 uint num_vls = HFI1_MAX_VLS_SUPPORTED; 31 module_param(num_vls, uint, S_IRUGO); 32 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)"); 33 34 /* 35 * Default time to aggregate two 10K packets from the idle state 36 * (timer not running). The timer starts at the end of the first packet, 37 * so only the time for one 10K packet and header plus a bit extra is needed. 
38 * 10 * 1024 + 64 header byte = 10304 byte 39 * 10304 byte / 12.5 GB/s = 824.32ns 40 */ 41 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */ 42 module_param(rcv_intr_timeout, uint, S_IRUGO); 43 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns"); 44 45 uint rcv_intr_count = 16; /* same as qib */ 46 module_param(rcv_intr_count, uint, S_IRUGO); 47 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count"); 48 49 ushort link_crc_mask = SUPPORTED_CRCS; 50 module_param(link_crc_mask, ushort, S_IRUGO); 51 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link"); 52 53 uint loopback; 54 module_param_named(loopback, loopback, uint, S_IRUGO); 55 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable"); 56 57 /* Other driver tunables */ 58 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/ 59 static ushort crc_14b_sideband = 1; 60 static uint use_flr = 1; 61 uint quick_linkup; /* skip LNI */ 62 63 struct flag_table { 64 u64 flag; /* the flag */ 65 char *str; /* description string */ 66 u16 extra; /* extra information */ 67 u16 unused0; 68 u32 unused1; 69 }; 70 71 /* str must be a string constant */ 72 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra} 73 #define FLAG_ENTRY0(str, flag) {flag, str, 0} 74 75 /* Send Error Consequences */ 76 #define SEC_WRITE_DROPPED 0x1 77 #define SEC_PACKET_DROPPED 0x2 78 #define SEC_SC_HALTED 0x4 /* per-context only */ 79 #define SEC_SPC_FREEZE 0x8 /* per-HFI only */ 80 81 #define DEFAULT_KRCVQS 2 82 #define MIN_KERNEL_KCTXTS 2 83 #define FIRST_KERNEL_KCTXT 1 84 85 /* 86 * RSM instance allocation 87 * 0 - User Fecn Handling 88 * 1 - Deprecated 89 * 2 - AIP 90 * 3 - Verbs 91 */ 92 #define RSM_INS_FECN 0 93 #define RSM_INS_DEPRECATED 1 94 #define RSM_INS_AIP 2 95 #define RSM_INS_VERBS 3 96 97 /* Bit offset into the GUID which carries HFI id information */ 98 #define GUID_HFI_INDEX_SHIFT 39 99 100 /* extract the emulation 
revision */ 101 #define emulator_rev(dd) ((dd)->irev >> 8) 102 /* parallel and serial emulation versions are 3 and 4 respectively */ 103 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3) 104 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4) 105 106 /* RSM fields for Verbs */ 107 /* packet type */ 108 #define IB_PACKET_TYPE 2ull 109 #define QW_SHIFT 6ull 110 /* QPN[7..1] */ 111 #define QPN_WIDTH 7ull 112 113 /* LRH.BTH: QW 0, OFFSET 48 - for match */ 114 #define LRH_BTH_QW 0ull 115 #define LRH_BTH_BIT_OFFSET 48ull 116 #define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off)) 117 #define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET) 118 #define LRH_BTH_SELECT 119 #define LRH_BTH_MASK 3ull 120 #define LRH_BTH_VALUE 2ull 121 122 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */ 123 #define LRH_SC_QW 0ull 124 #define LRH_SC_BIT_OFFSET 56ull 125 #define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off)) 126 #define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET) 127 #define LRH_SC_MASK 128ull 128 #define LRH_SC_VALUE 0ull 129 130 /* SC[n..0] QW 0, OFFSET 60 - for select */ 131 #define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull)) 132 133 /* QPN[m+n:1] QW 1, OFFSET 1 */ 134 #define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull)) 135 136 /* RSM fields for AIP */ 137 /* LRH.BTH above is reused for this rule */ 138 139 /* BTH.DESTQP: QW 1, OFFSET 16 for match */ 140 #define BTH_DESTQP_QW 1ull 141 #define BTH_DESTQP_BIT_OFFSET 16ull 142 #define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off)) 143 #define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET) 144 #define BTH_DESTQP_MASK 0xFFull 145 #define BTH_DESTQP_VALUE 0x81ull 146 147 /* DETH.SQPN: QW 1 Offset 56 for select */ 148 /* We use 8 most significant Soure QPN bits as entropy fpr AIP */ 149 #define DETH_AIP_SQPN_QW 3ull 150 #define DETH_AIP_SQPN_BIT_OFFSET 56ull 151 #define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off)) 152 #define 
DETH_AIP_SQPN_SELECT_OFFSET \ 153 DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET) 154 155 /* L4_TYPE QW 1, OFFSET 0 - for match */ 156 #define L4_TYPE_QW 1ull 157 #define L4_TYPE_BIT_OFFSET 0ull 158 #define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off)) 159 #define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET) 160 #define L4_16B_TYPE_MASK 0xFFull 161 #define L4_16B_ETH_VALUE 0x78ull 162 163 /* 16B VESWID - for select */ 164 #define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull)) 165 /* 16B ENTROPY - for select */ 166 #define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull)) 167 168 /* defines to build power on SC2VL table */ 169 #define SC2VL_VAL( \ 170 num, \ 171 sc0, sc0val, \ 172 sc1, sc1val, \ 173 sc2, sc2val, \ 174 sc3, sc3val, \ 175 sc4, sc4val, \ 176 sc5, sc5val, \ 177 sc6, sc6val, \ 178 sc7, sc7val) \ 179 ( \ 180 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \ 181 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \ 182 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \ 183 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \ 184 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \ 185 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \ 186 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \ 187 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \ 188 ) 189 190 #define DC_SC_VL_VAL( \ 191 range, \ 192 e0, e0val, \ 193 e1, e1val, \ 194 e2, e2val, \ 195 e3, e3val, \ 196 e4, e4val, \ 197 e5, e5val, \ 198 e6, e6val, \ 199 e7, e7val, \ 200 e8, e8val, \ 201 e9, e9val, \ 202 e10, e10val, \ 203 e11, e11val, \ 204 e12, e12val, \ 205 e13, e13val, \ 206 e14, e14val, \ 207 e15, e15val) \ 208 ( \ 209 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \ 210 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \ 211 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \ 212 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \ 213 
((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \ 214 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \ 215 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \ 216 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \ 217 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \ 218 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \ 219 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \ 220 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \ 221 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \ 222 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \ 223 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \ 224 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \ 225 ) 226 227 /* all CceStatus sub-block freeze bits */ 228 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \ 229 | CCE_STATUS_RXE_FROZE_SMASK \ 230 | CCE_STATUS_TXE_FROZE_SMASK \ 231 | CCE_STATUS_TXE_PIO_FROZE_SMASK) 232 /* all CceStatus sub-block TXE pause bits */ 233 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \ 234 | CCE_STATUS_TXE_PAUSED_SMASK \ 235 | CCE_STATUS_SDMA_PAUSED_SMASK) 236 /* all CceStatus sub-block RXE pause bits */ 237 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK 238 239 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL 240 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF 241 242 /* 243 * CCE Error flags. 
244 */ 245 static const struct flag_table cce_err_status_flags[] = { 246 /* 0*/ FLAG_ENTRY0("CceCsrParityErr", 247 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK), 248 /* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr", 249 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK), 250 /* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr", 251 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK), 252 /* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr", 253 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK), 254 /* 4*/ FLAG_ENTRY0("CceTrgtAccessErr", 255 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK), 256 /* 5*/ FLAG_ENTRY0("CceRspdDataParityErr", 257 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK), 258 /* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr", 259 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK), 260 /* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr", 261 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK), 262 /* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr", 263 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK), 264 /* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr", 265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK), 266 /*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr", 267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK), 268 /*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError", 269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK), 270 /*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError", 271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK), 272 /*13*/ FLAG_ENTRY0("PcicRetryMemCorErr", 273 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK), 274 /*14*/ FLAG_ENTRY0("PcicRetryMemCorErr", 275 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK), 276 /*15*/ FLAG_ENTRY0("PcicPostHdQCorErr", 277 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK), 278 /*16*/ FLAG_ENTRY0("PcicPostHdQCorErr", 279 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK), 280 /*17*/ FLAG_ENTRY0("PcicPostHdQCorErr", 281 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK), 282 /*18*/ FLAG_ENTRY0("PcicCplDatQCorErr", 283 
CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK), 284 /*19*/ FLAG_ENTRY0("PcicNPostHQParityErr", 285 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK), 286 /*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr", 287 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK), 288 /*21*/ FLAG_ENTRY0("PcicRetryMemUncErr", 289 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK), 290 /*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr", 291 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK), 292 /*23*/ FLAG_ENTRY0("PcicPostHdQUncErr", 293 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK), 294 /*24*/ FLAG_ENTRY0("PcicPostDatQUncErr", 295 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK), 296 /*25*/ FLAG_ENTRY0("PcicCplHdQUncErr", 297 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK), 298 /*26*/ FLAG_ENTRY0("PcicCplDatQUncErr", 299 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK), 300 /*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr", 301 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK), 302 /*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr", 303 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK), 304 /*29*/ FLAG_ENTRY0("PcicReceiveParityErr", 305 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK), 306 /*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr", 307 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK), 308 /*31*/ FLAG_ENTRY0("LATriggered", 309 CCE_ERR_STATUS_LA_TRIGGERED_SMASK), 310 /*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr", 311 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK), 312 /*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr", 313 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK), 314 /*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr", 315 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK), 316 /*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr", 317 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK), 318 /*36*/ FLAG_ENTRY0("CceMsixTableCorErr", 319 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK), 320 /*37*/ FLAG_ENTRY0("CceMsixTableUncErr", 321 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK), 322 /*38*/ FLAG_ENTRY0("CceIntMapCorErr", 323 
CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK), 324 /*39*/ FLAG_ENTRY0("CceIntMapUncErr", 325 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK), 326 /*40*/ FLAG_ENTRY0("CceMsixCsrParityErr", 327 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK), 328 /*41-63 reserved*/ 329 }; 330 331 /* 332 * Misc Error flags 333 */ 334 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK 335 static const struct flag_table misc_err_status_flags[] = { 336 /* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)), 337 /* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)), 338 /* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)), 339 /* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)), 340 /* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)), 341 /* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)), 342 /* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)), 343 /* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)), 344 /* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)), 345 /* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)), 346 /*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)), 347 /*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)), 348 /*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL)) 349 }; 350 351 /* 352 * TXE PIO Error flags and consequences 353 */ 354 static const struct flag_table pio_err_status_flags[] = { 355 /* 0*/ FLAG_ENTRY("PioWriteBadCtxt", 356 SEC_WRITE_DROPPED, 357 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK), 358 /* 1*/ FLAG_ENTRY("PioWriteAddrParity", 359 SEC_SPC_FREEZE, 360 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK), 361 /* 2*/ FLAG_ENTRY("PioCsrParity", 362 SEC_SPC_FREEZE, 363 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK), 364 /* 3*/ FLAG_ENTRY("PioSbMemFifo0", 365 SEC_SPC_FREEZE, 366 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK), 367 /* 4*/ FLAG_ENTRY("PioSbMemFifo1", 368 SEC_SPC_FREEZE, 369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK), 370 /* 5*/ 
FLAG_ENTRY("PioPccFifoParity", 371 SEC_SPC_FREEZE, 372 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK), 373 /* 6*/ FLAG_ENTRY("PioPecFifoParity", 374 SEC_SPC_FREEZE, 375 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK), 376 /* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity", 377 SEC_SPC_FREEZE, 378 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK), 379 /* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity", 380 SEC_SPC_FREEZE, 381 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK), 382 /* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr", 383 SEC_SPC_FREEZE, 384 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK), 385 /*10*/ FLAG_ENTRY("PioSmPktResetParity", 386 SEC_SPC_FREEZE, 387 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK), 388 /*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc", 389 SEC_SPC_FREEZE, 390 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK), 391 /*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc", 392 SEC_SPC_FREEZE, 393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK), 394 /*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor", 395 0, 396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK), 397 /*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor", 398 0, 399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK), 400 /*15*/ FLAG_ENTRY("PioCreditRetFifoParity", 401 SEC_SPC_FREEZE, 402 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK), 403 /*16*/ FLAG_ENTRY("PioPpmcPblFifo", 404 SEC_SPC_FREEZE, 405 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK), 406 /*17*/ FLAG_ENTRY("PioInitSmIn", 407 0, 408 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK), 409 /*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm", 410 SEC_SPC_FREEZE, 411 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK), 412 /*19*/ FLAG_ENTRY("PioHostAddrMemUnc", 413 SEC_SPC_FREEZE, 414 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK), 415 /*20*/ FLAG_ENTRY("PioHostAddrMemCor", 416 0, 417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK), 418 /*21*/ FLAG_ENTRY("PioWriteDataParity", 419 SEC_SPC_FREEZE, 420 
SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK), 421 /*22*/ FLAG_ENTRY("PioStateMachine", 422 SEC_SPC_FREEZE, 423 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK), 424 /*23*/ FLAG_ENTRY("PioWriteQwValidParity", 425 SEC_WRITE_DROPPED | SEC_SPC_FREEZE, 426 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK), 427 /*24*/ FLAG_ENTRY("PioBlockQwCountParity", 428 SEC_WRITE_DROPPED | SEC_SPC_FREEZE, 429 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK), 430 /*25*/ FLAG_ENTRY("PioVlfVlLenParity", 431 SEC_SPC_FREEZE, 432 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK), 433 /*26*/ FLAG_ENTRY("PioVlfSopParity", 434 SEC_SPC_FREEZE, 435 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK), 436 /*27*/ FLAG_ENTRY("PioVlFifoParity", 437 SEC_SPC_FREEZE, 438 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK), 439 /*28*/ FLAG_ENTRY("PioPpmcBqcMemParity", 440 SEC_SPC_FREEZE, 441 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK), 442 /*29*/ FLAG_ENTRY("PioPpmcSopLen", 443 SEC_SPC_FREEZE, 444 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK), 445 /*30-31 reserved*/ 446 /*32*/ FLAG_ENTRY("PioCurrentFreeCntParity", 447 SEC_SPC_FREEZE, 448 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK), 449 /*33*/ FLAG_ENTRY("PioLastReturnedCntParity", 450 SEC_SPC_FREEZE, 451 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK), 452 /*34*/ FLAG_ENTRY("PioPccSopHeadParity", 453 SEC_SPC_FREEZE, 454 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK), 455 /*35*/ FLAG_ENTRY("PioPecSopHeadParityErr", 456 SEC_SPC_FREEZE, 457 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK), 458 /*36-63 reserved*/ 459 }; 460 461 /* TXE PIO errors that cause an SPC freeze */ 462 #define ALL_PIO_FREEZE_ERR \ 463 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \ 464 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \ 465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \ 466 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \ 467 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \ 
468 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \ 469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \ 470 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \ 471 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \ 472 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \ 473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \ 474 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \ 475 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \ 476 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \ 477 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \ 478 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \ 479 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \ 480 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \ 481 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \ 482 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \ 483 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \ 484 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \ 485 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \ 486 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \ 487 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \ 488 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \ 489 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \ 490 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \ 491 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK) 492 493 /* 494 * TXE SDMA Error flags 495 */ 496 static const struct flag_table sdma_err_status_flags[] = { 497 /* 0*/ FLAG_ENTRY0("SDmaRpyTagErr", 498 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK), 499 /* 1*/ FLAG_ENTRY0("SDmaCsrParityErr", 500 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK), 501 /* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr", 502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK), 503 /* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr", 504 
SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK), 505 /*04-63 reserved*/ 506 }; 507 508 /* TXE SDMA errors that cause an SPC freeze */ 509 #define ALL_SDMA_FREEZE_ERR \ 510 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \ 511 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \ 512 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK) 513 514 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */ 515 #define PORT_DISCARD_EGRESS_ERRS \ 516 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \ 517 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \ 518 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK) 519 520 /* 521 * TXE Egress Error flags 522 */ 523 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK 524 static const struct flag_table egress_err_status_flags[] = { 525 /* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)), 526 /* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)), 527 /* 2 reserved */ 528 /* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr", 529 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)), 530 /* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)), 531 /* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)), 532 /* 6 reserved */ 533 /* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr", 534 SEES(TX_PIO_LAUNCH_INTF_PARITY)), 535 /* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr", 536 SEES(TX_SDMA_LAUNCH_INTF_PARITY)), 537 /* 9-10 reserved */ 538 /*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr", 539 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)), 540 /*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)), 541 /*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)), 542 /*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)), 543 /*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)), 544 /*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr", 545 SEES(TX_SDMA0_DISALLOWED_PACKET)), 546 /*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr", 547 
SEES(TX_SDMA1_DISALLOWED_PACKET)), 548 /*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr", 549 SEES(TX_SDMA2_DISALLOWED_PACKET)), 550 /*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr", 551 SEES(TX_SDMA3_DISALLOWED_PACKET)), 552 /*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr", 553 SEES(TX_SDMA4_DISALLOWED_PACKET)), 554 /*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr", 555 SEES(TX_SDMA5_DISALLOWED_PACKET)), 556 /*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr", 557 SEES(TX_SDMA6_DISALLOWED_PACKET)), 558 /*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr", 559 SEES(TX_SDMA7_DISALLOWED_PACKET)), 560 /*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr", 561 SEES(TX_SDMA8_DISALLOWED_PACKET)), 562 /*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr", 563 SEES(TX_SDMA9_DISALLOWED_PACKET)), 564 /*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr", 565 SEES(TX_SDMA10_DISALLOWED_PACKET)), 566 /*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr", 567 SEES(TX_SDMA11_DISALLOWED_PACKET)), 568 /*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr", 569 SEES(TX_SDMA12_DISALLOWED_PACKET)), 570 /*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr", 571 SEES(TX_SDMA13_DISALLOWED_PACKET)), 572 /*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr", 573 SEES(TX_SDMA14_DISALLOWED_PACKET)), 574 /*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr", 575 SEES(TX_SDMA15_DISALLOWED_PACKET)), 576 /*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr", 577 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)), 578 /*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr", 579 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)), 580 /*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr", 581 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)), 582 /*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr", 583 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)), 584 /*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr", 585 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)), 586 /*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr", 587 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)), 588 /*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr", 589 
SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)), 590 /*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr", 591 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)), 592 /*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr", 593 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)), 594 /*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)), 595 /*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)), 596 /*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)), 597 /*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)), 598 /*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)), 599 /*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)), 600 /*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)), 601 /*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)), 602 /*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)), 603 /*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)), 604 /*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)), 605 /*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)), 606 /*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)), 607 /*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)), 608 /*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)), 609 /*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)), 610 /*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)), 611 /*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)), 612 /*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)), 613 /*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)), 614 /*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)), 615 /*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr", 616 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)), 617 /*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr", 618 SEES(TX_READ_PIO_MEMORY_CSR_UNC)), 619 }; 620 621 /* 622 * TXE Egress 
Error Info flags 623 */ 624 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK 625 static const struct flag_table egress_err_info_flags[] = { 626 /* 0*/ FLAG_ENTRY0("Reserved", 0ull), 627 /* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)), 628 /* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), 629 /* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)), 630 /* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)), 631 /* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)), 632 /* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)), 633 /* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)), 634 /* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)), 635 /* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)), 636 /*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)), 637 /*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)), 638 /*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)), 639 /*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)), 640 /*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)), 641 /*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)), 642 /*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)), 643 /*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)), 644 /*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)), 645 /*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)), 646 /*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)), 647 /*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)), 648 }; 649 650 /* TXE Egress errors that cause an SPC freeze */ 651 #define ALL_TXE_EGRESS_FREEZE_ERR \ 652 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \ 653 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \ 654 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \ 655 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \ 656 | SEES(TX_LAUNCH_CSR_PARITY) \ 657 | SEES(TX_SBRD_CTL_CSR_PARITY) \ 658 | SEES(TX_CONFIG_PARITY) \ 659 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \ 660 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \ 661 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \ 662 | 
	  SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 *
 * Table index matches the bit position within SEND_ERR_STATUS; each
 * entry maps the bit to a printable name via its SMASK.
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static const struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 *
 * The "extra" field of each entry carries the SEC_* consequence bits
 * describing what happens to the send context when the error fires.
 */
static const struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		   SEC_PACKET_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		   SEC_PACKET_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		   SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		   SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		   SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 *
 * Table index matches the bit position within RCV_ERR_STATUS.
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static const struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		    RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		    RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		    RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		    RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		    RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		    RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		    RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		    RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		    RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		    RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		    RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		    RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

/* RXE errors that abort a freeze in progress */
#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static const struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static const struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		    LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		    LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		    LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		    LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		    LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		    LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static const struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static const struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static const struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

/* forward declarations for local helpers defined later in this file */
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;		/* status CSR offset */
	u32 clear;		/* clear CSR offset */
	u32 mask;		/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR,		handle_cce_err,    "CceErr"),
/* 1*/	EE(RCV_ERR,		handle_rxe_err,    "RxeErr"),
/* 2*/	EE(MISC_ERR,		handle_misc_err,   "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR,	handle_pio_err,    "PioErr"),
/* 5*/	EE(SEND_DMA_ERR,	handle_sdma_err,   "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR,	handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR,		handle_txe_err,    "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1,	handle_qsfp_int,	"QSFP1"),
/* 3*/	EE(ASIC_QSFP2,	handle_qsfp_int,	"QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2.  Therefore, we need a constant.  Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR,		handle_dcc_err,	       "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR,	handle_lcb_err,	       "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR,	handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name,\
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}

/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}

/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}

/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}

/*
 * Read (CNTR_MODE_R) or write (CNTR_MODE_W) the CSR at @csr; any other
 * mode is reported and treated as a read of 0.
 */
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}

/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		/* per-SDMA-engine counter: vl selects the engine */
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

/* Return the software error count for SDMA engine @idx, 0 if out of range. */
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

/* Return the interrupt count for SDMA engine @idx, 0 if out of range. */
static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

/* Return the idle interrupt count for SDMA engine @idx, 0 if out of range. */
static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

/* Return the progress interrupt count for SDMA engine @idx, 0 if out of range. */
static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		/* per-VL counter: vl selects the 8-byte slot */
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

/*
 * Access an LCB counter through the read_lcb_csr/write_lcb_csr helpers,
 * which may fail if the LCB cannot be acquired; failure reads as 0.
 */
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		/* stay quiet during shutdown - failure is expected then */
		if (!(dd->flags & HFI1_SHUTDOWN))
			dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}

/* Port Access */
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		/* per-VL counter: vl selects the 8-byte slot */
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}

/* Software defined */
/*
 * Read or write a software-maintained counter word; invalid modes are
 * reported and read as 0.
 */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}

static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}

/*
 * Xmit discards: CNTR_INVALID_VL selects the port total; a valid VL
 * selects the per-VL counter; anything else reads/writes a scratch zero.
 */
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}

/* Sum a per-cpu counter over all possible CPUs. */
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}

/*
 * Read returns the per-cpu total minus the saved zero value @z_val;
 * a write of 0 "zeroes" the counter by resetting @z_val to the current
 * total (per-cpu counters cannot be written to arbitrary values).
 */
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}

static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->ctx0_seq_drop;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}

/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}
1699 1700 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry, 1701 void *context, int vl, int mode, 1702 u64 data) 1703 { 1704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1705 1706 return dd->misc_err_status_cnt[11]; 1707 } 1708 1709 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry, 1710 void *context, int vl, int mode, 1711 u64 data) 1712 { 1713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1714 1715 return dd->misc_err_status_cnt[10]; 1716 } 1717 1718 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry, 1719 void *context, int vl, 1720 int mode, u64 data) 1721 { 1722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1723 1724 return dd->misc_err_status_cnt[9]; 1725 } 1726 1727 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry, 1728 void *context, int vl, int mode, 1729 u64 data) 1730 { 1731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1732 1733 return dd->misc_err_status_cnt[8]; 1734 } 1735 1736 static u64 access_misc_efuse_read_bad_addr_err_cnt( 1737 const struct cntr_entry *entry, 1738 void *context, int vl, int mode, u64 data) 1739 { 1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1741 1742 return dd->misc_err_status_cnt[7]; 1743 } 1744 1745 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry, 1746 void *context, int vl, 1747 int mode, u64 data) 1748 { 1749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1750 1751 return dd->misc_err_status_cnt[6]; 1752 } 1753 1754 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry, 1755 void *context, int vl, int mode, 1756 u64 data) 1757 { 1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1759 1760 return dd->misc_err_status_cnt[5]; 1761 } 1762 1763 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry, 1764 void *context, int vl, int mode, 1765 u64 data) 1766 { 1767 
struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1768 1769 return dd->misc_err_status_cnt[4]; 1770 } 1771 1772 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry, 1773 void *context, int vl, 1774 int mode, u64 data) 1775 { 1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1777 1778 return dd->misc_err_status_cnt[3]; 1779 } 1780 1781 static u64 access_misc_csr_write_bad_addr_err_cnt( 1782 const struct cntr_entry *entry, 1783 void *context, int vl, int mode, u64 data) 1784 { 1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1786 1787 return dd->misc_err_status_cnt[2]; 1788 } 1789 1790 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 1791 void *context, int vl, 1792 int mode, u64 data) 1793 { 1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1795 1796 return dd->misc_err_status_cnt[1]; 1797 } 1798 1799 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry, 1800 void *context, int vl, int mode, 1801 u64 data) 1802 { 1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1804 1805 return dd->misc_err_status_cnt[0]; 1806 } 1807 1808 /* 1809 * Software counter for the aggregate of 1810 * individual CceErrStatus counters 1811 */ 1812 static u64 access_sw_cce_err_status_aggregated_cnt( 1813 const struct cntr_entry *entry, 1814 void *context, int vl, int mode, u64 data) 1815 { 1816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1817 1818 return dd->sw_cce_err_status_aggregate; 1819 } 1820 1821 /* 1822 * Software counters corresponding to each of the 1823 * error status bits within CceErrStatus 1824 */ 1825 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry, 1826 void *context, int vl, int mode, 1827 u64 data) 1828 { 1829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1830 1831 return dd->cce_err_status_cnt[40]; 1832 } 1833 1834 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry 
*entry, 1835 void *context, int vl, int mode, 1836 u64 data) 1837 { 1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1839 1840 return dd->cce_err_status_cnt[39]; 1841 } 1842 1843 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry, 1844 void *context, int vl, int mode, 1845 u64 data) 1846 { 1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1848 1849 return dd->cce_err_status_cnt[38]; 1850 } 1851 1852 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry, 1853 void *context, int vl, int mode, 1854 u64 data) 1855 { 1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1857 1858 return dd->cce_err_status_cnt[37]; 1859 } 1860 1861 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry, 1862 void *context, int vl, int mode, 1863 u64 data) 1864 { 1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1866 1867 return dd->cce_err_status_cnt[36]; 1868 } 1869 1870 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt( 1871 const struct cntr_entry *entry, 1872 void *context, int vl, int mode, u64 data) 1873 { 1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1875 1876 return dd->cce_err_status_cnt[35]; 1877 } 1878 1879 static u64 access_cce_rcpl_async_fifo_parity_err_cnt( 1880 const struct cntr_entry *entry, 1881 void *context, int vl, int mode, u64 data) 1882 { 1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1884 1885 return dd->cce_err_status_cnt[34]; 1886 } 1887 1888 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry, 1889 void *context, int vl, 1890 int mode, u64 data) 1891 { 1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1893 1894 return dd->cce_err_status_cnt[33]; 1895 } 1896 1897 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry, 1898 void *context, int vl, int mode, 1899 u64 data) 1900 { 1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1902 1903 
return dd->cce_err_status_cnt[32]; 1904 } 1905 1906 static u64 access_la_triggered_cnt(const struct cntr_entry *entry, 1907 void *context, int vl, int mode, u64 data) 1908 { 1909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1910 1911 return dd->cce_err_status_cnt[31]; 1912 } 1913 1914 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry, 1915 void *context, int vl, int mode, 1916 u64 data) 1917 { 1918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1919 1920 return dd->cce_err_status_cnt[30]; 1921 } 1922 1923 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry, 1924 void *context, int vl, int mode, 1925 u64 data) 1926 { 1927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1928 1929 return dd->cce_err_status_cnt[29]; 1930 } 1931 1932 static u64 access_pcic_transmit_back_parity_err_cnt( 1933 const struct cntr_entry *entry, 1934 void *context, int vl, int mode, u64 data) 1935 { 1936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1937 1938 return dd->cce_err_status_cnt[28]; 1939 } 1940 1941 static u64 access_pcic_transmit_front_parity_err_cnt( 1942 const struct cntr_entry *entry, 1943 void *context, int vl, int mode, u64 data) 1944 { 1945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1946 1947 return dd->cce_err_status_cnt[27]; 1948 } 1949 1950 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry, 1951 void *context, int vl, int mode, 1952 u64 data) 1953 { 1954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1955 1956 return dd->cce_err_status_cnt[26]; 1957 } 1958 1959 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry, 1960 void *context, int vl, int mode, 1961 u64 data) 1962 { 1963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1964 1965 return dd->cce_err_status_cnt[25]; 1966 } 1967 1968 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry, 1969 void *context, int vl, int 
mode, 1970 u64 data) 1971 { 1972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1973 1974 return dd->cce_err_status_cnt[24]; 1975 } 1976 1977 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry, 1978 void *context, int vl, int mode, 1979 u64 data) 1980 { 1981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1982 1983 return dd->cce_err_status_cnt[23]; 1984 } 1985 1986 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry, 1987 void *context, int vl, 1988 int mode, u64 data) 1989 { 1990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 1991 1992 return dd->cce_err_status_cnt[22]; 1993 } 1994 1995 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry, 1996 void *context, int vl, int mode, 1997 u64 data) 1998 { 1999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2000 2001 return dd->cce_err_status_cnt[21]; 2002 } 2003 2004 static u64 access_pcic_n_post_dat_q_parity_err_cnt( 2005 const struct cntr_entry *entry, 2006 void *context, int vl, int mode, u64 data) 2007 { 2008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2009 2010 return dd->cce_err_status_cnt[20]; 2011 } 2012 2013 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry, 2014 void *context, int vl, 2015 int mode, u64 data) 2016 { 2017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2018 2019 return dd->cce_err_status_cnt[19]; 2020 } 2021 2022 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry, 2023 void *context, int vl, int mode, 2024 u64 data) 2025 { 2026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2027 2028 return dd->cce_err_status_cnt[18]; 2029 } 2030 2031 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry, 2032 void *context, int vl, int mode, 2033 u64 data) 2034 { 2035 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2036 2037 return dd->cce_err_status_cnt[17]; 2038 } 2039 2040 
static u64 access_pcic_post_dat_q_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->cce_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(
        const struct cntr_entry *entry, void *context, int vl, int mode,
        u64 data)
{
        return ((struct hfi1_devdata *)context)->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry, 2513 void *context, int vl, 2514 int mode, u64 data) 2515 { 2516 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2517 2518 return dd->rcv_err_status_cnt[28]; 2519 } 2520 2521 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt( 2522 const struct cntr_entry *entry, 2523 void *context, int vl, int mode, u64 data) 2524 { 2525 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2526 2527 return dd->rcv_err_status_cnt[27]; 2528 } 2529 2530 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt( 2531 const struct cntr_entry *entry, 2532 void *context, int vl, int mode, u64 data) 2533 { 2534 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2535 2536 return dd->rcv_err_status_cnt[26]; 2537 } 2538 2539 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt( 2540 const struct cntr_entry *entry, 2541 void *context, int vl, int mode, u64 data) 2542 { 2543 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2544 2545 return dd->rcv_err_status_cnt[25]; 2546 } 2547 2548 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt( 2549 const struct cntr_entry *entry, 2550 void *context, int vl, int mode, u64 data) 2551 { 2552 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2553 2554 return dd->rcv_err_status_cnt[24]; 2555 } 2556 2557 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt( 2558 const struct cntr_entry *entry, 2559 void *context, int vl, int mode, u64 data) 2560 { 2561 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2562 2563 return dd->rcv_err_status_cnt[23]; 2564 } 2565 2566 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt( 2567 const struct cntr_entry *entry, 2568 void *context, int vl, int mode, u64 data) 2569 { 2570 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2571 2572 return dd->rcv_err_status_cnt[22]; 2573 } 2574 2575 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt( 2576 const struct cntr_entry 
*entry, 2577 void *context, int vl, int mode, u64 data) 2578 { 2579 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2580 2581 return dd->rcv_err_status_cnt[21]; 2582 } 2583 2584 static u64 access_rx_rbuf_block_list_read_cor_err_cnt( 2585 const struct cntr_entry *entry, 2586 void *context, int vl, int mode, u64 data) 2587 { 2588 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2589 2590 return dd->rcv_err_status_cnt[20]; 2591 } 2592 2593 static u64 access_rx_rbuf_block_list_read_unc_err_cnt( 2594 const struct cntr_entry *entry, 2595 void *context, int vl, int mode, u64 data) 2596 { 2597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2598 2599 return dd->rcv_err_status_cnt[19]; 2600 } 2601 2602 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry, 2603 void *context, int vl, 2604 int mode, u64 data) 2605 { 2606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2607 2608 return dd->rcv_err_status_cnt[18]; 2609 } 2610 2611 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry, 2612 void *context, int vl, 2613 int mode, u64 data) 2614 { 2615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2616 2617 return dd->rcv_err_status_cnt[17]; 2618 } 2619 2620 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt( 2621 const struct cntr_entry *entry, 2622 void *context, int vl, int mode, u64 data) 2623 { 2624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2625 2626 return dd->rcv_err_status_cnt[16]; 2627 } 2628 2629 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt( 2630 const struct cntr_entry *entry, 2631 void *context, int vl, int mode, u64 data) 2632 { 2633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2634 2635 return dd->rcv_err_status_cnt[15]; 2636 } 2637 2638 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry, 2639 void *context, int vl, 2640 int mode, u64 data) 2641 { 2642 struct hfi1_devdata *dd = (struct 
hfi1_devdata *)context; 2643 2644 return dd->rcv_err_status_cnt[14]; 2645 } 2646 2647 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry, 2648 void *context, int vl, 2649 int mode, u64 data) 2650 { 2651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2652 2653 return dd->rcv_err_status_cnt[13]; 2654 } 2655 2656 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry, 2657 void *context, int vl, int mode, 2658 u64 data) 2659 { 2660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2661 2662 return dd->rcv_err_status_cnt[12]; 2663 } 2664 2665 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry, 2666 void *context, int vl, int mode, 2667 u64 data) 2668 { 2669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2670 2671 return dd->rcv_err_status_cnt[11]; 2672 } 2673 2674 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry, 2675 void *context, int vl, int mode, 2676 u64 data) 2677 { 2678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2679 2680 return dd->rcv_err_status_cnt[10]; 2681 } 2682 2683 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry, 2684 void *context, int vl, int mode, 2685 u64 data) 2686 { 2687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2688 2689 return dd->rcv_err_status_cnt[9]; 2690 } 2691 2692 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry, 2693 void *context, int vl, int mode, 2694 u64 data) 2695 { 2696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2697 2698 return dd->rcv_err_status_cnt[8]; 2699 } 2700 2701 static u64 access_rx_rcv_qp_map_table_cor_err_cnt( 2702 const struct cntr_entry *entry, 2703 void *context, int vl, int mode, u64 data) 2704 { 2705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2706 2707 return dd->rcv_err_status_cnt[7]; 2708 } 2709 2710 static u64 access_rx_rcv_qp_map_table_unc_err_cnt( 2711 const struct 
cntr_entry *entry, 2712 void *context, int vl, int mode, u64 data) 2713 { 2714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2715 2716 return dd->rcv_err_status_cnt[6]; 2717 } 2718 2719 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry, 2720 void *context, int vl, int mode, 2721 u64 data) 2722 { 2723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2724 2725 return dd->rcv_err_status_cnt[5]; 2726 } 2727 2728 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry, 2729 void *context, int vl, int mode, 2730 u64 data) 2731 { 2732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2733 2734 return dd->rcv_err_status_cnt[4]; 2735 } 2736 2737 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry, 2738 void *context, int vl, int mode, 2739 u64 data) 2740 { 2741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2742 2743 return dd->rcv_err_status_cnt[3]; 2744 } 2745 2746 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry, 2747 void *context, int vl, int mode, 2748 u64 data) 2749 { 2750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2751 2752 return dd->rcv_err_status_cnt[2]; 2753 } 2754 2755 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry, 2756 void *context, int vl, int mode, 2757 u64 data) 2758 { 2759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2760 2761 return dd->rcv_err_status_cnt[1]; 2762 } 2763 2764 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry, 2765 void *context, int vl, int mode, 2766 u64 data) 2767 { 2768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2769 2770 return dd->rcv_err_status_cnt[0]; 2771 } 2772 2773 /* 2774 * Software counters corresponding to each of the 2775 * error status bits within SendPioErrStatus 2776 */ 2777 static u64 access_pio_pec_sop_head_parity_err_cnt( 2778 const struct cntr_entry *entry, 2779 void *context, int vl, int mode, u64 data) 
2780 { 2781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2782 2783 return dd->send_pio_err_status_cnt[35]; 2784 } 2785 2786 static u64 access_pio_pcc_sop_head_parity_err_cnt( 2787 const struct cntr_entry *entry, 2788 void *context, int vl, int mode, u64 data) 2789 { 2790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2791 2792 return dd->send_pio_err_status_cnt[34]; 2793 } 2794 2795 static u64 access_pio_last_returned_cnt_parity_err_cnt( 2796 const struct cntr_entry *entry, 2797 void *context, int vl, int mode, u64 data) 2798 { 2799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2800 2801 return dd->send_pio_err_status_cnt[33]; 2802 } 2803 2804 static u64 access_pio_current_free_cnt_parity_err_cnt( 2805 const struct cntr_entry *entry, 2806 void *context, int vl, int mode, u64 data) 2807 { 2808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2809 2810 return dd->send_pio_err_status_cnt[32]; 2811 } 2812 2813 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry, 2814 void *context, int vl, int mode, 2815 u64 data) 2816 { 2817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2818 2819 return dd->send_pio_err_status_cnt[31]; 2820 } 2821 2822 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry, 2823 void *context, int vl, int mode, 2824 u64 data) 2825 { 2826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2827 2828 return dd->send_pio_err_status_cnt[30]; 2829 } 2830 2831 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry, 2832 void *context, int vl, int mode, 2833 u64 data) 2834 { 2835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2836 2837 return dd->send_pio_err_status_cnt[29]; 2838 } 2839 2840 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt( 2841 const struct cntr_entry *entry, 2842 void *context, int vl, int mode, u64 data) 2843 { 2844 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2845 2846 return 
dd->send_pio_err_status_cnt[28]; 2847 } 2848 2849 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry, 2850 void *context, int vl, int mode, 2851 u64 data) 2852 { 2853 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2854 2855 return dd->send_pio_err_status_cnt[27]; 2856 } 2857 2858 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry, 2859 void *context, int vl, int mode, 2860 u64 data) 2861 { 2862 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2863 2864 return dd->send_pio_err_status_cnt[26]; 2865 } 2866 2867 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry, 2868 void *context, int vl, 2869 int mode, u64 data) 2870 { 2871 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2872 2873 return dd->send_pio_err_status_cnt[25]; 2874 } 2875 2876 static u64 access_pio_block_qw_count_parity_err_cnt( 2877 const struct cntr_entry *entry, 2878 void *context, int vl, int mode, u64 data) 2879 { 2880 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2881 2882 return dd->send_pio_err_status_cnt[24]; 2883 } 2884 2885 static u64 access_pio_write_qw_valid_parity_err_cnt( 2886 const struct cntr_entry *entry, 2887 void *context, int vl, int mode, u64 data) 2888 { 2889 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2890 2891 return dd->send_pio_err_status_cnt[23]; 2892 } 2893 2894 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry, 2895 void *context, int vl, int mode, 2896 u64 data) 2897 { 2898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2899 2900 return dd->send_pio_err_status_cnt[22]; 2901 } 2902 2903 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry, 2904 void *context, int vl, 2905 int mode, u64 data) 2906 { 2907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2908 2909 return dd->send_pio_err_status_cnt[21]; 2910 } 2911 2912 static u64 access_pio_host_addr_mem_cor_err_cnt(const 
struct cntr_entry *entry, 2913 void *context, int vl, 2914 int mode, u64 data) 2915 { 2916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2917 2918 return dd->send_pio_err_status_cnt[20]; 2919 } 2920 2921 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry, 2922 void *context, int vl, 2923 int mode, u64 data) 2924 { 2925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2926 2927 return dd->send_pio_err_status_cnt[19]; 2928 } 2929 2930 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt( 2931 const struct cntr_entry *entry, 2932 void *context, int vl, int mode, u64 data) 2933 { 2934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2935 2936 return dd->send_pio_err_status_cnt[18]; 2937 } 2938 2939 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry, 2940 void *context, int vl, int mode, 2941 u64 data) 2942 { 2943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2944 2945 return dd->send_pio_err_status_cnt[17]; 2946 } 2947 2948 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry, 2949 void *context, int vl, int mode, 2950 u64 data) 2951 { 2952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2953 2954 return dd->send_pio_err_status_cnt[16]; 2955 } 2956 2957 static u64 access_pio_credit_ret_fifo_parity_err_cnt( 2958 const struct cntr_entry *entry, 2959 void *context, int vl, int mode, u64 data) 2960 { 2961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2962 2963 return dd->send_pio_err_status_cnt[15]; 2964 } 2965 2966 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt( 2967 const struct cntr_entry *entry, 2968 void *context, int vl, int mode, u64 data) 2969 { 2970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2971 2972 return dd->send_pio_err_status_cnt[14]; 2973 } 2974 2975 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt( 2976 const struct cntr_entry *entry, 2977 void *context, int vl, int mode, u64 data) 2978 { 2979 struct 
hfi1_devdata *dd = (struct hfi1_devdata *)context; 2980 2981 return dd->send_pio_err_status_cnt[13]; 2982 } 2983 2984 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt( 2985 const struct cntr_entry *entry, 2986 void *context, int vl, int mode, u64 data) 2987 { 2988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2989 2990 return dd->send_pio_err_status_cnt[12]; 2991 } 2992 2993 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt( 2994 const struct cntr_entry *entry, 2995 void *context, int vl, int mode, u64 data) 2996 { 2997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 2998 2999 return dd->send_pio_err_status_cnt[11]; 3000 } 3001 3002 static u64 access_pio_sm_pkt_reset_parity_err_cnt( 3003 const struct cntr_entry *entry, 3004 void *context, int vl, int mode, u64 data) 3005 { 3006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3007 3008 return dd->send_pio_err_status_cnt[10]; 3009 } 3010 3011 static u64 access_pio_pkt_evict_fifo_parity_err_cnt( 3012 const struct cntr_entry *entry, 3013 void *context, int vl, int mode, u64 data) 3014 { 3015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3016 3017 return dd->send_pio_err_status_cnt[9]; 3018 } 3019 3020 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt( 3021 const struct cntr_entry *entry, 3022 void *context, int vl, int mode, u64 data) 3023 { 3024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3025 3026 return dd->send_pio_err_status_cnt[8]; 3027 } 3028 3029 static u64 access_pio_sbrdctl_crrel_parity_err_cnt( 3030 const struct cntr_entry *entry, 3031 void *context, int vl, int mode, u64 data) 3032 { 3033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3034 3035 return dd->send_pio_err_status_cnt[7]; 3036 } 3037 3038 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry, 3039 void *context, int vl, int mode, 3040 u64 data) 3041 { 3042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3043 3044 return 
dd->send_pio_err_status_cnt[6]; 3045 } 3046 3047 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry, 3048 void *context, int vl, int mode, 3049 u64 data) 3050 { 3051 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3052 3053 return dd->send_pio_err_status_cnt[5]; 3054 } 3055 3056 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry, 3057 void *context, int vl, int mode, 3058 u64 data) 3059 { 3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3061 3062 return dd->send_pio_err_status_cnt[4]; 3063 } 3064 3065 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry, 3066 void *context, int vl, int mode, 3067 u64 data) 3068 { 3069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3070 3071 return dd->send_pio_err_status_cnt[3]; 3072 } 3073 3074 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry, 3075 void *context, int vl, int mode, 3076 u64 data) 3077 { 3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3079 3080 return dd->send_pio_err_status_cnt[2]; 3081 } 3082 3083 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry, 3084 void *context, int vl, 3085 int mode, u64 data) 3086 { 3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3088 3089 return dd->send_pio_err_status_cnt[1]; 3090 } 3091 3092 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry, 3093 void *context, int vl, int mode, 3094 u64 data) 3095 { 3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3097 3098 return dd->send_pio_err_status_cnt[0]; 3099 } 3100 3101 /* 3102 * Software counters corresponding to each of the 3103 * error status bits within SendDmaErrStatus 3104 */ 3105 static u64 access_sdma_pcie_req_tracking_cor_err_cnt( 3106 const struct cntr_entry *entry, 3107 void *context, int vl, int mode, u64 data) 3108 { 3109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3110 3111 return 
dd->send_dma_err_status_cnt[3]; 3112 } 3113 3114 static u64 access_sdma_pcie_req_tracking_unc_err_cnt( 3115 const struct cntr_entry *entry, 3116 void *context, int vl, int mode, u64 data) 3117 { 3118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3119 3120 return dd->send_dma_err_status_cnt[2]; 3121 } 3122 3123 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry, 3124 void *context, int vl, int mode, 3125 u64 data) 3126 { 3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3128 3129 return dd->send_dma_err_status_cnt[1]; 3130 } 3131 3132 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry, 3133 void *context, int vl, int mode, 3134 u64 data) 3135 { 3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3137 3138 return dd->send_dma_err_status_cnt[0]; 3139 } 3140 3141 /* 3142 * Software counters corresponding to each of the 3143 * error status bits within SendEgressErrStatus 3144 */ 3145 static u64 access_tx_read_pio_memory_csr_unc_err_cnt( 3146 const struct cntr_entry *entry, 3147 void *context, int vl, int mode, u64 data) 3148 { 3149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3150 3151 return dd->send_egress_err_status_cnt[63]; 3152 } 3153 3154 static u64 access_tx_read_sdma_memory_csr_err_cnt( 3155 const struct cntr_entry *entry, 3156 void *context, int vl, int mode, u64 data) 3157 { 3158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3159 3160 return dd->send_egress_err_status_cnt[62]; 3161 } 3162 3163 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry, 3164 void *context, int vl, int mode, 3165 u64 data) 3166 { 3167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3168 3169 return dd->send_egress_err_status_cnt[61]; 3170 } 3171 3172 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry, 3173 void *context, int vl, 3174 int mode, u64 data) 3175 { 3176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 
3177 3178 return dd->send_egress_err_status_cnt[60]; 3179 } 3180 3181 static u64 access_tx_read_sdma_memory_cor_err_cnt( 3182 const struct cntr_entry *entry, 3183 void *context, int vl, int mode, u64 data) 3184 { 3185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3186 3187 return dd->send_egress_err_status_cnt[59]; 3188 } 3189 3190 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry, 3191 void *context, int vl, int mode, 3192 u64 data) 3193 { 3194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3195 3196 return dd->send_egress_err_status_cnt[58]; 3197 } 3198 3199 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry, 3200 void *context, int vl, int mode, 3201 u64 data) 3202 { 3203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3204 3205 return dd->send_egress_err_status_cnt[57]; 3206 } 3207 3208 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry, 3209 void *context, int vl, int mode, 3210 u64 data) 3211 { 3212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3213 3214 return dd->send_egress_err_status_cnt[56]; 3215 } 3216 3217 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry, 3218 void *context, int vl, int mode, 3219 u64 data) 3220 { 3221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3222 3223 return dd->send_egress_err_status_cnt[55]; 3224 } 3225 3226 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry, 3227 void *context, int vl, int mode, 3228 u64 data) 3229 { 3230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3231 3232 return dd->send_egress_err_status_cnt[54]; 3233 } 3234 3235 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry, 3236 void *context, int vl, int mode, 3237 u64 data) 3238 { 3239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3240 3241 return dd->send_egress_err_status_cnt[53]; 3242 } 3243 3244 static u64 
access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry, 3245 void *context, int vl, int mode, 3246 u64 data) 3247 { 3248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3249 3250 return dd->send_egress_err_status_cnt[52]; 3251 } 3252 3253 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry, 3254 void *context, int vl, int mode, 3255 u64 data) 3256 { 3257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3258 3259 return dd->send_egress_err_status_cnt[51]; 3260 } 3261 3262 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry, 3263 void *context, int vl, int mode, 3264 u64 data) 3265 { 3266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3267 3268 return dd->send_egress_err_status_cnt[50]; 3269 } 3270 3271 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry, 3272 void *context, int vl, int mode, 3273 u64 data) 3274 { 3275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3276 3277 return dd->send_egress_err_status_cnt[49]; 3278 } 3279 3280 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry, 3281 void *context, int vl, int mode, 3282 u64 data) 3283 { 3284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3285 3286 return dd->send_egress_err_status_cnt[48]; 3287 } 3288 3289 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry, 3290 void *context, int vl, int mode, 3291 u64 data) 3292 { 3293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3294 3295 return dd->send_egress_err_status_cnt[47]; 3296 } 3297 3298 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry, 3299 void *context, int vl, int mode, 3300 u64 data) 3301 { 3302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3303 3304 return dd->send_egress_err_status_cnt[46]; 3305 } 3306 3307 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry, 3308 void *context, int vl, int mode, 
3309 u64 data) 3310 { 3311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3312 3313 return dd->send_egress_err_status_cnt[45]; 3314 } 3315 3316 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry, 3317 void *context, int vl, 3318 int mode, u64 data) 3319 { 3320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3321 3322 return dd->send_egress_err_status_cnt[44]; 3323 } 3324 3325 static u64 access_tx_read_sdma_memory_unc_err_cnt( 3326 const struct cntr_entry *entry, 3327 void *context, int vl, int mode, u64 data) 3328 { 3329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3330 3331 return dd->send_egress_err_status_cnt[43]; 3332 } 3333 3334 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry, 3335 void *context, int vl, int mode, 3336 u64 data) 3337 { 3338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3339 3340 return dd->send_egress_err_status_cnt[42]; 3341 } 3342 3343 static u64 access_tx_credit_return_partiy_err_cnt( 3344 const struct cntr_entry *entry, 3345 void *context, int vl, int mode, u64 data) 3346 { 3347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3348 3349 return dd->send_egress_err_status_cnt[41]; 3350 } 3351 3352 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt( 3353 const struct cntr_entry *entry, 3354 void *context, int vl, int mode, u64 data) 3355 { 3356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3357 3358 return dd->send_egress_err_status_cnt[40]; 3359 } 3360 3361 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt( 3362 const struct cntr_entry *entry, 3363 void *context, int vl, int mode, u64 data) 3364 { 3365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3366 3367 return dd->send_egress_err_status_cnt[39]; 3368 } 3369 3370 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt( 3371 const struct cntr_entry *entry, 3372 void *context, int vl, int mode, u64 data) 3373 { 3374 struct hfi1_devdata *dd = (struct 
hfi1_devdata *)context; 3375 3376 return dd->send_egress_err_status_cnt[38]; 3377 } 3378 3379 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt( 3380 const struct cntr_entry *entry, 3381 void *context, int vl, int mode, u64 data) 3382 { 3383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3384 3385 return dd->send_egress_err_status_cnt[37]; 3386 } 3387 3388 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt( 3389 const struct cntr_entry *entry, 3390 void *context, int vl, int mode, u64 data) 3391 { 3392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3393 3394 return dd->send_egress_err_status_cnt[36]; 3395 } 3396 3397 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt( 3398 const struct cntr_entry *entry, 3399 void *context, int vl, int mode, u64 data) 3400 { 3401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3402 3403 return dd->send_egress_err_status_cnt[35]; 3404 } 3405 3406 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt( 3407 const struct cntr_entry *entry, 3408 void *context, int vl, int mode, u64 data) 3409 { 3410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3411 3412 return dd->send_egress_err_status_cnt[34]; 3413 } 3414 3415 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt( 3416 const struct cntr_entry *entry, 3417 void *context, int vl, int mode, u64 data) 3418 { 3419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3420 3421 return dd->send_egress_err_status_cnt[33]; 3422 } 3423 3424 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt( 3425 const struct cntr_entry *entry, 3426 void *context, int vl, int mode, u64 data) 3427 { 3428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3429 3430 return dd->send_egress_err_status_cnt[32]; 3431 } 3432 3433 static u64 access_tx_sdma15_disallowed_packet_err_cnt( 3434 const struct cntr_entry *entry, 3435 void *context, int vl, int mode, u64 data) 3436 { 3437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 
3438 3439 return dd->send_egress_err_status_cnt[31]; 3440 } 3441 3442 static u64 access_tx_sdma14_disallowed_packet_err_cnt( 3443 const struct cntr_entry *entry, 3444 void *context, int vl, int mode, u64 data) 3445 { 3446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3447 3448 return dd->send_egress_err_status_cnt[30]; 3449 } 3450 3451 static u64 access_tx_sdma13_disallowed_packet_err_cnt( 3452 const struct cntr_entry *entry, 3453 void *context, int vl, int mode, u64 data) 3454 { 3455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3456 3457 return dd->send_egress_err_status_cnt[29]; 3458 } 3459 3460 static u64 access_tx_sdma12_disallowed_packet_err_cnt( 3461 const struct cntr_entry *entry, 3462 void *context, int vl, int mode, u64 data) 3463 { 3464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3465 3466 return dd->send_egress_err_status_cnt[28]; 3467 } 3468 3469 static u64 access_tx_sdma11_disallowed_packet_err_cnt( 3470 const struct cntr_entry *entry, 3471 void *context, int vl, int mode, u64 data) 3472 { 3473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3474 3475 return dd->send_egress_err_status_cnt[27]; 3476 } 3477 3478 static u64 access_tx_sdma10_disallowed_packet_err_cnt( 3479 const struct cntr_entry *entry, 3480 void *context, int vl, int mode, u64 data) 3481 { 3482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3483 3484 return dd->send_egress_err_status_cnt[26]; 3485 } 3486 3487 static u64 access_tx_sdma9_disallowed_packet_err_cnt( 3488 const struct cntr_entry *entry, 3489 void *context, int vl, int mode, u64 data) 3490 { 3491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3492 3493 return dd->send_egress_err_status_cnt[25]; 3494 } 3495 3496 static u64 access_tx_sdma8_disallowed_packet_err_cnt( 3497 const struct cntr_entry *entry, 3498 void *context, int vl, int mode, u64 data) 3499 { 3500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3501 3502 return 
dd->send_egress_err_status_cnt[24]; 3503 } 3504 3505 static u64 access_tx_sdma7_disallowed_packet_err_cnt( 3506 const struct cntr_entry *entry, 3507 void *context, int vl, int mode, u64 data) 3508 { 3509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3510 3511 return dd->send_egress_err_status_cnt[23]; 3512 } 3513 3514 static u64 access_tx_sdma6_disallowed_packet_err_cnt( 3515 const struct cntr_entry *entry, 3516 void *context, int vl, int mode, u64 data) 3517 { 3518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3519 3520 return dd->send_egress_err_status_cnt[22]; 3521 } 3522 3523 static u64 access_tx_sdma5_disallowed_packet_err_cnt( 3524 const struct cntr_entry *entry, 3525 void *context, int vl, int mode, u64 data) 3526 { 3527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3528 3529 return dd->send_egress_err_status_cnt[21]; 3530 } 3531 3532 static u64 access_tx_sdma4_disallowed_packet_err_cnt( 3533 const struct cntr_entry *entry, 3534 void *context, int vl, int mode, u64 data) 3535 { 3536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3537 3538 return dd->send_egress_err_status_cnt[20]; 3539 } 3540 3541 static u64 access_tx_sdma3_disallowed_packet_err_cnt( 3542 const struct cntr_entry *entry, 3543 void *context, int vl, int mode, u64 data) 3544 { 3545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3546 3547 return dd->send_egress_err_status_cnt[19]; 3548 } 3549 3550 static u64 access_tx_sdma2_disallowed_packet_err_cnt( 3551 const struct cntr_entry *entry, 3552 void *context, int vl, int mode, u64 data) 3553 { 3554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3555 3556 return dd->send_egress_err_status_cnt[18]; 3557 } 3558 3559 static u64 access_tx_sdma1_disallowed_packet_err_cnt( 3560 const struct cntr_entry *entry, 3561 void *context, int vl, int mode, u64 data) 3562 { 3563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3564 3565 return dd->send_egress_err_status_cnt[17]; 3566 } 
3567 3568 static u64 access_tx_sdma0_disallowed_packet_err_cnt( 3569 const struct cntr_entry *entry, 3570 void *context, int vl, int mode, u64 data) 3571 { 3572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3573 3574 return dd->send_egress_err_status_cnt[16]; 3575 } 3576 3577 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry, 3578 void *context, int vl, int mode, 3579 u64 data) 3580 { 3581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3582 3583 return dd->send_egress_err_status_cnt[15]; 3584 } 3585 3586 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry, 3587 void *context, int vl, 3588 int mode, u64 data) 3589 { 3590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3591 3592 return dd->send_egress_err_status_cnt[14]; 3593 } 3594 3595 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry, 3596 void *context, int vl, int mode, 3597 u64 data) 3598 { 3599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3600 3601 return dd->send_egress_err_status_cnt[13]; 3602 } 3603 3604 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry, 3605 void *context, int vl, int mode, 3606 u64 data) 3607 { 3608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3609 3610 return dd->send_egress_err_status_cnt[12]; 3611 } 3612 3613 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt( 3614 const struct cntr_entry *entry, 3615 void *context, int vl, int mode, u64 data) 3616 { 3617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3618 3619 return dd->send_egress_err_status_cnt[11]; 3620 } 3621 3622 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry, 3623 void *context, int vl, int mode, 3624 u64 data) 3625 { 3626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3627 3628 return dd->send_egress_err_status_cnt[10]; 3629 } 3630 3631 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry, 
3632 void *context, int vl, int mode, 3633 u64 data) 3634 { 3635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3636 3637 return dd->send_egress_err_status_cnt[9]; 3638 } 3639 3640 static u64 access_tx_sdma_launch_intf_parity_err_cnt( 3641 const struct cntr_entry *entry, 3642 void *context, int vl, int mode, u64 data) 3643 { 3644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3645 3646 return dd->send_egress_err_status_cnt[8]; 3647 } 3648 3649 static u64 access_tx_pio_launch_intf_parity_err_cnt( 3650 const struct cntr_entry *entry, 3651 void *context, int vl, int mode, u64 data) 3652 { 3653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3654 3655 return dd->send_egress_err_status_cnt[7]; 3656 } 3657 3658 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry, 3659 void *context, int vl, int mode, 3660 u64 data) 3661 { 3662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3663 3664 return dd->send_egress_err_status_cnt[6]; 3665 } 3666 3667 static u64 access_tx_incorrect_link_state_err_cnt( 3668 const struct cntr_entry *entry, 3669 void *context, int vl, int mode, u64 data) 3670 { 3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3672 3673 return dd->send_egress_err_status_cnt[5]; 3674 } 3675 3676 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry, 3677 void *context, int vl, int mode, 3678 u64 data) 3679 { 3680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3681 3682 return dd->send_egress_err_status_cnt[4]; 3683 } 3684 3685 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt( 3686 const struct cntr_entry *entry, 3687 void *context, int vl, int mode, u64 data) 3688 { 3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3690 3691 return dd->send_egress_err_status_cnt[3]; 3692 } 3693 3694 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry, 3695 void *context, int vl, int mode, 3696 u64 data) 3697 { 3698 struct hfi1_devdata 
*dd = (struct hfi1_devdata *)context; 3699 3700 return dd->send_egress_err_status_cnt[2]; 3701 } 3702 3703 static u64 access_tx_pkt_integrity_mem_unc_err_cnt( 3704 const struct cntr_entry *entry, 3705 void *context, int vl, int mode, u64 data) 3706 { 3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3708 3709 return dd->send_egress_err_status_cnt[1]; 3710 } 3711 3712 static u64 access_tx_pkt_integrity_mem_cor_err_cnt( 3713 const struct cntr_entry *entry, 3714 void *context, int vl, int mode, u64 data) 3715 { 3716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3717 3718 return dd->send_egress_err_status_cnt[0]; 3719 } 3720 3721 /* 3722 * Software counters corresponding to each of the 3723 * error status bits within SendErrStatus 3724 */ 3725 static u64 access_send_csr_write_bad_addr_err_cnt( 3726 const struct cntr_entry *entry, 3727 void *context, int vl, int mode, u64 data) 3728 { 3729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3730 3731 return dd->send_err_status_cnt[2]; 3732 } 3733 3734 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry, 3735 void *context, int vl, 3736 int mode, u64 data) 3737 { 3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3739 3740 return dd->send_err_status_cnt[1]; 3741 } 3742 3743 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry, 3744 void *context, int vl, int mode, 3745 u64 data) 3746 { 3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3748 3749 return dd->send_err_status_cnt[0]; 3750 } 3751 3752 /* 3753 * Software counters corresponding to each of the 3754 * error status bits within SendCtxtErrStatus 3755 */ 3756 static u64 access_pio_write_out_of_bounds_err_cnt( 3757 const struct cntr_entry *entry, 3758 void *context, int vl, int mode, u64 data) 3759 { 3760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3761 3762 return dd->sw_ctxt_err_status_cnt[4]; 3763 } 3764 3765 static u64 
access_pio_write_overflow_err_cnt(const struct cntr_entry *entry, 3766 void *context, int vl, int mode, 3767 u64 data) 3768 { 3769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3770 3771 return dd->sw_ctxt_err_status_cnt[3]; 3772 } 3773 3774 static u64 access_pio_write_crosses_boundary_err_cnt( 3775 const struct cntr_entry *entry, 3776 void *context, int vl, int mode, u64 data) 3777 { 3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3779 3780 return dd->sw_ctxt_err_status_cnt[2]; 3781 } 3782 3783 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry, 3784 void *context, int vl, 3785 int mode, u64 data) 3786 { 3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3788 3789 return dd->sw_ctxt_err_status_cnt[1]; 3790 } 3791 3792 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry, 3793 void *context, int vl, int mode, 3794 u64 data) 3795 { 3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3797 3798 return dd->sw_ctxt_err_status_cnt[0]; 3799 } 3800 3801 /* 3802 * Software counters corresponding to each of the 3803 * error status bits within SendDmaEngErrStatus 3804 */ 3805 static u64 access_sdma_header_request_fifo_cor_err_cnt( 3806 const struct cntr_entry *entry, 3807 void *context, int vl, int mode, u64 data) 3808 { 3809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3810 3811 return dd->sw_send_dma_eng_err_status_cnt[23]; 3812 } 3813 3814 static u64 access_sdma_header_storage_cor_err_cnt( 3815 const struct cntr_entry *entry, 3816 void *context, int vl, int mode, u64 data) 3817 { 3818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3819 3820 return dd->sw_send_dma_eng_err_status_cnt[22]; 3821 } 3822 3823 static u64 access_sdma_packet_tracking_cor_err_cnt( 3824 const struct cntr_entry *entry, 3825 void *context, int vl, int mode, u64 data) 3826 { 3827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3828 3829 return 
dd->sw_send_dma_eng_err_status_cnt[21]; 3830 } 3831 3832 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry, 3833 void *context, int vl, int mode, 3834 u64 data) 3835 { 3836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3837 3838 return dd->sw_send_dma_eng_err_status_cnt[20]; 3839 } 3840 3841 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry, 3842 void *context, int vl, int mode, 3843 u64 data) 3844 { 3845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3846 3847 return dd->sw_send_dma_eng_err_status_cnt[19]; 3848 } 3849 3850 static u64 access_sdma_header_request_fifo_unc_err_cnt( 3851 const struct cntr_entry *entry, 3852 void *context, int vl, int mode, u64 data) 3853 { 3854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3855 3856 return dd->sw_send_dma_eng_err_status_cnt[18]; 3857 } 3858 3859 static u64 access_sdma_header_storage_unc_err_cnt( 3860 const struct cntr_entry *entry, 3861 void *context, int vl, int mode, u64 data) 3862 { 3863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3864 3865 return dd->sw_send_dma_eng_err_status_cnt[17]; 3866 } 3867 3868 static u64 access_sdma_packet_tracking_unc_err_cnt( 3869 const struct cntr_entry *entry, 3870 void *context, int vl, int mode, u64 data) 3871 { 3872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3873 3874 return dd->sw_send_dma_eng_err_status_cnt[16]; 3875 } 3876 3877 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry, 3878 void *context, int vl, int mode, 3879 u64 data) 3880 { 3881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3882 3883 return dd->sw_send_dma_eng_err_status_cnt[15]; 3884 } 3885 3886 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry, 3887 void *context, int vl, int mode, 3888 u64 data) 3889 { 3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3891 3892 return dd->sw_send_dma_eng_err_status_cnt[14]; 3893 } 3894 3895 
static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry, 3896 void *context, int vl, int mode, 3897 u64 data) 3898 { 3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3900 3901 return dd->sw_send_dma_eng_err_status_cnt[13]; 3902 } 3903 3904 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry, 3905 void *context, int vl, int mode, 3906 u64 data) 3907 { 3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3909 3910 return dd->sw_send_dma_eng_err_status_cnt[12]; 3911 } 3912 3913 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry, 3914 void *context, int vl, int mode, 3915 u64 data) 3916 { 3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3918 3919 return dd->sw_send_dma_eng_err_status_cnt[11]; 3920 } 3921 3922 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry, 3923 void *context, int vl, int mode, 3924 u64 data) 3925 { 3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3927 3928 return dd->sw_send_dma_eng_err_status_cnt[10]; 3929 } 3930 3931 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry, 3932 void *context, int vl, int mode, 3933 u64 data) 3934 { 3935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3936 3937 return dd->sw_send_dma_eng_err_status_cnt[9]; 3938 } 3939 3940 static u64 access_sdma_packet_desc_overflow_err_cnt( 3941 const struct cntr_entry *entry, 3942 void *context, int vl, int mode, u64 data) 3943 { 3944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3945 3946 return dd->sw_send_dma_eng_err_status_cnt[8]; 3947 } 3948 3949 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry, 3950 void *context, int vl, 3951 int mode, u64 data) 3952 { 3953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3954 3955 return dd->sw_send_dma_eng_err_status_cnt[7]; 3956 } 3957 3958 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry, 3959 void 
*context, int vl, int mode, u64 data) 3960 { 3961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3962 3963 return dd->sw_send_dma_eng_err_status_cnt[6]; 3964 } 3965 3966 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry, 3967 void *context, int vl, int mode, 3968 u64 data) 3969 { 3970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3971 3972 return dd->sw_send_dma_eng_err_status_cnt[5]; 3973 } 3974 3975 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry, 3976 void *context, int vl, int mode, 3977 u64 data) 3978 { 3979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3980 3981 return dd->sw_send_dma_eng_err_status_cnt[4]; 3982 } 3983 3984 static u64 access_sdma_tail_out_of_bounds_err_cnt( 3985 const struct cntr_entry *entry, 3986 void *context, int vl, int mode, u64 data) 3987 { 3988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3989 3990 return dd->sw_send_dma_eng_err_status_cnt[3]; 3991 } 3992 3993 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry, 3994 void *context, int vl, int mode, 3995 u64 data) 3996 { 3997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 3998 3999 return dd->sw_send_dma_eng_err_status_cnt[2]; 4000 } 4001 4002 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry, 4003 void *context, int vl, int mode, 4004 u64 data) 4005 { 4006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 4007 4008 return dd->sw_send_dma_eng_err_status_cnt[1]; 4009 } 4010 4011 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry, 4012 void *context, int vl, int mode, 4013 u64 data) 4014 { 4015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 4016 4017 return dd->sw_send_dma_eng_err_status_cnt[0]; 4018 } 4019 4020 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry, 4021 void *context, int vl, int mode, 4022 u64 data) 4023 { 4024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context; 4025 
4026 u64 val = 0; 4027 u64 csr = entry->csr; 4028 4029 val = read_write_csr(dd, csr, mode, data); 4030 if (mode == CNTR_MODE_R) { 4031 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? 4032 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; 4033 } else if (mode == CNTR_MODE_W) { 4034 dd->sw_rcv_bypass_packet_errors = 0; 4035 } else { 4036 dd_dev_err(dd, "Invalid cntr register access mode"); 4037 return 0; 4038 } 4039 return val; 4040 } 4041 4042 #define def_access_sw_cpu(cntr) \ 4043 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \ 4044 void *context, int vl, int mode, u64 data) \ 4045 { \ 4046 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \ 4047 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \ 4048 ppd->ibport_data.rvp.cntr, vl, \ 4049 mode, data); \ 4050 } 4051 4052 def_access_sw_cpu(rc_acks); 4053 def_access_sw_cpu(rc_qacks); 4054 def_access_sw_cpu(rc_delayed_comp); 4055 4056 #define def_access_ibp_counter(cntr) \ 4057 static u64 access_ibp_##cntr(const struct cntr_entry *entry, \ 4058 void *context, int vl, int mode, u64 data) \ 4059 { \ 4060 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \ 4061 \ 4062 if (vl != CNTR_INVALID_VL) \ 4063 return 0; \ 4064 \ 4065 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \ 4066 mode, data); \ 4067 } 4068 4069 def_access_ibp_counter(loop_pkts); 4070 def_access_ibp_counter(rc_resends); 4071 def_access_ibp_counter(rnr_naks); 4072 def_access_ibp_counter(other_naks); 4073 def_access_ibp_counter(rc_timeouts); 4074 def_access_ibp_counter(pkt_drops); 4075 def_access_ibp_counter(dmawait); 4076 def_access_ibp_counter(rc_seqnak); 4077 def_access_ibp_counter(rc_dupreq); 4078 def_access_ibp_counter(rdma_seq); 4079 def_access_ibp_counter(unaligned); 4080 def_access_ibp_counter(seq_naks); 4081 def_access_ibp_counter(rc_crwaits); 4082 4083 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { 4084 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, 
RCV_BUF_OVFL_CNT, CNTR_SYNTH), 4085 [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH), 4086 [C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH), 4087 [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH), 4088 [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH), 4089 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT, 4090 CNTR_NORMAL), 4091 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT, 4092 CNTR_NORMAL), 4093 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs, 4094 RCV_TID_FLOW_GEN_MISMATCH_CNT, 4095 CNTR_NORMAL), 4096 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL, 4097 CNTR_NORMAL), 4098 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs, 4099 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL), 4100 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt, 4101 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL), 4102 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT, 4103 CNTR_NORMAL), 4104 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT, 4105 CNTR_NORMAL), 4106 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT, 4107 CNTR_NORMAL), 4108 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT, 4109 CNTR_NORMAL), 4110 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT, 4111 CNTR_NORMAL), 4112 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT, 4113 CNTR_NORMAL), 4114 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt, 4115 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL), 4116 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt, 4117 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL), 4118 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT, 4119 CNTR_SYNTH), 4120 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH, 4121 access_dc_rcv_err_cnt), 4122 
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT, 4123 CNTR_SYNTH), 4124 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT, 4125 CNTR_SYNTH), 4126 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT, 4127 CNTR_SYNTH), 4128 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts, 4129 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH), 4130 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts, 4131 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT, 4132 CNTR_SYNTH), 4133 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr, 4134 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH), 4135 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT, 4136 CNTR_SYNTH), 4137 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT, 4138 CNTR_SYNTH), 4139 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT, 4140 CNTR_SYNTH), 4141 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT, 4142 CNTR_SYNTH), 4143 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT, 4144 CNTR_SYNTH), 4145 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT, 4146 CNTR_SYNTH), 4147 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT, 4148 CNTR_SYNTH), 4149 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT, 4150 CNTR_SYNTH | CNTR_VL), 4151 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT, 4152 CNTR_SYNTH | CNTR_VL), 4153 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH), 4154 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT, 4155 CNTR_SYNTH | CNTR_VL), 4156 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH), 4157 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT, 4158 CNTR_SYNTH | CNTR_VL), 4159 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT, 4160 CNTR_SYNTH), 4161 [C_DC_RCV_BBL_VL] = 
DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT, 4162 CNTR_SYNTH | CNTR_VL), 4163 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT, 4164 CNTR_SYNTH), 4165 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT, 4166 CNTR_SYNTH | CNTR_VL), 4167 [C_DC_TOTAL_CRC] = 4168 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR, 4169 CNTR_SYNTH), 4170 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0, 4171 CNTR_SYNTH), 4172 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1, 4173 CNTR_SYNTH), 4174 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2, 4175 CNTR_SYNTH), 4176 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3, 4177 CNTR_SYNTH), 4178 [C_DC_CRC_MULT_LN] = 4179 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN, 4180 CNTR_SYNTH), 4181 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT, 4182 CNTR_SYNTH), 4183 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT, 4184 CNTR_SYNTH), 4185 [C_DC_SEQ_CRC_CNT] = 4186 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT, 4187 CNTR_SYNTH), 4188 [C_DC_ESC0_ONLY_CNT] = 4189 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT, 4190 CNTR_SYNTH), 4191 [C_DC_ESC0_PLUS1_CNT] = 4192 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT, 4193 CNTR_SYNTH), 4194 [C_DC_ESC0_PLUS2_CNT] = 4195 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT, 4196 CNTR_SYNTH), 4197 [C_DC_REINIT_FROM_PEER_CNT] = 4198 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 4199 CNTR_SYNTH), 4200 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT, 4201 CNTR_SYNTH), 4202 [C_DC_MISC_FLG_CNT] = 4203 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT, 4204 CNTR_SYNTH), 4205 [C_DC_PRF_GOOD_LTP_CNT] = 4206 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH), 4207 
[C_DC_PRF_ACCEPTED_LTP_CNT] = 4208 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT, 4209 CNTR_SYNTH), 4210 [C_DC_PRF_RX_FLIT_CNT] = 4211 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH), 4212 [C_DC_PRF_TX_FLIT_CNT] = 4213 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH), 4214 [C_DC_PRF_CLK_CNTR] = 4215 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH), 4216 [C_DC_PG_DBG_FLIT_CRDTS_CNT] = 4217 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH), 4218 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] = 4219 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT, 4220 CNTR_SYNTH), 4221 [C_DC_PG_STS_TX_SBE_CNT] = 4222 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH), 4223 [C_DC_PG_STS_TX_MBE_CNT] = 4224 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT, 4225 CNTR_SYNTH), 4226 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL, 4227 access_sw_cpu_intr), 4228 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL, 4229 access_sw_cpu_rcv_limit), 4230 [C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL, 4231 access_sw_ctx0_seq_drop), 4232 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL, 4233 access_sw_vtx_wait), 4234 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL, 4235 access_sw_pio_wait), 4236 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL, 4237 access_sw_pio_drain), 4238 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL, 4239 access_sw_kmem_wait), 4240 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL, 4241 hfi1_access_sw_tid_wait), 4242 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL, 4243 access_sw_send_schedule), 4244 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn", 4245 SEND_DMA_DESC_FETCHED_CNT, 0, 4246 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4247 dev_access_u32_csr), 4248 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0, 4249 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4250 access_sde_int_cnt), 4251 
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0, 4252 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4253 access_sde_err_cnt), 4254 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0, 4255 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4256 access_sde_idle_int_cnt), 4257 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0, 4258 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA, 4259 access_sde_progress_int_cnt), 4260 /* MISC_ERR_STATUS */ 4261 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0, 4262 CNTR_NORMAL, 4263 access_misc_pll_lock_fail_err_cnt), 4264 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0, 4265 CNTR_NORMAL, 4266 access_misc_mbist_fail_err_cnt), 4267 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0, 4268 CNTR_NORMAL, 4269 access_misc_invalid_eep_cmd_err_cnt), 4270 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0, 4271 CNTR_NORMAL, 4272 access_misc_efuse_done_parity_err_cnt), 4273 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0, 4274 CNTR_NORMAL, 4275 access_misc_efuse_write_err_cnt), 4276 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0, 4277 0, CNTR_NORMAL, 4278 access_misc_efuse_read_bad_addr_err_cnt), 4279 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0, 4280 CNTR_NORMAL, 4281 access_misc_efuse_csr_parity_err_cnt), 4282 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0, 4283 CNTR_NORMAL, 4284 access_misc_fw_auth_failed_err_cnt), 4285 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0, 4286 CNTR_NORMAL, 4287 access_misc_key_mismatch_err_cnt), 4288 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0, 4289 CNTR_NORMAL, 4290 access_misc_sbus_write_failed_err_cnt), 4291 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0, 4292 CNTR_NORMAL, 4293 access_misc_csr_write_bad_addr_err_cnt), 4294 [C_MISC_CSR_READ_BAD_ADDR_ERR] = 
CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0, 4295 CNTR_NORMAL, 4296 access_misc_csr_read_bad_addr_err_cnt), 4297 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0, 4298 CNTR_NORMAL, 4299 access_misc_csr_parity_err_cnt), 4300 /* CceErrStatus */ 4301 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0, 4302 CNTR_NORMAL, 4303 access_sw_cce_err_status_aggregated_cnt), 4304 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0, 4305 CNTR_NORMAL, 4306 access_cce_msix_csr_parity_err_cnt), 4307 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0, 4308 CNTR_NORMAL, 4309 access_cce_int_map_unc_err_cnt), 4310 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0, 4311 CNTR_NORMAL, 4312 access_cce_int_map_cor_err_cnt), 4313 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0, 4314 CNTR_NORMAL, 4315 access_cce_msix_table_unc_err_cnt), 4316 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0, 4317 CNTR_NORMAL, 4318 access_cce_msix_table_cor_err_cnt), 4319 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0, 4320 0, CNTR_NORMAL, 4321 access_cce_rxdma_conv_fifo_parity_err_cnt), 4322 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0, 4323 0, CNTR_NORMAL, 4324 access_cce_rcpl_async_fifo_parity_err_cnt), 4325 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0, 4326 CNTR_NORMAL, 4327 access_cce_seg_write_bad_addr_err_cnt), 4328 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0, 4329 CNTR_NORMAL, 4330 access_cce_seg_read_bad_addr_err_cnt), 4331 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0, 4332 CNTR_NORMAL, 4333 access_la_triggered_cnt), 4334 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0, 4335 CNTR_NORMAL, 4336 access_cce_trgt_cpl_timeout_err_cnt), 4337 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0, 4338 CNTR_NORMAL, 4339 
access_pcic_receive_parity_err_cnt), 4340 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0, 4341 CNTR_NORMAL, 4342 access_pcic_transmit_back_parity_err_cnt), 4343 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0, 4344 0, CNTR_NORMAL, 4345 access_pcic_transmit_front_parity_err_cnt), 4346 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0, 4347 CNTR_NORMAL, 4348 access_pcic_cpl_dat_q_unc_err_cnt), 4349 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0, 4350 CNTR_NORMAL, 4351 access_pcic_cpl_hd_q_unc_err_cnt), 4352 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0, 4353 CNTR_NORMAL, 4354 access_pcic_post_dat_q_unc_err_cnt), 4355 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0, 4356 CNTR_NORMAL, 4357 access_pcic_post_hd_q_unc_err_cnt), 4358 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0, 4359 CNTR_NORMAL, 4360 access_pcic_retry_sot_mem_unc_err_cnt), 4361 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0, 4362 CNTR_NORMAL, 4363 access_pcic_retry_mem_unc_err), 4364 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0, 4365 CNTR_NORMAL, 4366 access_pcic_n_post_dat_q_parity_err_cnt), 4367 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0, 4368 CNTR_NORMAL, 4369 access_pcic_n_post_h_q_parity_err_cnt), 4370 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0, 4371 CNTR_NORMAL, 4372 access_pcic_cpl_dat_q_cor_err_cnt), 4373 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0, 4374 CNTR_NORMAL, 4375 access_pcic_cpl_hd_q_cor_err_cnt), 4376 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0, 4377 CNTR_NORMAL, 4378 access_pcic_post_dat_q_cor_err_cnt), 4379 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0, 4380 CNTR_NORMAL, 4381 access_pcic_post_hd_q_cor_err_cnt), 4382 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = 
CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0, 4383 CNTR_NORMAL, 4384 access_pcic_retry_sot_mem_cor_err_cnt), 4385 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0, 4386 CNTR_NORMAL, 4387 access_pcic_retry_mem_cor_err_cnt), 4388 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM( 4389 "CceCli1AsyncFifoDbgParityError", 0, 0, 4390 CNTR_NORMAL, 4391 access_cce_cli1_async_fifo_dbg_parity_err_cnt), 4392 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM( 4393 "CceCli1AsyncFifoRxdmaParityError", 0, 0, 4394 CNTR_NORMAL, 4395 access_cce_cli1_async_fifo_rxdma_parity_err_cnt 4396 ), 4397 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM( 4398 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0, 4399 CNTR_NORMAL, 4400 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt), 4401 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM( 4402 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0, 4403 CNTR_NORMAL, 4404 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt), 4405 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0, 4406 0, CNTR_NORMAL, 4407 access_cce_cli2_async_fifo_parity_err_cnt), 4408 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0, 4409 CNTR_NORMAL, 4410 access_cce_csr_cfg_bus_parity_err_cnt), 4411 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0, 4412 0, CNTR_NORMAL, 4413 access_cce_cli0_async_fifo_parity_err_cnt), 4414 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0, 4415 CNTR_NORMAL, 4416 access_cce_rspd_data_parity_err_cnt), 4417 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0, 4418 CNTR_NORMAL, 4419 access_cce_trgt_access_err_cnt), 4420 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0, 4421 0, CNTR_NORMAL, 4422 access_cce_trgt_async_fifo_parity_err_cnt), 4423 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0, 4424 CNTR_NORMAL, 4425 access_cce_csr_write_bad_addr_err_cnt), 4426 [C_CCE_CSR_READ_BAD_ADDR_ERR] 
= CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0, 4427 CNTR_NORMAL, 4428 access_cce_csr_read_bad_addr_err_cnt), 4429 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0, 4430 CNTR_NORMAL, 4431 access_ccs_csr_parity_err_cnt), 4432 4433 /* RcvErrStatus */ 4434 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0, 4435 CNTR_NORMAL, 4436 access_rx_csr_parity_err_cnt), 4437 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0, 4438 CNTR_NORMAL, 4439 access_rx_csr_write_bad_addr_err_cnt), 4440 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0, 4441 CNTR_NORMAL, 4442 access_rx_csr_read_bad_addr_err_cnt), 4443 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0, 4444 CNTR_NORMAL, 4445 access_rx_dma_csr_unc_err_cnt), 4446 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0, 4447 CNTR_NORMAL, 4448 access_rx_dma_dq_fsm_encoding_err_cnt), 4449 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0, 4450 CNTR_NORMAL, 4451 access_rx_dma_eq_fsm_encoding_err_cnt), 4452 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0, 4453 CNTR_NORMAL, 4454 access_rx_dma_csr_parity_err_cnt), 4455 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0, 4456 CNTR_NORMAL, 4457 access_rx_rbuf_data_cor_err_cnt), 4458 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0, 4459 CNTR_NORMAL, 4460 access_rx_rbuf_data_unc_err_cnt), 4461 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0, 4462 CNTR_NORMAL, 4463 access_rx_dma_data_fifo_rd_cor_err_cnt), 4464 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0, 4465 CNTR_NORMAL, 4466 access_rx_dma_data_fifo_rd_unc_err_cnt), 4467 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0, 4468 CNTR_NORMAL, 4469 access_rx_dma_hdr_fifo_rd_cor_err_cnt), 4470 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0, 4471 CNTR_NORMAL, 4472 access_rx_dma_hdr_fifo_rd_unc_err_cnt), 
4473 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0, 4474 CNTR_NORMAL, 4475 access_rx_rbuf_desc_part2_cor_err_cnt), 4476 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0, 4477 CNTR_NORMAL, 4478 access_rx_rbuf_desc_part2_unc_err_cnt), 4479 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0, 4480 CNTR_NORMAL, 4481 access_rx_rbuf_desc_part1_cor_err_cnt), 4482 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0, 4483 CNTR_NORMAL, 4484 access_rx_rbuf_desc_part1_unc_err_cnt), 4485 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0, 4486 CNTR_NORMAL, 4487 access_rx_hq_intr_fsm_err_cnt), 4488 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0, 4489 CNTR_NORMAL, 4490 access_rx_hq_intr_csr_parity_err_cnt), 4491 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0, 4492 CNTR_NORMAL, 4493 access_rx_lookup_csr_parity_err_cnt), 4494 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0, 4495 CNTR_NORMAL, 4496 access_rx_lookup_rcv_array_cor_err_cnt), 4497 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0, 4498 CNTR_NORMAL, 4499 access_rx_lookup_rcv_array_unc_err_cnt), 4500 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0, 4501 0, CNTR_NORMAL, 4502 access_rx_lookup_des_part2_parity_err_cnt), 4503 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0, 4504 0, CNTR_NORMAL, 4505 access_rx_lookup_des_part1_unc_cor_err_cnt), 4506 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0, 4507 CNTR_NORMAL, 4508 access_rx_lookup_des_part1_unc_err_cnt), 4509 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0, 4510 CNTR_NORMAL, 4511 access_rx_rbuf_next_free_buf_cor_err_cnt), 4512 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0, 4513 CNTR_NORMAL, 4514 
access_rx_rbuf_next_free_buf_unc_err_cnt), 4515 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM( 4516 "RxRbufFlInitWrAddrParityErr", 0, 0, 4517 CNTR_NORMAL, 4518 access_rbuf_fl_init_wr_addr_parity_err_cnt), 4519 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0, 4520 0, CNTR_NORMAL, 4521 access_rx_rbuf_fl_initdone_parity_err_cnt), 4522 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0, 4523 0, CNTR_NORMAL, 4524 access_rx_rbuf_fl_write_addr_parity_err_cnt), 4525 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0, 4526 CNTR_NORMAL, 4527 access_rx_rbuf_fl_rd_addr_parity_err_cnt), 4528 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0, 4529 CNTR_NORMAL, 4530 access_rx_rbuf_empty_err_cnt), 4531 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0, 4532 CNTR_NORMAL, 4533 access_rx_rbuf_full_err_cnt), 4534 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0, 4535 CNTR_NORMAL, 4536 access_rbuf_bad_lookup_err_cnt), 4537 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0, 4538 CNTR_NORMAL, 4539 access_rbuf_ctx_id_parity_err_cnt), 4540 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0, 4541 CNTR_NORMAL, 4542 access_rbuf_csr_qeopdw_parity_err_cnt), 4543 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM( 4544 "RxRbufCsrQNumOfPktParityErr", 0, 0, 4545 CNTR_NORMAL, 4546 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt), 4547 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM( 4548 "RxRbufCsrQTlPtrParityErr", 0, 0, 4549 CNTR_NORMAL, 4550 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt), 4551 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0, 4552 0, CNTR_NORMAL, 4553 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt), 4554 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0, 4555 0, CNTR_NORMAL, 4556 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt), 4557 
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr", 4558 0, 0, CNTR_NORMAL, 4559 access_rx_rbuf_csr_q_next_buf_parity_err_cnt), 4560 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0, 4561 0, CNTR_NORMAL, 4562 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt), 4563 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM( 4564 "RxRbufCsrQHeadBufNumParityErr", 0, 0, 4565 CNTR_NORMAL, 4566 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt), 4567 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0, 4568 0, CNTR_NORMAL, 4569 access_rx_rbuf_block_list_read_cor_err_cnt), 4570 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0, 4571 0, CNTR_NORMAL, 4572 access_rx_rbuf_block_list_read_unc_err_cnt), 4573 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0, 4574 CNTR_NORMAL, 4575 access_rx_rbuf_lookup_des_cor_err_cnt), 4576 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0, 4577 CNTR_NORMAL, 4578 access_rx_rbuf_lookup_des_unc_err_cnt), 4579 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM( 4580 "RxRbufLookupDesRegUncCorErr", 0, 0, 4581 CNTR_NORMAL, 4582 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt), 4583 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0, 4584 CNTR_NORMAL, 4585 access_rx_rbuf_lookup_des_reg_unc_err_cnt), 4586 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0, 4587 CNTR_NORMAL, 4588 access_rx_rbuf_free_list_cor_err_cnt), 4589 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0, 4590 CNTR_NORMAL, 4591 access_rx_rbuf_free_list_unc_err_cnt), 4592 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0, 4593 CNTR_NORMAL, 4594 access_rx_rcv_fsm_encoding_err_cnt), 4595 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0, 4596 CNTR_NORMAL, 4597 access_rx_dma_flag_cor_err_cnt), 4598 [C_RX_DMA_FLAG_UNC_ERR] = 
CNTR_ELEM("RxDmaFlagUncErr", 0, 0, 4599 CNTR_NORMAL, 4600 access_rx_dma_flag_unc_err_cnt), 4601 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0, 4602 CNTR_NORMAL, 4603 access_rx_dc_sop_eop_parity_err_cnt), 4604 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0, 4605 CNTR_NORMAL, 4606 access_rx_rcv_csr_parity_err_cnt), 4607 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0, 4608 CNTR_NORMAL, 4609 access_rx_rcv_qp_map_table_cor_err_cnt), 4610 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0, 4611 CNTR_NORMAL, 4612 access_rx_rcv_qp_map_table_unc_err_cnt), 4613 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0, 4614 CNTR_NORMAL, 4615 access_rx_rcv_data_cor_err_cnt), 4616 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0, 4617 CNTR_NORMAL, 4618 access_rx_rcv_data_unc_err_cnt), 4619 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0, 4620 CNTR_NORMAL, 4621 access_rx_rcv_hdr_cor_err_cnt), 4622 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0, 4623 CNTR_NORMAL, 4624 access_rx_rcv_hdr_unc_err_cnt), 4625 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0, 4626 CNTR_NORMAL, 4627 access_rx_dc_intf_parity_err_cnt), 4628 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0, 4629 CNTR_NORMAL, 4630 access_rx_dma_csr_cor_err_cnt), 4631 /* SendPioErrStatus */ 4632 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0, 4633 CNTR_NORMAL, 4634 access_pio_pec_sop_head_parity_err_cnt), 4635 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0, 4636 CNTR_NORMAL, 4637 access_pio_pcc_sop_head_parity_err_cnt), 4638 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr", 4639 0, 0, CNTR_NORMAL, 4640 access_pio_last_returned_cnt_parity_err_cnt), 4641 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0, 4642 0, CNTR_NORMAL, 4643 
access_pio_current_free_cnt_parity_err_cnt), 4644 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0, 4645 CNTR_NORMAL, 4646 access_pio_reserved_31_err_cnt), 4647 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0, 4648 CNTR_NORMAL, 4649 access_pio_reserved_30_err_cnt), 4650 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0, 4651 CNTR_NORMAL, 4652 access_pio_ppmc_sop_len_err_cnt), 4653 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0, 4654 CNTR_NORMAL, 4655 access_pio_ppmc_bqc_mem_parity_err_cnt), 4656 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0, 4657 CNTR_NORMAL, 4658 access_pio_vl_fifo_parity_err_cnt), 4659 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0, 4660 CNTR_NORMAL, 4661 access_pio_vlf_sop_parity_err_cnt), 4662 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0, 4663 CNTR_NORMAL, 4664 access_pio_vlf_v1_len_parity_err_cnt), 4665 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0, 4666 CNTR_NORMAL, 4667 access_pio_block_qw_count_parity_err_cnt), 4668 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0, 4669 CNTR_NORMAL, 4670 access_pio_write_qw_valid_parity_err_cnt), 4671 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0, 4672 CNTR_NORMAL, 4673 access_pio_state_machine_err_cnt), 4674 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0, 4675 CNTR_NORMAL, 4676 access_pio_write_data_parity_err_cnt), 4677 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0, 4678 CNTR_NORMAL, 4679 access_pio_host_addr_mem_cor_err_cnt), 4680 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0, 4681 CNTR_NORMAL, 4682 access_pio_host_addr_mem_unc_err_cnt), 4683 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0, 4684 CNTR_NORMAL, 4685 access_pio_pkt_evict_sm_or_arb_sm_err_cnt), 4686 [C_PIO_INIT_SM_IN_ERR] = 
CNTR_ELEM("PioInitSmInErr", 0, 0, 4687 CNTR_NORMAL, 4688 access_pio_init_sm_in_err_cnt), 4689 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0, 4690 CNTR_NORMAL, 4691 access_pio_ppmc_pbl_fifo_err_cnt), 4692 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0, 4693 0, CNTR_NORMAL, 4694 access_pio_credit_ret_fifo_parity_err_cnt), 4695 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0, 4696 CNTR_NORMAL, 4697 access_pio_v1_len_mem_bank1_cor_err_cnt), 4698 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0, 4699 CNTR_NORMAL, 4700 access_pio_v1_len_mem_bank0_cor_err_cnt), 4701 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0, 4702 CNTR_NORMAL, 4703 access_pio_v1_len_mem_bank1_unc_err_cnt), 4704 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0, 4705 CNTR_NORMAL, 4706 access_pio_v1_len_mem_bank0_unc_err_cnt), 4707 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0, 4708 CNTR_NORMAL, 4709 access_pio_sm_pkt_reset_parity_err_cnt), 4710 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0, 4711 CNTR_NORMAL, 4712 access_pio_pkt_evict_fifo_parity_err_cnt), 4713 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM( 4714 "PioSbrdctrlCrrelFifoParityErr", 0, 0, 4715 CNTR_NORMAL, 4716 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt), 4717 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0, 4718 CNTR_NORMAL, 4719 access_pio_sbrdctl_crrel_parity_err_cnt), 4720 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0, 4721 CNTR_NORMAL, 4722 access_pio_pec_fifo_parity_err_cnt), 4723 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0, 4724 CNTR_NORMAL, 4725 access_pio_pcc_fifo_parity_err_cnt), 4726 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0, 4727 CNTR_NORMAL, 4728 access_pio_sb_mem_fifo1_err_cnt), 4729 [C_PIO_SB_MEM_FIFO0_ERR] 
= CNTR_ELEM("PioSbMemFifo0Err", 0, 0, 4730 CNTR_NORMAL, 4731 access_pio_sb_mem_fifo0_err_cnt), 4732 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0, 4733 CNTR_NORMAL, 4734 access_pio_csr_parity_err_cnt), 4735 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0, 4736 CNTR_NORMAL, 4737 access_pio_write_addr_parity_err_cnt), 4738 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0, 4739 CNTR_NORMAL, 4740 access_pio_write_bad_ctxt_err_cnt), 4741 /* SendDmaErrStatus */ 4742 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0, 4743 0, CNTR_NORMAL, 4744 access_sdma_pcie_req_tracking_cor_err_cnt), 4745 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0, 4746 0, CNTR_NORMAL, 4747 access_sdma_pcie_req_tracking_unc_err_cnt), 4748 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0, 4749 CNTR_NORMAL, 4750 access_sdma_csr_parity_err_cnt), 4751 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0, 4752 CNTR_NORMAL, 4753 access_sdma_rpy_tag_err_cnt), 4754 /* SendEgressErrStatus */ 4755 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0, 4756 CNTR_NORMAL, 4757 access_tx_read_pio_memory_csr_unc_err_cnt), 4758 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0, 4759 0, CNTR_NORMAL, 4760 access_tx_read_sdma_memory_csr_err_cnt), 4761 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0, 4762 CNTR_NORMAL, 4763 access_tx_egress_fifo_cor_err_cnt), 4764 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0, 4765 CNTR_NORMAL, 4766 access_tx_read_pio_memory_cor_err_cnt), 4767 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0, 4768 CNTR_NORMAL, 4769 access_tx_read_sdma_memory_cor_err_cnt), 4770 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0, 4771 CNTR_NORMAL, 4772 access_tx_sb_hdr_cor_err_cnt), 4773 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0, 
4774 CNTR_NORMAL, 4775 access_tx_credit_overrun_err_cnt), 4776 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0, 4777 CNTR_NORMAL, 4778 access_tx_launch_fifo8_cor_err_cnt), 4779 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0, 4780 CNTR_NORMAL, 4781 access_tx_launch_fifo7_cor_err_cnt), 4782 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0, 4783 CNTR_NORMAL, 4784 access_tx_launch_fifo6_cor_err_cnt), 4785 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0, 4786 CNTR_NORMAL, 4787 access_tx_launch_fifo5_cor_err_cnt), 4788 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0, 4789 CNTR_NORMAL, 4790 access_tx_launch_fifo4_cor_err_cnt), 4791 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0, 4792 CNTR_NORMAL, 4793 access_tx_launch_fifo3_cor_err_cnt), 4794 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0, 4795 CNTR_NORMAL, 4796 access_tx_launch_fifo2_cor_err_cnt), 4797 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0, 4798 CNTR_NORMAL, 4799 access_tx_launch_fifo1_cor_err_cnt), 4800 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0, 4801 CNTR_NORMAL, 4802 access_tx_launch_fifo0_cor_err_cnt), 4803 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0, 4804 CNTR_NORMAL, 4805 access_tx_credit_return_vl_err_cnt), 4806 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0, 4807 CNTR_NORMAL, 4808 access_tx_hcrc_insertion_err_cnt), 4809 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0, 4810 CNTR_NORMAL, 4811 access_tx_egress_fifo_unc_err_cnt), 4812 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0, 4813 CNTR_NORMAL, 4814 access_tx_read_pio_memory_unc_err_cnt), 4815 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0, 4816 CNTR_NORMAL, 4817 access_tx_read_sdma_memory_unc_err_cnt), 4818 [C_TX_SB_HDR_UNC_ERR] = 
CNTR_ELEM("TxSbHdrUncErr", 0, 0, 4819 CNTR_NORMAL, 4820 access_tx_sb_hdr_unc_err_cnt), 4821 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0, 4822 CNTR_NORMAL, 4823 access_tx_credit_return_partiy_err_cnt), 4824 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr", 4825 0, 0, CNTR_NORMAL, 4826 access_tx_launch_fifo8_unc_or_parity_err_cnt), 4827 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr", 4828 0, 0, CNTR_NORMAL, 4829 access_tx_launch_fifo7_unc_or_parity_err_cnt), 4830 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr", 4831 0, 0, CNTR_NORMAL, 4832 access_tx_launch_fifo6_unc_or_parity_err_cnt), 4833 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr", 4834 0, 0, CNTR_NORMAL, 4835 access_tx_launch_fifo5_unc_or_parity_err_cnt), 4836 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr", 4837 0, 0, CNTR_NORMAL, 4838 access_tx_launch_fifo4_unc_or_parity_err_cnt), 4839 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr", 4840 0, 0, CNTR_NORMAL, 4841 access_tx_launch_fifo3_unc_or_parity_err_cnt), 4842 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr", 4843 0, 0, CNTR_NORMAL, 4844 access_tx_launch_fifo2_unc_or_parity_err_cnt), 4845 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr", 4846 0, 0, CNTR_NORMAL, 4847 access_tx_launch_fifo1_unc_or_parity_err_cnt), 4848 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr", 4849 0, 0, CNTR_NORMAL, 4850 access_tx_launch_fifo0_unc_or_parity_err_cnt), 4851 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr", 4852 0, 0, CNTR_NORMAL, 4853 access_tx_sdma15_disallowed_packet_err_cnt), 4854 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr", 4855 0, 0, CNTR_NORMAL, 4856 
access_tx_sdma14_disallowed_packet_err_cnt), 4857 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr", 4858 0, 0, CNTR_NORMAL, 4859 access_tx_sdma13_disallowed_packet_err_cnt), 4860 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr", 4861 0, 0, CNTR_NORMAL, 4862 access_tx_sdma12_disallowed_packet_err_cnt), 4863 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr", 4864 0, 0, CNTR_NORMAL, 4865 access_tx_sdma11_disallowed_packet_err_cnt), 4866 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr", 4867 0, 0, CNTR_NORMAL, 4868 access_tx_sdma10_disallowed_packet_err_cnt), 4869 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr", 4870 0, 0, CNTR_NORMAL, 4871 access_tx_sdma9_disallowed_packet_err_cnt), 4872 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr", 4873 0, 0, CNTR_NORMAL, 4874 access_tx_sdma8_disallowed_packet_err_cnt), 4875 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr", 4876 0, 0, CNTR_NORMAL, 4877 access_tx_sdma7_disallowed_packet_err_cnt), 4878 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr", 4879 0, 0, CNTR_NORMAL, 4880 access_tx_sdma6_disallowed_packet_err_cnt), 4881 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr", 4882 0, 0, CNTR_NORMAL, 4883 access_tx_sdma5_disallowed_packet_err_cnt), 4884 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr", 4885 0, 0, CNTR_NORMAL, 4886 access_tx_sdma4_disallowed_packet_err_cnt), 4887 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr", 4888 0, 0, CNTR_NORMAL, 4889 access_tx_sdma3_disallowed_packet_err_cnt), 4890 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr", 4891 0, 0, CNTR_NORMAL, 4892 access_tx_sdma2_disallowed_packet_err_cnt), 4893 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr", 
4894 0, 0, CNTR_NORMAL, 4895 access_tx_sdma1_disallowed_packet_err_cnt), 4896 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr", 4897 0, 0, CNTR_NORMAL, 4898 access_tx_sdma0_disallowed_packet_err_cnt), 4899 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0, 4900 CNTR_NORMAL, 4901 access_tx_config_parity_err_cnt), 4902 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0, 4903 CNTR_NORMAL, 4904 access_tx_sbrd_ctl_csr_parity_err_cnt), 4905 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0, 4906 CNTR_NORMAL, 4907 access_tx_launch_csr_parity_err_cnt), 4908 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0, 4909 CNTR_NORMAL, 4910 access_tx_illegal_vl_err_cnt), 4911 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM( 4912 "TxSbrdCtlStateMachineParityErr", 0, 0, 4913 CNTR_NORMAL, 4914 access_tx_sbrd_ctl_state_machine_parity_err_cnt), 4915 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0, 4916 CNTR_NORMAL, 4917 access_egress_reserved_10_err_cnt), 4918 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0, 4919 CNTR_NORMAL, 4920 access_egress_reserved_9_err_cnt), 4921 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr", 4922 0, 0, CNTR_NORMAL, 4923 access_tx_sdma_launch_intf_parity_err_cnt), 4924 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0, 4925 CNTR_NORMAL, 4926 access_tx_pio_launch_intf_parity_err_cnt), 4927 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0, 4928 CNTR_NORMAL, 4929 access_egress_reserved_6_err_cnt), 4930 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0, 4931 CNTR_NORMAL, 4932 access_tx_incorrect_link_state_err_cnt), 4933 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0, 4934 CNTR_NORMAL, 4935 access_tx_linkdown_err_cnt), 4936 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM( 4937 "EgressFifoUnderrunOrParityErr", 0, 0, 4938 CNTR_NORMAL, 4939 
access_tx_egress_fifi_underrun_or_parity_err_cnt), 4940 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0, 4941 CNTR_NORMAL, 4942 access_egress_reserved_2_err_cnt), 4943 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0, 4944 CNTR_NORMAL, 4945 access_tx_pkt_integrity_mem_unc_err_cnt), 4946 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0, 4947 CNTR_NORMAL, 4948 access_tx_pkt_integrity_mem_cor_err_cnt), 4949 /* SendErrStatus */ 4950 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0, 4951 CNTR_NORMAL, 4952 access_send_csr_write_bad_addr_err_cnt), 4953 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0, 4954 CNTR_NORMAL, 4955 access_send_csr_read_bad_addr_err_cnt), 4956 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0, 4957 CNTR_NORMAL, 4958 access_send_csr_parity_cnt), 4959 /* SendCtxtErrStatus */ 4960 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0, 4961 CNTR_NORMAL, 4962 access_pio_write_out_of_bounds_err_cnt), 4963 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0, 4964 CNTR_NORMAL, 4965 access_pio_write_overflow_err_cnt), 4966 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr", 4967 0, 0, CNTR_NORMAL, 4968 access_pio_write_crosses_boundary_err_cnt), 4969 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0, 4970 CNTR_NORMAL, 4971 access_pio_disallowed_packet_err_cnt), 4972 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0, 4973 CNTR_NORMAL, 4974 access_pio_inconsistent_sop_err_cnt), 4975 /* SendDmaEngErrStatus */ 4976 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr", 4977 0, 0, CNTR_NORMAL, 4978 access_sdma_header_request_fifo_cor_err_cnt), 4979 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0, 4980 CNTR_NORMAL, 4981 access_sdma_header_storage_cor_err_cnt), 4982 
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0, 4983 CNTR_NORMAL, 4984 access_sdma_packet_tracking_cor_err_cnt), 4985 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0, 4986 CNTR_NORMAL, 4987 access_sdma_assembly_cor_err_cnt), 4988 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0, 4989 CNTR_NORMAL, 4990 access_sdma_desc_table_cor_err_cnt), 4991 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr", 4992 0, 0, CNTR_NORMAL, 4993 access_sdma_header_request_fifo_unc_err_cnt), 4994 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0, 4995 CNTR_NORMAL, 4996 access_sdma_header_storage_unc_err_cnt), 4997 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0, 4998 CNTR_NORMAL, 4999 access_sdma_packet_tracking_unc_err_cnt), 5000 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0, 5001 CNTR_NORMAL, 5002 access_sdma_assembly_unc_err_cnt), 5003 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0, 5004 CNTR_NORMAL, 5005 access_sdma_desc_table_unc_err_cnt), 5006 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0, 5007 CNTR_NORMAL, 5008 access_sdma_timeout_err_cnt), 5009 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0, 5010 CNTR_NORMAL, 5011 access_sdma_header_length_err_cnt), 5012 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0, 5013 CNTR_NORMAL, 5014 access_sdma_header_address_err_cnt), 5015 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0, 5016 CNTR_NORMAL, 5017 access_sdma_header_select_err_cnt), 5018 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0, 5019 CNTR_NORMAL, 5020 access_sdma_reserved_9_err_cnt), 5021 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0, 5022 CNTR_NORMAL, 5023 access_sdma_packet_desc_overflow_err_cnt), 5024 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0, 5025 
					CNTR_NORMAL,
					access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};

/*
 * Per-port counter table, indexed by the C_* port counter enum values.
 * TXE32/TXE64/RXE64 entries read hardware CSR counters; CNTR_ELEM entries
 * read software-maintained counters through their access_* callback.
 */
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
				CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
				CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
				CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
				CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
				CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
				CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
				CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
				CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
				CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
				CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
				 access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
				access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
				 access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
				 access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
				access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
				       access_sw_cpu_rc_delayed_comp),
/* 160 OVR_LBL/OVR_ELM entries, one per receive context index 0..159 */
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};

/* ======================================================================== */

/* return true if this is chip revision A (minor revision 0x0x) */
int is_ax(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xf0) == 0;
}

/* return true if this is chip revision B (minor revision 0x1x) */
int is_bx(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;
	return (chip_rev_minor & 0xF0) == 0x10;
}

/* return true if the kernel receive-urgent interrupt is masked for rcd */
bool is_urg_masked(struct hfi1_ctxtdata *rcd)
{
	u64 mask;
	u32 is = IS_RCVURGENT_START + rcd->ctxt;
	u8 bit = is % 64;

	/* each CCE_INT_MASK CSR covers 64 interrupt sources */
	mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
	return !(mask & BIT_ULL(bit));
}

/*
 * Append string s to buffer buf. Arguments curp and len are the current
 * position and remaining length, respectively.
 *
 * return 0 on success, 1 on out of room
 */
static int append_str(char *buf, char **curp, int *lenp, const char *s)
{
	char *p = *curp;
	int len = *lenp;
	int result = 0; /* success */
	char c;

	/* add a separating comma, unless this is the first string in buf */
	if (p != buf) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = ',';
		len--;
	}

	/* copy the string */
	while ((c = *s++) != 0) {
		if (len == 0) {
			result = 1; /* out of room */
			goto done;
		}
		*p++ = c;
		len--;
	}

done:
	/* write return values */
	*curp = p;
	*lenp = len;

	return result;
}

/*
 * Using the given flag table, print a comma separated string into
 * the buffer. End in '*' if the buffer is too short.
 */
static char *flag_string(char *buf, int buf_len, u64 flags,
			 const struct flag_table *table, int table_size)
{
	char extra[32];
	char *p = buf;
	int len = buf_len;
	int no_room = 0;
	int i;

	/* make sure there is at least 2 so we can form "*" */
	if (len < 2)
		return "";

	len--;	/* leave room for a nul */
	for (i = 0; i < table_size; i++) {
		if (flags & table[i].flag) {
			no_room = append_str(buf, &p, &len, table[i].str);
			if (no_room)
				break;
			/* clear each named bit so leftovers can be reported */
			flags &= ~table[i].flag;
		}
	}

	/* any undocumented bits left? */
	if (!no_room && flags) {
		snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
		no_room = append_str(buf, &p, &len, extra);
	}

	/* add * if ran out of room */
	if (no_room) {
		/* may need to back up to add space for a '*' */
		if (len == 0)
			--p;
		*p++ = '*';
	}

	/* add final nul - space already allocated above */
	*p = 0;
	return buf;
}

/* first 8 CCE error interrupt source names */
static const char * const cce_misc_names[] = {
	"CceErrInt",	/* 0 */
	"RxeErrInt",	/* 1 */
	"MiscErrInt",	/* 2 */
	"Reserved3",	/* 3 */
	"PioErrInt",	/* 4 */
	"SDmaErrInt",	/* 5 */
	"EgressErrInt",	/* 6 */
	"TxeErrInt"	/* 7 */
};

/*
 * Return the miscellaneous error interrupt name.
 */
static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(cce_misc_names))
		strscpy_pad(buf, cce_misc_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u",
			 source + IS_GENERAL_ERR_START);

	return buf;
}

/*
 * Return the SDMA engine error interrupt name.
 */
static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SDmaEngErrInt%u", source);
	return buf;
}

/*
 * Return the send context error interrupt name.
 */
static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCtxtErrInt%u", source);
	return buf;
}

/* "various" interrupt source names, indexed from IS_VARIOUS_START */
static const char * const various_names[] = {
	"PbcInt",
	"GpioAssertInt",
	"Qsfp1Int",
	"Qsfp2Int",
	"TCritInt"
};

/*
 * Return the various interrupt name.
 */
static char *is_various_name(char *buf, size_t bsize, unsigned int source)
{
	if (source < ARRAY_SIZE(various_names))
		strscpy_pad(buf, various_names[source], bsize);
	else
		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
	return buf;
}

/*
 * Return the DC interrupt name.
 */
static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
{
	static const char * const dc_int_names[] = {
		"common",
		"lcb",
		"8051",
		"lbm"	/* local block merge */
	};

	if (source < ARRAY_SIZE(dc_int_names))
		snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
	else
		snprintf(buf, bsize, "DCInt%u", source);
	return buf;
}

/* SDMA interrupt kind names, indexed by source / TXE_NUM_SDMA_ENGINES */
static const char * const sdma_int_names[] = {
	"SDmaInt",
	"SdmaIdleInt",
	"SdmaProgressInt",
};

/*
 * Return the SDMA engine interrupt name.
 */
static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
{
	/* what interrupt */
	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

	if (likely(what < 3))
		snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
	else
		snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
	return buf;
}

/*
 * Return the receive available interrupt name.
 */
static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvAvailInt%u", source);
	return buf;
}

/*
 * Return the receive urgent interrupt name.
 */
static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "RcvUrgentInt%u", source);
	return buf;
}

/*
 * Return the send credit interrupt name.
 */
static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "SendCreditInt%u", source);
	return buf;
}

/*
 * Return the reserved interrupt name.
 */
static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
{
	snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
	return buf;
}

/* decode a CceErrStatus bitmask into a comma-separated name string */
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   cce_err_status_flags,
			   ARRAY_SIZE(cce_err_status_flags));
}

/* decode an RcvErrStatus bitmask into a comma-separated name string */
static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   rxe_err_status_flags,
			   ARRAY_SIZE(rxe_err_status_flags));
}

/* decode a MiscErrStatus bitmask into a comma-separated name string */
static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, misc_err_status_flags,
			   ARRAY_SIZE(misc_err_status_flags));
}

/* decode a SendPioErrStatus bitmask into a comma-separated name string */
static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   pio_err_status_flags,
			   ARRAY_SIZE(pio_err_status_flags));
}

/* decode a SendDmaErrStatus bitmask into a comma-separated name string */
static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sdma_err_status_flags,
			   ARRAY_SIZE(sdma_err_status_flags));
}

/* decode a SendEgressErrStatus bitmask into a comma-separated name string */
static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_status_flags,
			   ARRAY_SIZE(egress_err_status_flags));
}

/* decode a SendEgressErrInfo bitmask into a comma-separated name string */
static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   egress_err_info_flags,
			   ARRAY_SIZE(egress_err_info_flags));
}

/* decode a SendErrStatus bitmask into a comma-separated name string */
static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   send_err_status_flags,
			   ARRAY_SIZE(send_err_status_flags));
}

/*
 * Handle a CCE error interrupt: log the decoded error bits, bump the
 * per-bit and aggregate software counters, and on A0 hardware force an
 * SPC freeze for the CLI2 async FIFO parity error.
 */
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	/*
	 * For most these errors, there is nothing that can be done except
	 * report or record it.
	 */
	dd_dev_info(dd, "CCE Error: %s\n",
		    cce_err_status_string(buf, sizeof(buf), reg));

	if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
	    is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
		/* this error requires a manual drop into SPC freeze mode */
		/* then a fix up */
		start_freeze_handling(dd->pport, FREEZE_SELF);
	}

	for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i)) {
			incr_cntr64(&dd->cce_err_status_cnt[i]);
			/* maintain a counter over all cce_err_status errors */
			incr_cntr64(&dd->sw_cce_err_status_aggregate);
		}
	}
}

/*
 * Check counters for receive errors that do not have an interrupt
 * associated with them.
 */
#define RCVERR_CHECK_TIME 10	/* polling period, in seconds */

/*
 * Periodic timer callback: if the receive-overflow counter has advanced
 * since the last check and the port is configured to bounce on excessive
 * buffer overrun, record the link-down reason and queue a link bounce.
 * Re-arms itself every RCVERR_CHECK_TIME seconds.
 */
static void update_rcverr_timer(struct timer_list *t)
{
	struct hfi1_devdata *dd = timer_container_of(dd, t, rcverr_timer);
	struct hfi1_pportdata *ppd = dd->pport;
	u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);

	if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
	    ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
		dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
		set_link_down_reason(
			ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
			OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
	}
	dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;

	mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

/*
 * Set up and start the receive-error polling timer.
 */
static int init_rcverr(struct hfi1_devdata *dd)
{
	timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
	/* Assume the hardware counter has been reset */
	dd->rcv_ovfl_cnt = 0;
	return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}

/*
 * Stop the receive-error polling timer, if it was ever set up.
 */
static void free_rcverr(struct hfi1_devdata *dd)
{
	if (dd->rcverr_timer.function)
		timer_delete_sync(&dd->rcverr_timer);
}

/*
 * Handle an RXE error interrupt: log the decoded status, start freeze
 * handling for freeze-class errors (recovery is aborted on A0 for the
 * errors in RXE_FREEZE_ABORT_MASK), and bump the per-bit counters.
 */
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Receive Error: %s\n",
		    rxe_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_RXE_FREEZE_ERR) {
		int flags = 0;

		/*
		 * Freeze mode recovery is disabled for the errors
		 * in RXE_FREEZE_ABORT_MASK
		 */
		if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
			flags = FREEZE_ABORT;

		start_freeze_handling(dd->pport, flags);
	}

	for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->rcv_err_status_cnt[i]);
	}
}

/*
 * Handle a misc error interrupt: log the decoded status and bump the
 * per-bit counters.
 */
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused,
			    u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Misc Error: %s",
		    misc_err_status_string(buf, sizeof(buf), reg));
	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->misc_err_status_cnt[i]);
	}
}

/*
 * Handle a PIO error interrupt: log, freeze on freeze-class errors,
 * and bump the per-bit counters.
 */
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "PIO Error: %s\n",
		    pio_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_PIO_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_pio_err_status_cnt[i]);
	}
}

/*
 * Handle an SDMA error interrupt: log, freeze on freeze-class errors,
 * and bump the per-bit counters.
 */
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "SDMA Error: %s\n",
		    sdma_err_status_string(buf, sizeof(buf), reg));

	if (reg & ALL_SDMA_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);

	for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_dma_err_status_cnt[i]);
	}
}

/* Count one transmit discard against the port. */
static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
	incr_cntr64(&ppd->port_xmit_discards);
}

/* A 'port inactive' egress error is accounted as a transmit discard. */
static void count_port_inactive(struct hfi1_devdata *dd)
{
	__count_port_discards(dd->pport);
}

/*
 * We have had a "disallowed packet" error during egress. Determine the
 * integrity check which failed, and update relevant error counter, etc.
 *
 * Note that the SEND_EGRESS_ERR_INFO register has only a single
 * bit of state per integrity check, and so we can miss the reason for an
 * egress error if more than one packet fails the same integrity check
 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
 */
static void handle_send_egress_err_info(struct hfi1_devdata *dd,
					int vl)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
	u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
	char buf[96];

	/* clear down all observed info as quickly as possible after read */
	write_csr(dd, SEND_EGRESS_ERR_INFO, info);

	dd_dev_info(dd,
		    "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
		    info, egress_err_info_string(buf, sizeof(buf), info), src);

	/* Eventually add other counters for each bit */
	if (info & PORT_DISCARD_EGRESS_ERRS) {
		int weight, i;

		/*
		 * Count all applicable bits as individual errors and
		 * attribute them to the packet that triggered this handler.
		 * This may not be completely accurate due to limitations
		 * on the available hardware error information. There is
		 * a single information register and any number of error
		 * packets may have occurred and contributed to it before
		 * this routine is called. This means that:
		 * a) If multiple packets with the same error occur before
		 *    this routine is called, earlier packets are missed.
		 *    There is only a single bit for each error type.
		 * b) Errors may not be attributed to the correct VL.
		 *    The driver is attributing all bits in the info register
		 *    to the packet that triggered this call, but bits
		 *    could be an accumulation of different packets with
		 *    different VLs.
		 * c) A single error packet may have multiple counts attached
		 *    to it. There is no way for the driver to know if
		 *    multiple bits set in the info register are due to a
		 *    single packet or multiple packets. The driver assumes
		 *    multiple packets.
		 */
		weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
		for (i = 0; i < weight; i++) {
			__count_port_discards(ppd);
			if (vl >= 0 && vl < TXE_NUM_DATA_VL)
				incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
			else if (vl == 15)
				incr_cntr64(&ppd->port_xmit_discards_vl
					    [C_VL_15]);
		}
	}
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'port inactive' error?
 */
static inline int port_inactive_err(u64 posn)
{
	return (posn >= SEES(TX_LINKDOWN) &&
		posn <= SEES(TX_INCORRECT_LINK_STATE));
}

/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register. Does it represent a 'disallowed packet' error?
 */
static inline int disallowed_pkt_err(int posn)
{
	return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
		posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}

/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */
static inline int disallowed_pkt_engine(int posn)
{
	return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
}

/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */
static int engine_to_vl(struct hfi1_devdata *dd, int engine)
{
	struct sdma_vl_map *m;
	int vl;

	/* range check */
	if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
		return -1;

	/* the map is RCU-protected; hold the read lock over the lookup */
	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	vl = m->engine_to_vl[engine];
	rcu_read_unlock();

	return vl;
}

/*
 * Translate the send context (software index) into a VL.  Return -1 if the
 * translation cannot be done.
 */
static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
{
	struct send_context_info *sci;
	struct send_context *sc;
	int i;

	sci = &dd->send_contexts[sw_index];

	/* there is no information for user (PSM) and ack contexts */
	if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
		return -1;

	sc = sci->sc;
	if (!sc)
		return -1;
	/* VL15 has its own dedicated send context */
	if (dd->vld[15].sc == sc)
		return 15;
	for (i = 0; i < num_vls; i++)
		if (dd->vld[i].sc == sc)
			return i;

	return -1;
}

/*
 * Handle an egress error interrupt.  'Port inactive' and 'disallowed
 * packet' bits are handled (and counted) individually; anything left
 * over is logged and counted per bit.  Freeze handling is started for
 * freeze-class errors, plus a workaround case on A0 hardware.
 */
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 reg_copy = reg, handled = 0;
	char buf[96];
	int i = 0;

	if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
		start_freeze_handling(dd->pport, 0);
	else if (is_ax(dd) &&
		 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
		 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
		start_freeze_handling(dd->pport, 0);

	/* walk the set bits from most significant down */
	while (reg_copy) {
		int posn = fls64(reg_copy);
		/* fls64() returns a 1-based offset, we want it zero based */
		int shift = posn - 1;
		u64 mask = 1ULL << shift;

		if (port_inactive_err(shift)) {
			count_port_inactive(dd);
			handled |= mask;
		} else if (disallowed_pkt_err(shift)) {
			int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));

			handle_send_egress_err_info(dd, vl);
			handled |= mask;
		}
		reg_copy &= ~mask;
	}

	/* only report/count bits not already handled above */
	reg &= ~handled;

	if (reg)
		dd_dev_info(dd, "Egress Error: %s\n",
			    egress_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_egress_err_status_cnt[i]);
	}
}

/*
 * Handle a TXE send error interrupt: log the decoded status and bump
 * the per-bit counters.
 */
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];
	int i = 0;

	dd_dev_info(dd, "Send Error: %s\n",
		    send_err_status_string(buf, sizeof(buf), reg));

	for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
		if (reg & (1ull << i))
			incr_cntr64(&dd->send_err_status_cnt[i]);
	}
}

/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.  This value is arbitrary.
 */
#define MAX_CLEAR_COUNT 20

/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */
static void interrupt_clear_down(struct hfi1_devdata *dd,
				 u32 context,
				 const struct err_reg_info *eri)
{
	u64 reg;
	u32 count;

	/* read in a loop until no more errors are seen */
	count = 0;
	while (1) {
		reg = read_kctxt_csr(dd, context, eri->status);
		if (reg == 0)
			break;
		write_kctxt_csr(dd, context, eri->clear, reg);
		if (likely(eri->handler))
			eri->handler(dd, context, reg);
		count++;
		if (count > MAX_CLEAR_COUNT) {
			u64 mask;

			dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
				   eri->desc, reg);
			/*
			 * Read-modify-write so any other masked bits
			 * remain masked.
			 */
			mask = read_kctxt_csr(dd, context, eri->mask);
			mask &= ~reg;
			write_kctxt_csr(dd, context, eri->mask, mask);
			break;
		}
	}
}

/*
 * CCE block "misc" interrupt.  Source is < 16.
 */
static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &misc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else {
		dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
			   source);
	}
}

/* Decode a send-context error status register into a readable string. */
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags,
			   sc_err_status_flags,
			   ARRAY_SIZE(sc_err_status_flags));
}

/*
 * Send context error interrupt.  Source (hw_context) is < 160.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because we cannot clear the
 * error bits until several other long-running items are done first.
 * This is OK because with the context halted, nothing else is going
 * to happen on it anyway.
 */
static void is_sendctxt_err_int(struct hfi1_devdata *dd,
				unsigned int hw_context)
{
	struct send_context_info *sci;
	struct send_context *sc;
	char flags[96];
	u64 status;
	u32 sw_index;
	int i = 0;
	unsigned long irq_flags;

	/* map the hardware context back to the software context */
	sw_index = dd->hw_to_sw[hw_context];
	if (sw_index >= dd->num_send_contexts) {
		dd_dev_err(dd,
			   "out of range sw index %u for send context %u\n",
			   sw_index, hw_context);
		return;
	}
	sci = &dd->send_contexts[sw_index];
	spin_lock_irqsave(&dd->sc_lock, irq_flags);
	sc = sci->sc;
	if (!sc) {
		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
			   sw_index, hw_context);
		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
		return;
	}

	/* tell the software that a halt has begun */
	sc_stop(sc, SCF_HALTED);

	status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);

	dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
		    send_context_err_status_string(flags, sizeof(flags),
						   status));

	if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
		handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));

	/*
	 * Automatically restart halted kernel contexts out of interrupt
	 * context.  User contexts must ask the driver to restart the context.
	 */
	if (sc->type != SC_USER)
		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over all
	 * 160 contexts.
	 */
	for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
	}
}

/*
 * Handle an SDMA engine error: record it against the engine, kick the
 * SDMA error state machine, and bump the aggregated per-bit counters.
 */
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int source, u64 status)
{
	struct sdma_engine *sde;
	int i = 0;

	sde = &dd->per_sdma[source];
#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
		   sde->this_idx, source, (unsigned long long)status);
#endif
	sde->err_cnt++;
	sdma_engine_error(sde, status);

	/*
	 * Update the counters for the corresponding status bits.
	 * Note that these particular counters are aggregated over
	 * all 16 DMA engines.
	 */
	for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
		if (status & (1ull << i))
			incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
	}
}

/*
 * CCE block SDMA error interrupt.  Source is < 16.
 */
static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
{
#ifdef CONFIG_SDMA_VERBOSITY
	struct sdma_engine *sde = &dd->per_sdma[source];

	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
		   source);
	sdma_dumpstate(sde);
#endif
	interrupt_clear_down(dd, source, &sdma_eng_err);
}

/*
 * CCE block "various" interrupt.  Source is < 8.
 */
static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &various_err[source];

	/*
	 * TCritInt cannot go through interrupt_clear_down()
	 * because it is not a second tier interrupt. The handler
	 * should be called directly.
	 */
	if (source == TCRIT_INT_SOURCE)
		handle_temp_err(dd);
	else if (eri->handler)
		interrupt_clear_down(dd, 0, eri);
	else
		dd_dev_info(dd,
			    "%s: Unimplemented/reserved interrupt %d\n",
			    __func__, source);
}

/*
 * Handle a QSFP module presence-change or module interrupt.  On removal,
 * invalidate the cached cable info, re-invert the ModPresent pin so
 * plug-in is detected, record the offline reason, and (if still in POLL)
 * queue link-down processing.  On insertion, mark the cache for refresh
 * and stop the pin inversion.  Any pending module interrupt flags QSFP
 * work; the work item is only queued while a cable is present.
 */
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
	/* src_ctx is always zero */
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned long flags;
	u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);

	if (reg & QSFP_HFI0_MODPRST_N) {
		if (!qsfp_mod_present(ppd)) {
			dd_dev_info(dd, "%s: QSFP module removed\n",
				    __func__);

			ppd->driver_link_ready = 0;
			/*
			 * Cable removed, reset all our information about the
			 * cache and cable capabilities
			 */

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			/*
			 * We don't set cache_refresh_required here as we expect
			 * an interrupt when a cable is inserted
			 */
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.reset_needed = 0;
			ppd->qsfp_info.limiting_active = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
			/* Invert the ModPresent pin now to detect plug-in */
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			if ((ppd->offline_disabled_reason >
			  HFI1_ODR_MASK(
			  OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
			  (ppd->offline_disabled_reason ==
			  HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
				ppd->offline_disabled_reason =
				HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);

			if (ppd->host_link_state == HLS_DN_POLL) {
				/*
				 * The link is still in POLL. This means
				 * that the normal link down processing
				 * will not happen. We have to do it here
				 * before turning the DC off.
				 */
				queue_work(ppd->link_wq, &ppd->link_down_work);
			}
		} else {
			dd_dev_info(dd, "%s: QSFP module inserted\n",
				    __func__);

			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.cache_valid = 0;
			ppd->qsfp_info.cache_refresh_required = 1;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);

			/*
			 * Stop inversion of ModPresent pin to detect
			 * removal of the cable
			 */
			qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
			write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
				  ASIC_QSFP1_INVERT, qsfp_int_mgmt);

			ppd->offline_disabled_reason =
				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
		}
	}

	if (reg & QSFP_HFI0_INT_N) {
		dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
			    __func__);
		spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
		ppd->qsfp_info.check_interrupt_flags = 1;
		spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
	}

	/* Schedule the QSFP work only if there is a cable attached. */
	if (qsfp_mod_present(ppd))
		queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
}

/*
 * Ask the 8051 firmware to give LCB CSR access to the host.
 * Returns 0 on success, -EBUSY if the 8051 refuses.
 */
static int request_host_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

/*
 * Tell the 8051 firmware that LCB CSR access is being returned to it.
 * Returns 0 on success, -EBUSY if the command fails.
 */
static int request_8051_lcb_access(struct hfi1_devdata *dd)
{
	int ret;

	ret = do_8051_command(dd, HCMD_MISC,
			      (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
			      LOAD_DATA_FIELD_ID_SHIFT, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "%s: command failed with error %d\n",
			   __func__, ret);
	}
	return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}

/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
		  DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}

/*
 * Clear the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
	write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
		  DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}

/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment a counter.  Otherwise, inform the 8051 that the
 * host is taking access.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 has control and cannot be disturbed
 *	-errno if unable to acquire access from the 8051
 */
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	struct hfi1_pportdata *ppd = dd->pport;
	int ret = 0;

	/*
	 * Use the host link state lock so the operation of this routine
	 * { link state check, selector change, count increment } can occur
	 * as a unit against a link state change.  Otherwise there is a
	 * race between the state change and the count increment.
	 */
	if (sleep_ok) {
		mutex_lock(&ppd->hls_lock);
	} else {
		/* caller cannot sleep: busy-wait for the lock */
		while (!mutex_trylock(&ppd->hls_lock))
			udelay(1);
	}

	/* this access is valid only when the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		dd_dev_info(dd, "%s: link state %s not up\n",
			    __func__, link_state_name(ppd->host_link_state));
		ret = -EBUSY;
		goto done;
	}

	/* first user: actually take access from the 8051 */
	if (dd->lcb_access_count == 0) {
		ret = request_host_lcb_access(dd);
		if (ret) {
			if (!(dd->flags & HFI1_SHUTDOWN))
				dd_dev_err(dd,
					   "%s: unable to acquire LCB access, err %d\n",
					   __func__, ret);
			goto done;
		}
		set_host_lcb_access(dd);
	}
	dd->lcb_access_count++;
done:
	mutex_unlock(&ppd->hls_lock);
	return ret;
}

/*
 * Release LCB access by decrementing the use count.  If the count is moving
 * from 1 to 0, inform 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-errno if unable to release access to the 8051
 */
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
{
	int ret = 0;

	/*
	 * Use the host link state lock because the acquire needed it.
	 * Here, we only need to keep { selector change, count decrement }
	 * as a unit.
	 */
	if (sleep_ok) {
		mutex_lock(&dd->pport->hls_lock);
	} else {
		/* caller cannot sleep: busy-wait for the lock */
		while (!mutex_trylock(&dd->pport->hls_lock))
			udelay(1);
	}

	if (dd->lcb_access_count == 0) {
		dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
			   __func__);
		goto done;
	}

	/* last user: hand access back to the 8051 */
	if (dd->lcb_access_count == 1) {
		set_8051_lcb_access(dd);
		ret = request_8051_lcb_access(dd);
		if (ret) {
			dd_dev_err(dd,
				   "%s: unable to release LCB access, err %d\n",
				   __func__, ret);
			/* restore host access if the grant didn't work */
			set_host_lcb_access(dd);
			goto done;
		}
	}
	dd->lcb_access_count--;
done:
	mutex_unlock(&dd->pport->hls_lock);
	return ret;
}

/*
 * Initialize LCB access variables and state.  Called during driver load,
 * after most of the initialization is finished.
 *
 * The DC default is LCB access on for the host.  The driver defaults to
 * leaving access to the 8051.  Assign access now - this constrains the call
 * to this routine to be after all LCB set-up is done.  In particular, after
 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
 */
static void init_lcb_access(struct hfi1_devdata *dd)
{
	dd->lcb_access_count = 0;
}

/*
 * Write a response back to a 8051 request.
 */
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
		  DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
		  (u64)return_code <<
		  DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
		  (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}

/*
 * Handle host requests from the 8051.
 */
static void handle_8051_request(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u16 data = 0;
	u8 type;

	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
	if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
		return;	/* no request */

	/* zero out COMPLETED so the response is seen */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);

	/* extract request details */
	type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
		& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;

	switch (type) {
	case HREQ_LOAD_CONFIG:
	case HREQ_SAVE_CONFIG:
	case HREQ_READ_CONFIG:
	case HREQ_SET_TX_EQ_ABS:
	case HREQ_SET_TX_EQ_REL:
	case HREQ_ENABLE:
		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
			    type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	case HREQ_LCB_RESET:
		/* Put the LCB, RX FPE and TX FPE into reset */
		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
		/* Make sure the write completed */
		(void)read_csr(dd, DCC_CFG_RESET);
		/* Hold the reset long enough to take effect */
		udelay(1);
		/* Take the LCB, RX FPE and TX FPE out of reset */
		write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
		hreq_response(dd, HREQ_SUCCESS, 0);

		break;
	case HREQ_CONFIG_DONE:
		hreq_response(dd, HREQ_SUCCESS, 0);
		break;

	case HREQ_INTERFACE_TEST:
		/* echo the request data back in the response */
		hreq_response(dd, HREQ_SUCCESS, data);
		break;
	default:
		dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
		hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
		break;
	}
}

/*
 * Set up allocation unit value.
 */
void set_up_vau(struct hfi1_devdata *dd, u8 vau)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* do not modify other values in the register */
	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/*
 * Set up initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit reset.
 * Shared limit for VL15 will always be 0.
 */
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
{
	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);

	/* set initial values for total and shared credit limit */
	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);

	/*
	 * Set total limit to be equal to VL15 credits.
	 * Leave shared limit at 0.
	 */
	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);

	/* dedicate all the VL15 credits to VL15 itself */
	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
}

/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */
void reset_link_credits(struct hfi1_devdata *dd)
{
	int i;

	/* remove all previous VL credit limits */
	for (i = 0; i < TXE_NUM_DATA_VL; i++)
		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
	/* reset the CM block */
	pio_send_control(dd, PSC_CM_RESET);
	/* reset cached value */
	dd->vl15buf_cached = 0;
}

/* convert a vCU (virtual credit unit exponent) to a CU */
static u32 vcu_to_cu(u8 vcu)
{
	return 1 << vcu;
}

/* convert a CU to a vCU; CU is expected to be a power of two */
static u8 cu_to_vcu(u32 cu)
{
	return ilog2(cu);
}

/* convert a vAU (virtual allocation unit exponent) to an AU in bytes */
static u32 vau_to_au(u8 vau)
{
	return 8 * (1 << vau);
}

/* Reset the SM trap and SA QPs to their link-up defaults. */
static void set_linkup_defaults(struct hfi1_pportdata *ppd)
{
	ppd->sm_trap_qp = 0x0;
	ppd->sa_qp = 0x1;
}

/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 *
 * With abort set, the DCC reset and LCB error enables are left as-is
 * for a later restore (see _dc_start()); otherwise they are restored
 * here after the reset has been held long enough.
 */
static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
{
	u64 reg;

	/* clear lcb run: LCB_CFG_RUN.EN = 0 */
	write_csr(dd, DC_LCB_CFG_RUN, 0);
	/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
		  1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
	/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
	dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
	reg = read_csr(dd, DCC_CFG_RESET);
	write_csr(dd, DCC_CFG_RESET, reg |
		  DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
	if (!abort) {
		udelay(1);    /* must hold for the longer of 16cclks or 20ns */
		write_csr(dd, DCC_CFG_RESET, reg);
		write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	}
}

/*
 * This routine should be called after the link has been transitioned to
 * OFFLINE (OFFLINE state has
 * the side effect of putting the SerDes into
 * reset).
 *
 * The expectation is that the caller of this routine would have taken
 * care of properly transitioning the link into the correct state.
 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
 *       before calling this function.
 */
static void _dc_shutdown(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	/* already shut down - nothing to do */
	if (dd->dc_shutdown)
		return;

	dd->dc_shutdown = 1;
	/* Shutdown the LCB */
	lcb_shutdown(dd, 1);
	/*
	 * Going to OFFLINE would have causes the 8051 to put the
	 * SerDes into reset already. Just need to shut down the 8051,
	 * itself.
	 */
	write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}

/* locked wrapper around _dc_shutdown() */
static void dc_shutdown(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_shutdown(dd);
	mutex_unlock(&dd->dc8051_lock);
}

/*
 * Calling this after the DC has been brought out of reset should not
 * do any damage.
 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
 *       before calling this function.
 */
static void _dc_start(struct hfi1_devdata *dd)
{
	lockdep_assert_held(&dd->dc8051_lock);

	/* only start if currently shut down */
	if (!dd->dc_shutdown)
		return;

	/* Take the 8051 out of reset */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);
	/* Wait until 8051 is ready */
	if (wait_fm_ready(dd, TIMEOUT_8051_START))
		dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
			   __func__);

	/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
	write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
	/* lcb_shutdown() with abort=1 does not restore these */
	write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
	dd->dc_shutdown = 0;
}

/* locked wrapper around _dc_start() */
static void dc_start(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_start(dd);
	mutex_unlock(&dd->dc8051_lock);
}

/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
 */
static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
{
	u64 rx_radr, tx_radr;
	u32 version;

	if (dd->icode != ICODE_FPGA_EMULATION)
		return;

	/*
	 * These LCB defaults on emulator _s are good, nothing to do here:
	 *	LCB_CFG_TX_FIFOS_RADR
	 *	LCB_CFG_RX_FIFOS_RADR
	 *	LCB_CFG_LN_DCLK
	 *	LCB_CFG_IGNORE_LOST_RCLK
	 */
	if (is_emulator_s(dd))
		return;
	/* else this is _p */

	version = emulator_rev(dd);
	if (!is_ax(dd))
		version = 0x2d;	/* all B0 use 0x2d or higher settings */

	if (version <= 0x12) {
		/* release 0x12 and below */

		/*
		 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
		 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
		 */
		rx_radr =
		      0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		/*
		 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
		 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
		 */
		tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version <= 0x18) {
		/* release 0x13 up to 0x18 */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x19) {
		/* release 0x19 */
		/* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
		rx_radr =
		      0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	} else if (version == 0x1a) {
		/* release 0x1a */
		/* LCB_CFG_RX_FIFOS_RADR = 0x988 */
		rx_radr =
		      0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
		write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
	} else {
		/* release 0x1b and higher */
		/* LCB_CFG_RX_FIFOS_RADR = 0x877 */
		rx_radr =
		      0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
		    | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
		tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
	}

	write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
	/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
	write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
		  DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}

/*
 * Handle a SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_sma_message(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  sma_message_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 msg;
	int ret;

	/*
	 * msg is bytes 1-4 of the 40-bit idle message - the command code
	 * is stripped off
	 */
	ret = read_idle_sma(dd, &msg);
	if (ret)
		return;
	dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
	/*
	 * React to the SMA message.  Byte[1] (0 for us) is the command.
	 */
	switch (msg & 0xff) {
	case SMA_IDLE_ARM:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Only expected in INIT or ARMED, discard otherwise.
		 */
		if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
			ppd->neighbor_normal = 1;
		break;
	case SMA_IDLE_ACTIVE:
		/*
		 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
		 * State Transitions
		 *
		 * Can activate the node.  Discard otherwise.
		 */
		if (ppd->host_link_state == HLS_UP_ARMED &&
		    ppd->is_active_optimize_enabled) {
			ppd->neighbor_normal = 1;
			ret = set_link_state(ppd, HLS_UP_ACTIVE);
			if (ret)
				dd_dev_err(
					dd,
					"%s: received Active SMA idle message, couldn't set link to Active\n",
					__func__);
		}
		break;
	default:
		dd_dev_err(dd,
			   "%s: received unexpected SMA idle message 0x%llx\n",
			   __func__, msg);
		break;
	}
}

/* read-modify-write RCV_CTRL under the rcvctrl lock */
static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
{
	u64 rcvctrl;
	unsigned long flags;

	spin_lock_irqsave(&dd->rcvctrl_lock, flags);
	rcvctrl = read_csr(dd, RCV_CTRL);
	rcvctrl |= add;
	rcvctrl &= ~clear;
	write_csr(dd, RCV_CTRL, rcvctrl);
	spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
}

/* set bits in RCV_CTRL */
static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
{
	adjust_rcvctrl(dd, add, 0);
}

/* clear bits in RCV_CTRL */
static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
{
	adjust_rcvctrl(dd, 0, clear);
}

/*
 * Called from all interrupt handlers to start handling an SPC freeze.
 */
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct send_context *sc;
	int i;
	int sc_flags;

	/* FREEZE_SELF means we trigger the freeze ourselves */
	if (flags & FREEZE_SELF)
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

	/* enter frozen mode */
	dd->flags |= HFI1_FROZEN;

	/* notify all SDMA engines that they are going into a freeze */
	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));

	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
					      SCF_LINK_DOWN : 0);
	/* do halt pre-handling on all enabled send contexts */
	for (i = 0; i < dd->num_send_contexts; i++) {
		sc = dd->send_contexts[i].sc;
		if (sc && (sc->flags & SCF_ENABLED))
			sc_stop(sc, sc_flags);
	}

	/* Send context are frozen. Notify user space */
	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);

	if (flags & FREEZE_ABORT) {
		dd_dev_err(dd,
			   "Aborted freeze recovery. Please REBOOT system\n");
		return;
	}
	/* queue non-interrupt handler */
	queue_work(ppd->hfi1_wq, &ppd->freeze_work);
}

/*
 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
 * depending on the "freeze" parameter.
 *
 * No need to return an error if it times out, our only option
 * is to proceed anyway.
 */
static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
{
	unsigned long timeout;
	u64 reg;

	timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if (freeze) {
			/* waiting until all indicators are set */
			if ((reg & ALL_FROZE) == ALL_FROZE)
				return; /* all done */
		} else {
			/* waiting until all indicators are clear */
			if ((reg & ALL_FROZE) == 0)
				return; /* all done */
		}

		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
				   freeze ? "" : "un", reg & ALL_FROZE,
				   freeze ? ALL_FROZE : 0ull);
			return;
		}
		usleep_range(80, 120);
	}
}

/*
 * Do all freeze handling for the RXE block.
6805 */ 6806 static void rxe_freeze(struct hfi1_devdata *dd) 6807 { 6808 int i; 6809 struct hfi1_ctxtdata *rcd; 6810 6811 /* disable port */ 6812 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6813 6814 /* disable all receive contexts */ 6815 for (i = 0; i < dd->num_rcv_contexts; i++) { 6816 rcd = hfi1_rcd_get_by_index(dd, i); 6817 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd); 6818 hfi1_rcd_put(rcd); 6819 } 6820 } 6821 6822 /* 6823 * Unfreeze handling for the RXE block - kernel contexts only. 6824 * This will also enable the port. User contexts will do unfreeze 6825 * handling on a per-context basis as they call into the driver. 6826 * 6827 */ 6828 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) 6829 { 6830 u32 rcvmask; 6831 u16 i; 6832 struct hfi1_ctxtdata *rcd; 6833 6834 /* enable all kernel contexts */ 6835 for (i = 0; i < dd->num_rcv_contexts; i++) { 6836 rcd = hfi1_rcd_get_by_index(dd, i); 6837 6838 /* Ensure all non-user contexts are enabled */ 6839 if (!rcd || 6840 (i >= dd->first_dyn_alloc_ctxt)) { 6841 hfi1_rcd_put(rcd); 6842 continue; 6843 } 6844 rcvmask = HFI1_RCVCTRL_CTXT_ENB; 6845 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ 6846 rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ? 6847 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; 6848 hfi1_rcvctrl(dd, rcvmask, rcd); 6849 hfi1_rcd_put(rcd); 6850 } 6851 6852 /* enable port */ 6853 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 6854 } 6855 6856 /* 6857 * Non-interrupt SPC freeze handling. 6858 * 6859 * This is a work-queue function outside of the triggering interrupt. 
 */
void handle_freeze(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  freeze_work);
	struct hfi1_devdata *dd = ppd->dd;

	/* wait for freeze indicators on all affected blocks */
	wait_for_freeze_status(dd, 1);

	/* SPC is now frozen */

	/* do send PIO freeze steps */
	pio_freeze(dd);

	/* do send DMA freeze steps */
	sdma_freeze(dd);

	/* do send egress freeze steps - nothing to do */

	/* do receive freeze steps */
	rxe_freeze(dd);

	/*
	 * Unfreeze the hardware - clear the freeze, wait for each
	 * block's frozen bit to clear, then clear the frozen flag.
	 */
	write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
	wait_for_freeze_status(dd, 0);

	/*
	 * A0 hardware gets an extra freeze/unfreeze cycle.
	 * NOTE(review): grounded only in the is_ax() gate here; the exact
	 * erratum is not visible in this file - confirm against the HAS.
	 */
	if (is_ax(dd)) {
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
		wait_for_freeze_status(dd, 1);
		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
		wait_for_freeze_status(dd, 0);
	}

	/* do send PIO unfreeze steps for kernel contexts */
	pio_kernel_unfreeze(dd);

	/* do send DMA unfreeze steps */
	sdma_unfreeze(dd);

	/* do send egress unfreeze steps - nothing to do */

	/* do receive unfreeze steps for kernel contexts */
	rxe_kernel_unfreeze(dd);

	/*
	 * The unfreeze procedure touches global device registers when
	 * it disables and re-enables RXE. Mark the device unfrozen
	 * after all that is done so other parts of the driver waiting
	 * for the device to unfreeze don't do things out of order.
	 *
	 * The above implies that the meaning of HFI1_FROZEN flag is
	 * "Device has gone into freeze mode and freeze mode handling
	 * is still in progress."
	 *
	 * The flag will be removed when freeze mode processing has
	 * completed.
	 */
	dd->flags &= ~HFI1_FROZEN;
	wake_up(&dd->event_queue);

	/* no longer frozen */
}

/**
 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
 * counters.
 * @ppd: info of physical Hfi port
 * @link_width: new link width after link up or downgrade
 *
 * Update the PortXmitWait and PortVlXmitWait counters after
 * a link up or downgrade event to reflect a link width change.
 */
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
{
	int i;
	u16 tx_width;
	u16 link_speed;

	tx_width = tx_link_width(link_width);
	link_speed = get_link_speed(ppd->link_speed_active);

	/*
	 * There are C_VL_COUNT number of PortVLXmitWait counters.
	 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
	 */
	for (i = 0; i < C_VL_COUNT + 1; i++)
		get_xmit_wait_counters(ppd, tx_width, link_speed, i);
}

/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_up(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_up_work);
	struct hfi1_devdata *dd = ppd->dd;

	set_link_state(ppd, HLS_UP_INIT);

	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
	read_ltp_rtt(dd);
	/*
	 * OPA specifies that certain counters are cleared on a transition
	 * to link up, so do that.
	 */
	clear_linkup_counters(dd);
	/*
	 * And (re)set link up default values.
	 */
	set_linkup_defaults(ppd);

	/*
	 * Set VL15 credits. Use cached value from verify cap interrupt.
	 * In case of quick linkup or simulator, vl15 value will be set by
	 * handle_linkup_change. VerifyCap interrupt handler will not be
	 * called in those scenarios.
	 */
	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
		set_up_vl15(dd, dd->vl15buf_cached);

	/* enforce link speed enabled */
	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
		/* oops - current speed is not enabled, bounce */
		dd_dev_err(dd,
			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
			   ppd->link_speed_active, ppd->link_speed_enabled);
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
				     OPA_LINKDOWN_REASON_SPEED_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}
}

/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down
 */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
	ppd->neighbor_guid = 0;
	ppd->neighbor_port_number = 0;
	ppd->neighbor_type = 0;
	ppd->neighbor_fm_security = 0;
}

/* human-readable strings indexed by OPA link down reason code */
static const char * const link_down_reason_strs[] = {
	[OPA_LINKDOWN_REASON_NONE] = "None",
	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
	[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
	[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
	[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
	[OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
	[OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
	[OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
	[OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
	[OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
	[OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
	[OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
	[OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
	[OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
	[OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
	[OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
	[OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
	[OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
	[OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
	[OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
	[OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
	[OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
	[OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
	[OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
	[OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
	[OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
	[OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
	[OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
	[OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
	[OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
	[OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
	[OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
	[OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
		"Excessive buffer overrun",
	[OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
	[OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
	[OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
	[OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
	[OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
	[OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
	[OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
	[OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
		"Local media not installed",
	[OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
	[OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
	[OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
		"End to end not installed",
	[OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
	[OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
	[OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
	[OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
	[OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
	[OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
};

/* return the neighbor link down reason string */
static const char *link_down_reason_str(u8 reason)
{
	const char *str = NULL;

	if (reason < ARRAY_SIZE(link_down_reason_strs))
		str = link_down_reason_strs[reason];
	if (!str)
		str = "(invalid)";

	return str;
}

/*
 * Handle a link down interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_down(struct work_struct *work)
{
	u8 lcl_reason, neigh_reason = 0;
	u8 link_down_reason;
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_down_work);
	int was_up;
	static const char ldr_str[] = "Link down reason: ";

	if ((ppd->host_link_state &
	     (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
	    ppd->port_type == PORT_TYPE_FIXED)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);

	/* Go offline first, then deal with reading/writing through 8051 */
	was_up = !!(ppd->host_link_state & HLS_UP);
	set_link_state(ppd, HLS_DN_OFFLINE);
	xchg(&ppd->is_link_down_queued, 0);

	if (was_up) {
		lcl_reason = 0;
		/* link down reason is only valid if the link was up */
		read_link_down_reason(ppd->dd, &link_down_reason);
		switch (link_down_reason) {
		case LDR_LINK_TRANSFER_ACTIVE_LOW:
			/* the link went down, no idle message reason */
			dd_dev_info(ppd->dd, "%sUnexpected link down\n",
				    ldr_str);
			break;
		case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
			/*
			 * The neighbor reason is only valid if an idle message
			 * was received for it.
			 */
			read_planned_down_reason_code(ppd->dd, &neigh_reason);
			dd_dev_info(ppd->dd,
				    "%sNeighbor link down message %d, %s\n",
				    ldr_str, neigh_reason,
				    link_down_reason_str(neigh_reason));
			break;
		case LDR_RECEIVED_HOST_OFFLINE_REQ:
			dd_dev_info(ppd->dd,
				    "%sHost requested link to go offline\n",
				    ldr_str);
			break;
		default:
			dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
				    ldr_str, link_down_reason);
			break;
		}

		/*
		 * If no reason, assume peer-initiated but missed
		 * LinkGoingDown idle flits.
		 */
		if (neigh_reason == 0)
			lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
	} else {
		/* went down while polling or going up */
		lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
	}

	set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);

	/* inform the SMA when the link transitions from up to down */
	if (was_up && ppd->local_link_down_reason.sma == 0 &&
	    ppd->neigh_link_down_reason.sma == 0) {
		ppd->local_link_down_reason.sma =
			ppd->local_link_down_reason.latest;
		ppd->neigh_link_down_reason.sma =
			ppd->neigh_link_down_reason.latest;
	}

	reset_neighbor_info(ppd);

	/* disable the port */
	clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

	/*
	 * If there is no cable attached, turn the DC off. Otherwise,
	 * start the link bring up.
	 */
	if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
		dc_shutdown(ppd->dd);
	else
		start_link(ppd);
}

/* work-queue handler: bounce (offline then restart) an up link */
void handle_link_bounce(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_bounce_work);

	/*
	 * Only do something if the link is currently up.
	 */
	if (ppd->host_link_state & HLS_UP) {
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	} else {
		dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
			    __func__, link_state_name(ppd->host_link_state));
	}
}

/*
 * Mask conversion: Capability exchange to Port LTP.  The capability
 * exchange has an implicit 16b CRC that is mandatory.
 */
static int cap_to_port_ltp(int cap)
{
	int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */

	if (cap & CAP_CRC_14B)
		port_ltp |= PORT_LTP_CRC_MODE_14;
	if (cap & CAP_CRC_48B)
		port_ltp |= PORT_LTP_CRC_MODE_48;
	if (cap & CAP_CRC_12B_16B_PER_LANE)
		port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;

	return port_ltp;
}

/*
 * Convert an OPA Port LTP mask to capability mask
 */
int port_ltp_to_cap(int port_ltp)
{
	int cap_mask = 0;

	if (port_ltp & PORT_LTP_CRC_MODE_14)
		cap_mask |= CAP_CRC_14B;
	if (port_ltp & PORT_LTP_CRC_MODE_48)
		cap_mask |= CAP_CRC_48B;
	if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
		cap_mask |= CAP_CRC_12B_16B_PER_LANE;

	return cap_mask;
}

/*
 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7229 */ 7230 static int lcb_to_port_ltp(int lcb_crc) 7231 { 7232 int port_ltp = 0; 7233 7234 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE) 7235 port_ltp = PORT_LTP_CRC_MODE_PER_LANE; 7236 else if (lcb_crc == LCB_CRC_48B) 7237 port_ltp = PORT_LTP_CRC_MODE_48; 7238 else if (lcb_crc == LCB_CRC_14B) 7239 port_ltp = PORT_LTP_CRC_MODE_14; 7240 else 7241 port_ltp = PORT_LTP_CRC_MODE_16; 7242 7243 return port_ltp; 7244 } 7245 7246 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) 7247 { 7248 if (ppd->pkeys[2] != 0) { 7249 ppd->pkeys[2] = 0; 7250 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); 7251 hfi1_event_pkey_change(ppd->dd, ppd->port); 7252 } 7253 } 7254 7255 /* 7256 * Convert the given link width to the OPA link width bitmask. 7257 */ 7258 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width) 7259 { 7260 switch (width) { 7261 case 0: 7262 /* 7263 * Simulator and quick linkup do not set the width. 7264 * Just set it to 4x without complaint. 7265 */ 7266 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) 7267 return OPA_LINK_WIDTH_4X; 7268 return 0; /* no lanes up */ 7269 case 1: return OPA_LINK_WIDTH_1X; 7270 case 2: return OPA_LINK_WIDTH_2X; 7271 case 3: return OPA_LINK_WIDTH_3X; 7272 case 4: return OPA_LINK_WIDTH_4X; 7273 default: 7274 dd_dev_info(dd, "%s: invalid width %d, using 4\n", 7275 __func__, width); 7276 return OPA_LINK_WIDTH_4X; 7277 } 7278 } 7279 7280 /* 7281 * Do a population count on the bottom nibble. 7282 */ 7283 static const u8 bit_counts[16] = { 7284 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 7285 }; 7286 7287 static inline u8 nibble_to_count(u8 nibble) 7288 { 7289 return bit_counts[nibble & 0xf]; 7290 } 7291 7292 /* 7293 * Read the active lane information from the 8051 registers and return 7294 * their widths. 
 *
 * Active lane information is found in these 8051 registers:
 *	enable_lane_tx
 *	enable_lane_rx
 */
static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
			    u16 *rx_width)
{
	u16 tx, rx;
	u8 enable_lane_rx;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	u8 max_rate;

	/* read the active lanes */
	read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			 &rx_polarity_inversion, &max_rate);
	read_local_lni(dd, &enable_lane_rx);

	/* convert lane-enable bitmasks to lane counts */
	tx = nibble_to_count(enable_lane_tx);
	rx = nibble_to_count(enable_lane_rx);

	/*
	 * Set link_speed_active here, overriding what was set in
	 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
	 * set the max_rate field in handle_verify_cap until v0.19.
	 */
	if ((dd->icode == ICODE_RTL_SILICON) &&
	    (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
		/* max_rate: 0 = 12.5G, 1 = 25G */
		switch (max_rate) {
		case 0:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		default:
			dd_dev_err(dd,
				   "%s: unexpected max rate %d, using 25Gb\n",
				   __func__, (int)max_rate);
			dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	}

	dd_dev_info(dd,
		    "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
		    enable_lane_tx, tx, enable_lane_rx, rx);
	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);
}

/*
 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
 * Valid after the end of VerifyCap and during LinkUp.  Does not change
 * after link up.  I.e. look elsewhere for downgrade information.
 *
 * Bits are:
 *	+ bits [7:4] contain the number of active transmitters
 *	+ bits [3:0] contain the number of active receivers
 * These are numbers 1 through 4 and can be different values if the
 * link is asymmetric.
 *
 * verify_cap_local_fm_link_width[0] retains its original value.
 */
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
	u16 active_tx, active_rx;

	read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
	/* widths[15:12] = tx count, widths[11:8] = rx count (see above) */
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;

	*tx_width = link_width_to_bits(dd, tx);
	*rx_width = link_width_to_bits(dd, rx);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
}

/*
 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
 * hardware information when the link first comes up.
 *
 * The link width is not available until after VerifyCap.AllFramesReceived
 * (the trigger for handle_verify_cap), so this is outside that routine
 * and should be called when the 8051 signals linkup.
 */
void get_linkup_link_widths(struct hfi1_pportdata *ppd)
{
	u16 tx_width, rx_width;

	/* get end-of-LNI link widths */
	get_linkup_widths(ppd->dd, &tx_width, &rx_width);

	/* use tx_width as the link is supposed to be symmetric on link up */
	ppd->link_width_active = tx_width;
	/* link width downgrade active (LWD.A) starts out matching LW.A */
	ppd->link_width_downgrade_tx_active = ppd->link_width_active;
	ppd->link_width_downgrade_rx_active = ppd->link_width_active;
	/* per OPA spec, on link up LWD.E resets to LWD.S */
	ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units [10^6 bits/sec]) */
	ppd->current_egress_rate = active_egress_rate(ppd);
}

/*
 * Handle a verify capabilities interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_verify_cap(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_vc_work);
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	u8 power_management;
	u8 continuous;
	u8 vcu;
	u8 vau;
	u8 z;
	u16 vl15buf;
	u16 link_widths;
	u16 crc_mask;
	u16 crc_val;
	u16 device_id;
	u16 active_tx, active_rx;
	u8 partner_supported_crc;
	u8 remote_tx_rate;
	u8 device_rev;

	set_link_state(ppd, HLS_VERIFY_CAP);

	/* put the LCB FIFOs in reset while reconfiguring it below */
	lcb_shutdown(dd, 0);
	adjust_lcb_for_fpga_serdes(dd);

	/* read everything the peer advertised during capability exchange */
	read_vc_remote_phy(dd, &power_management, &continuous);
	read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
			      &partner_supported_crc);
	read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
	read_remote_device_id(dd, &device_id, &device_rev);

	/* print the active widths */
	get_link_widths(dd, &active_tx, &active_rx);
	dd_dev_info(dd,
		    "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
		    (int)power_management, (int)continuous);
	dd_dev_info(dd,
		    "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
		    (int)vau, (int)z, (int)vcu, (int)vl15buf,
		    (int)partner_supported_crc);
	dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
		    (u32)remote_tx_rate, (u32)link_widths);
	dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
		    (u32)device_id, (u32)device_rev);
	/*
	 * The peer vAU value just read is the peer receiver value.  HFI does
	 * not support a transmit vAU of 0 (AU == 8).  We advertised that
	 * with Z=1 in the fabric capabilities sent to the peer.  The peer
	 * will see our Z=1, and, if it advertised a vAU of 0, will move its
	 * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
	 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
	 * subject to the Z value exception.
	 */
	if (vau == 0)
		vau = 1;
	set_up_vau(dd, vau);

	/*
	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
	 * credits value and wait for link-up interrupt to set it.
	 */
	set_up_vl15(dd, 0);
	dd->vl15buf_cached = vl15buf;

	/* set up the LCB CRC mode */
	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;

	/* order is important: use the lowest bit in common */
	if (crc_mask & CAP_CRC_14B)
		crc_val = LCB_CRC_14B;
	else if (crc_mask & CAP_CRC_48B)
		crc_val = LCB_CRC_48B;
	else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
		crc_val = LCB_CRC_12B_16B_PER_LANE;
	else
		crc_val = LCB_CRC_16B;

	dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
	write_csr(dd, DC_LCB_CFG_CRC_MODE,
		  (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);

	/* set (14b only) or clear sideband credit */
	reg = read_csr(dd, SEND_CM_CTRL);
	if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
		write_csr(dd, SEND_CM_CTRL,
			  reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	} else {
		write_csr(dd, SEND_CM_CTRL,
			  reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
	}

	ppd->link_speed_active = 0;	/* invalid value */
	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
		/* remote_tx_rate: 0 = 12.5G, 1 = 25G */
		switch (remote_tx_rate) {
		case 0:
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	} else {
		/* actual rate is highest bit of the ANDed rates */
		u8 rate = remote_tx_rate & ppd->local_tx_rate;

		if (rate & 2)
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
		else if (rate & 1)
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
	}
	if (ppd->link_speed_active == 0) {
		dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
			   __func__, (int)remote_tx_rate);
		ppd->link_speed_active = OPA_LINK_SPEED_25G;
	}

	/*
	 * Cache the values of the supported, enabled, and active
	 * LTP CRC modes to return in 'portinfo' queries. But the bit
	 * flags that are returned in the portinfo query differ from
	 * what's in the link_crc_mask, crc_sizes, and crc_val
	 * variables. Convert these here.
	 */
	ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
	/* supported crc modes */
	ppd->port_ltp_crc_mode |=
		cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
	/* enabled crc modes */
	ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
	/* active crc mode */

	/* set up the remote credit return table */
	assign_remote_cm_au_table(dd, vcu);

	/*
	 * The LCB is reset on entry to handle_verify_cap(), so this must
	 * be applied on every link up.
	 *
	 * Adjust LCB error kill enable to kill the link if
	 * these RBUF errors are seen:
	 *	REPLAY_BUF_MBE_SMASK
	 *	FLIT_INPUT_BUF_MBE_SMASK
	 */
	if (is_ax(dd)) {			/* fixed in B0 */
		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
		reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
			| DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
		write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
	}

	/* pull LCB fifos out of reset - all fifo clocks must be stable */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* give 8051 access to the LCB CSRs */
	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/* tell the 8051 to go to LinkUp */
	set_link_state(ppd, HLS_GOING_UP);
}

/**
 * apply_link_downgrade_policy - Apply the link width downgrade enabled
 * policy against the current active link widths.
 * @ppd: info of physical Hfi port
 * @refresh_widths: True indicates link downgrade event
 * @return: True indicates a successful link downgrade. False indicates
 *	    link downgrade event failed and the link will bounce back to
 *	    default link width.
 *
 * Called when the enabled policy changes or the active link widths
 * change.
 * Refresh_widths indicates that a link downgrade occurred. The
 * link_downgraded variable is set by refresh_widths and
 * determines the success/failure of the policy application.
 */
bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
				 bool refresh_widths)
{
	int do_bounce = 0;
	int tries;
	u16 lwde;
	u16 tx, rx;
	bool link_downgraded = refresh_widths;

	/* use the hls lock to avoid a race with actual link up */
	tries = 0;
retry:
	mutex_lock(&ppd->hls_lock);
	/* only apply if the link is up */
	if (ppd->host_link_state & HLS_DOWN) {
		/* still going up..wait and retry */
		if (ppd->host_link_state & HLS_GOING_UP) {
			/*
			 * Poll for up to ~100ms (1000 iterations of ~100us)
			 * for the link to finish coming up before giving up.
			 * The lock is dropped across the sleep so link-up
			 * processing is not stalled.
			 */
			if (++tries < 1000) {
				mutex_unlock(&ppd->hls_lock);
				usleep_range(100, 120); /* arbitrary */
				goto retry;
			}
			dd_dev_err(ppd->dd,
				   "%s: giving up waiting for link state change\n",
				   __func__);
		}
		goto done;
	}

	lwde = ppd->link_width_downgrade_enabled;

	if (refresh_widths) {
		/* a downgrade happened - refresh the active widths */
		get_link_widths(ppd->dd, &tx, &rx);
		ppd->link_width_downgrade_tx_active = tx;
		ppd->link_width_downgrade_rx_active = rx;
	}

	if (ppd->link_width_downgrade_tx_active == 0 ||
	    ppd->link_width_downgrade_rx_active == 0) {
		/* the 8051 reported a dead link as a downgrade */
		dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
		link_downgraded = false;
	} else if (lwde == 0) {
		/* downgrade is disabled */

		/* bounce if not at starting active width */
		if ((ppd->link_width_active !=
		     ppd->link_width_downgrade_tx_active) ||
		    (ppd->link_width_active !=
		     ppd->link_width_downgrade_rx_active)) {
			dd_dev_err(ppd->dd,
				   "Link downgrade is disabled and link has downgraded, downing link\n");
			dd_dev_err(ppd->dd,
				   " original 0x%x, tx active 0x%x, rx active 0x%x\n",
				   ppd->link_width_active,
				   ppd->link_width_downgrade_tx_active,
				   ppd->link_width_downgrade_rx_active);
			do_bounce = 1;
			link_downgraded = false;
		}
	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
		/* Tx or Rx is outside the enabled policy */
		dd_dev_err(ppd->dd,
			   "Link is outside of downgrade allowed, downing link\n");
		dd_dev_err(ppd->dd,
			   " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
			   lwde, ppd->link_width_downgrade_tx_active,
			   ppd->link_width_downgrade_rx_active);
		do_bounce = 1;
		link_downgraded = false;
	}

done:
	mutex_unlock(&ppd->hls_lock);

	if (do_bounce) {
		/* bounce the link: take it offline, then restart it */
		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
				     OPA_LINKDOWN_REASON_WIDTH_POLICY);
		set_link_state(ppd, HLS_DN_OFFLINE);
		start_link(ppd);
	}

	return link_downgraded;
}

/*
 * Handle a link downgrade interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */
void handle_link_downgrade(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  link_downgrade_work);

	dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
	/* only update xmit counters if the downgrade policy was applied */
	if (apply_link_downgrade_policy(ppd, true))
		update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
}

/* decode DCC error flag bits into a human-readable string in buf */
static char *dcc_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dcc_err_flags,
			   ARRAY_SIZE(dcc_err_flags));
}

/* decode LCB error flag bits into a human-readable string in buf */
static char *lcb_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, lcb_err_flags,
			   ARRAY_SIZE(lcb_err_flags));
}

/* decode 8051 error flag bits into a human-readable string in buf */
static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_err_flags,
			   ARRAY_SIZE(dc8051_err_flags));
}

/* decode 8051 info error flag bits into a human-readable string in buf */
static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
			   ARRAY_SIZE(dc8051_info_err_flags));
}

/* decode 8051 info host message flag bits into a string in buf */
static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
{
	return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
			   ARRAY_SIZE(dc8051_info_host_msg_flags));
}

/*
 * Handle a DC 8051 error interrupt: decode the error and host message
 * flags reported by the firmware and react to each known bit, reporting
 * (but otherwise ignoring) anything left over.
 */
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];

	/* look at the flags */
	if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
		/* 8051 information set by firmware */
		/* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
		info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
		err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			    DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
			& DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;

		/*
		 * Handle error flags.
		 */
		if (err & FAILED_LNI) {
			/*
			 * LNI error indications are cleared by the 8051
			 * only when starting polling. Only pay attention
			 * to them when in the states that occur during
			 * LNI.
			 */
			if (ppd->host_link_state
			    & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
				queue_link_down = 1;
				dd_dev_info(dd, "Link error: %s\n",
					    dc8051_info_err_string(buf,
								   sizeof(buf),
								   err &
								   FAILED_LNI));
			}
			err &= ~(u64)FAILED_LNI;
		}
		/* unknown frames can happen during LNI, just count */
		if (err & UNKNOWN_FRAME) {
			ppd->unknown_frame_count++;
			err &= ~(u64)UNKNOWN_FRAME;
		}
		if (err) {
			/* report remaining errors, but do not do anything */
			dd_dev_err(dd, "8051 info error: %s\n",
				   dc8051_info_err_string(buf, sizeof(buf),
							  err));
		}

		/*
		 * Handle host message flags.
		 */
		if (host_msg & HOST_REQ_DONE) {
			/*
			 * Presently, the driver does a busy wait for
			 * host requests to complete. This is only an
			 * informational message.
			 * NOTE: The 8051 clears the host message
			 * information *on the next 8051 command*.
			 * Therefore, when linkup is achieved,
			 * this flag will still be set.
			 */
			host_msg &= ~(u64)HOST_REQ_DONE;
		}
		if (host_msg & BC_SMA_MSG) {
			queue_work(ppd->link_wq, &ppd->sma_message_work);
			host_msg &= ~(u64)BC_SMA_MSG;
		}
		if (host_msg & LINKUP_ACHIEVED) {
			dd_dev_info(dd, "8051: Link up\n");
			queue_work(ppd->link_wq, &ppd->link_up_work);
			host_msg &= ~(u64)LINKUP_ACHIEVED;
		}
		if (host_msg & EXT_DEVICE_CFG_REQ) {
			handle_8051_request(ppd);
			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
		}
		if (host_msg & VERIFY_CAP_FRAME) {
			queue_work(ppd->link_wq, &ppd->link_vc_work);
			host_msg &= ~(u64)VERIFY_CAP_FRAME;
		}
		if (host_msg & LINK_GOING_DOWN) {
			const char *extra = "";
			/* no downgrade action needed if going down */
			if (host_msg & LINK_WIDTH_DOWNGRADED) {
				host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
				extra = " (ignoring downgrade)";
			}
			dd_dev_info(dd, "8051: Link down%s\n", extra);
			queue_link_down = 1;
			host_msg &= ~(u64)LINK_GOING_DOWN;
		}
		if (host_msg & LINK_WIDTH_DOWNGRADED) {
			queue_work(ppd->link_wq, &ppd->link_downgrade_work);
			host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		}
		if (host_msg) {
			/* report remaining messages, but do not do anything */
			dd_dev_info(dd, "8051 info host message: %s\n",
				    dc8051_info_host_msg_string(buf,
								sizeof(buf),
								host_msg));
		}

		reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
	}
	if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
		/*
		 * Lost the 8051 heartbeat. If this happens, we
		 * receive constant interrupts about it. Disable
		 * the interrupt after the first.
		 */
		dd_dev_err(dd, "Lost 8051 heartbeat\n");
		write_csr(dd, DC_DC8051_ERR_EN,
			  read_csr(dd, DC_DC8051_ERR_EN) &
			  ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);

		reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	}
	if (reg) {
		/* report the error, but do not do anything */
		dd_dev_err(dd, "8051 error: %s\n",
			   dc8051_err_string(buf, sizeof(buf), reg));
	}

	if (queue_link_down) {
		/*
		 * if the link is already going down or disabled, do not
		 * queue another. If there's a link down entry already
		 * queued, don't queue another one.
		 */
		if ((ppd->host_link_state &
		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
		    ppd->link_enabled == 0) {
			dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
				    __func__, ppd->host_link_state,
				    ppd->link_enabled);
		} else {
			/* xchg guarantees only one link-down work is queued */
			if (xchg(&ppd->is_link_down_queued, 1) == 1)
				dd_dev_info(dd,
					    "%s: link down request already queued\n",
					    __func__);
			else
				queue_work(ppd->link_wq, &ppd->link_down_work);
		}
	}
}

/* descriptions of DCC fmconfig error info codes, indexed by code */
static const char * const fm_config_txt[] = {
	[0] =
	"BadHeadDist: Distance violation between two head flits",
	[1] =
	"BadTailDist: Distance violation between two tail flits",
	[2] =
	"BadCtrlDist: Distance violation between two credit control flits",
	[3] =
	"BadCrdAck: Credits return for unsupported VL",
	[4] =
	"UnsupportedVLMarker: Received VL Marker",
	[5] =
	"BadPreempt: Exceeded the preemption nesting level",
	[6] =
	"BadControlFlit: Received unsupported control flit",
	/* no 7 */
	[8] =
	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};

/* descriptions of DCC port receive error info codes, indexed by code */
static const char * const port_rcv_txt[] = {
	[1] =
	"BadPktLen: Illegal PktLen",
	[2] =
	"PktLenTooLong: Packet longer than PktLen",
	[3] =
	"PktLenTooShort: Packet shorter than PktLen",
	[4] =
	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
	[5] =
	"BadDLID: Illegal DLID (0, doesn't match HFI)",
	[6] =
	"BadL2: Illegal L2 opcode",
	[7] =
	"BadSC: Unsupported SC",
	[9] =
	"BadRC: Illegal RC",
	[11] =
	"PreemptError: Preempting with same VL",
	[12] =
	"PreemptVL15: Preempting a VL15 packet",
};

/* bit offsets of the fmconfig/portrcv codes within port_error_action */
#define OPA_LDR_FMCONFIG_OFFSET 16
#define OPA_LDR_PORTRCV_OFFSET 0
/*
 * Handle a DCC error interrupt: latch first-error info for later
 * 'portinfo' queries, report each error, and bounce the link if the
 * FM-configured port error action requests it.
 */
static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	u64 info, hdr0, hdr1;
	const char *extra;
	char buf[96];
	struct hfi1_pportdata *ppd = dd->pport;
	u8 lcl_reason = 0;
	int do_bounce = 0;

	if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
		/* only latch info for the first error since last query */
		if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
			info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
			dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
		}
		reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
		/* NOTE(review): this shadows the outer ppd with the same
		 * value (dd->pport) - harmless, but could be removed */
		struct hfi1_pportdata *ppd = dd->pport;
		/* this counter saturates at (2^32) - 1 */
		if (ppd->link_downed < (u32)UINT_MAX)
			ppd->link_downed++;
		reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
		if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
			dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}
		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
			if (ppd->port_error_action &
			    OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this error
				 */
				lcl_reason =
				  OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
			}
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_FMCONFIG_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
		}

		/* just report this */
		dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
					extra);
		reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
		u8 reason_valid = 1;

		info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
		hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
		hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
		/* only latch info for the first error since last query */
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			dd->err_info_rcvport.status_and_code =
				info & OPA_EI_CODE_SMASK;
			/* set status bit */
			dd->err_info_rcvport.status_and_code |=
				OPA_EI_STATUS_SMASK;
			/*
			 * save first 2 flits in the packet that caused
			 * the error
			 */
			dd->err_info_rcvport.packet_flit1 = hdr0;
			dd->err_info_rcvport.packet_flit2 = hdr1;
		}
		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}

		if (reason_valid && !do_bounce) {
			do_bounce = ppd->port_error_action &
				(1 << (OPA_LDR_PORTRCV_OFFSET + info));
			lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
		}

		/* just report this */
		dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
					" hdr0 0x%llx, hdr1 0x%llx\n",
					extra, hdr0, hdr1);

		reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
	}

	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
		/* informative only */
		dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
	}
	if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
		/* informative only */
		dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
		reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
	}

	/* fault-injection debug support may suppress late EBP errors */
	if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
		reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;

	/* report any remaining errors */
	if (reg)
		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
					dcc_err_string(buf, sizeof(buf), reg));

	if (lcl_reason == 0)
		lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;

	if (do_bounce) {
		dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
					__func__);
		set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
		queue_work(ppd->link_wq, &ppd->link_bounce_work);
	}
}

/* report LCB errors; no recovery action is taken here */
static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		    lcb_err_string(buf, sizeof(buf), reg));
}

/*
 * CCE block DC interrupt. Source is < 8.
 */
static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
{
	const struct err_reg_info *eri = &dc_errs[source];

	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
	} else if (source == 3 /* dc_lbm_int */) {
		/*
		 * This indicates that a parity error has occurred on the
		 * address/control lines presented to the LBM. The error
		 * is a single pulse, there is no associated error flag,
		 * and it is non-maskable. This is because if a parity
		 * error occurs on the request the request is dropped.
		 * This should never occur, but it is nice to know if it
		 * ever does.
		 */
		dd_dev_err(dd, "Parity error in DC LBM block\n");
	} else {
		dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
	}
}

/*
 * TX block send credit interrupt. Source is < 160.
 */
static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
{
	sc_group_release_update(dd, source);
}

/*
 * TX block SDMA interrupt. Source is < 48.
 *
 * SDMA interrupts are grouped by type:
 *
 * 0 - N-1 = SDma
 * N - 2N-1 = SDmaProgress
 * 2N - 3N-1 = SDmaIdle
 */
static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
{
	/* what interrupt */
	unsigned int what = source / TXE_NUM_SDMA_ENGINES;
	/* which engine */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(&dd->per_sdma[which]);
#endif

	if (likely(what < 3 && which < dd->num_sdma)) {
		sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
	} else {
		/* should not happen */
		dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
	}
}

/**
 * is_rcv_avail_int() - User receive context available IRQ handler
 * @dd: valid dd
 * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
 *
 * RX block receive available interrupt. Source is < 160.
 *
 * This is the general interrupt handler for user (PSM) receive contexts,
 * and can only be used for non-threaded IRQs.
8158 */ 8159 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source) 8160 { 8161 struct hfi1_ctxtdata *rcd; 8162 char *err_detail; 8163 8164 if (likely(source < dd->num_rcv_contexts)) { 8165 rcd = hfi1_rcd_get_by_index(dd, source); 8166 if (rcd) { 8167 handle_user_interrupt(rcd); 8168 hfi1_rcd_put(rcd); 8169 return; /* OK */ 8170 } 8171 /* received an interrupt, but no rcd */ 8172 err_detail = "dataless"; 8173 } else { 8174 /* received an interrupt, but are not using that context */ 8175 err_detail = "out of range"; 8176 } 8177 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n", 8178 err_detail, source); 8179 } 8180 8181 /** 8182 * is_rcv_urgent_int() - User receive context urgent IRQ handler 8183 * @dd: valid dd 8184 * @source: logical IRQ source (offset from IS_RCVURGENT_START) 8185 * 8186 * RX block receive urgent interrupt. Source is < 160. 8187 * 8188 * NOTE: kernel receive contexts specifically do NOT enable this IRQ. 8189 */ 8190 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source) 8191 { 8192 struct hfi1_ctxtdata *rcd; 8193 char *err_detail; 8194 8195 if (likely(source < dd->num_rcv_contexts)) { 8196 rcd = hfi1_rcd_get_by_index(dd, source); 8197 if (rcd) { 8198 handle_user_interrupt(rcd); 8199 hfi1_rcd_put(rcd); 8200 return; /* OK */ 8201 } 8202 /* received an interrupt, but no rcd */ 8203 err_detail = "dataless"; 8204 } else { 8205 /* received an interrupt, but are not using that context */ 8206 err_detail = "out of range"; 8207 } 8208 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n", 8209 err_detail, source); 8210 } 8211 8212 /* 8213 * Reserved range interrupt. Should not be called in normal operation. 
8214 */ 8215 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source) 8216 { 8217 char name[64]; 8218 8219 dd_dev_err(dd, "unexpected %s interrupt\n", 8220 is_reserved_name(name, sizeof(name), source)); 8221 } 8222 8223 static const struct is_table is_table[] = { 8224 /* 8225 * start end 8226 * name func interrupt func 8227 */ 8228 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END, 8229 is_misc_err_name, is_misc_err_int }, 8230 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END, 8231 is_sdma_eng_err_name, is_sdma_eng_err_int }, 8232 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, 8233 is_sendctxt_err_name, is_sendctxt_err_int }, 8234 { IS_SDMA_START, IS_SDMA_IDLE_END, 8235 is_sdma_eng_name, is_sdma_eng_int }, 8236 { IS_VARIOUS_START, IS_VARIOUS_END, 8237 is_various_name, is_various_int }, 8238 { IS_DC_START, IS_DC_END, 8239 is_dc_name, is_dc_int }, 8240 { IS_RCVAVAIL_START, IS_RCVAVAIL_END, 8241 is_rcv_avail_name, is_rcv_avail_int }, 8242 { IS_RCVURGENT_START, IS_RCVURGENT_END, 8243 is_rcv_urgent_name, is_rcv_urgent_int }, 8244 { IS_SENDCREDIT_START, IS_SENDCREDIT_END, 8245 is_send_credit_name, is_send_credit_int}, 8246 { IS_RESERVED_START, IS_RESERVED_END, 8247 is_reserved_name, is_reserved_int}, 8248 }; 8249 8250 /* 8251 * Interrupt source interrupt - called when the given source has an interrupt. 8252 * Source is a bit index into an array of 64-bit integers. 
 */
static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
{
	const struct is_table *entry;

	/* avoids a double compare by walking the table in-order */
	for (entry = &is_table[0]; entry->is_name; entry++) {
		if (source <= entry->end) {
			trace_hfi1_interrupt(dd, entry, source);
			/* handlers receive the offset within their range */
			entry->is_int(dd, source - entry->start);
			return;
		}
	}
	/* fell off the end */
	dd_dev_err(dd, "invalid interrupt source %u\n", source);
}

/**
 * general_interrupt - General interrupt handler
 * @irq: MSIx IRQ vector
 * @data: hfi1 devdata
 *
 * This is able to correctly handle all non-threaded interrupts. Receive
 * context DATA IRQs are threaded and are not supported by this handler.
 *
 */
irqreturn_t general_interrupt(int irq, void *data)
{
	struct hfi1_devdata *dd = data;
	u64 regs[CCE_NUM_INT_CSRS];
	u32 bit;
	int i;
	irqreturn_t handled = IRQ_NONE;

	this_cpu_inc(*dd->int_counter);

	/* phase 1: scan and clear all handled interrupts */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		if (dd->gi_mask[i] == 0) {
			regs[i] = 0; /* used later */
			continue;
		}
		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
			dd->gi_mask[i];
		/* only clear if anything is set */
		if (regs[i])
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
	}

	/* phase 2: call the appropriate handler */
	for_each_set_bit(bit, (unsigned long *)&regs[0],
			 CCE_NUM_INT_CSRS * 64) {
		is_interrupt(dd, bit);
		handled = IRQ_HANDLED;
	}

	return handled;
}

/* per-engine SDMA IRQ handler: clear and dispatch this engine's bits */
irqreturn_t sdma_interrupt(int irq, void *data)
{
	struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;

#ifdef CONFIG_SDMA_VERBOSITY
	dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
		   slashstrip(__FILE__), __LINE__, __func__);
	sdma_dumpstate(sde);
#endif

	this_cpu_inc(*dd->int_counter);

	/* This read_csr is really bad in the hot path */
	status = read_csr(dd,
			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
			  & sde->imask;
	if (likely(status)) {
		/* clear the interrupt(s) */
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);

		/* handle the interrupt(s) */
		sdma_engine_interrupt(sde, status);
	} else {
		dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
					sde->this_idx);
	}
	return IRQ_HANDLED;
}

/*
 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 * to ensure that the write completed. This does NOT guarantee that
 * queued DMA writes to memory from the chip are pushed.
 */
static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);

	write_csr(dd, addr, rcd->imask);
	/* force the above write on the chip and get a value back */
	(void)read_csr(dd, addr);
}

/* force the receive interrupt */
void force_recv_intr(struct hfi1_ctxtdata *rcd)
{
	write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
}

/*
 * Return non-zero if a packet is present.
 *
 * This routine is called when rechecking for packets after the RcvAvail
 * interrupt has been cleared down. First, do a quick check of memory for
 * a packet present. If not found, use an expensive CSR read of the context
 * tail to determine the actual tail. The CSR read is necessary because there
 * is no method to push pending DMAs to memory other than an interrupt and we
 * are trying to determine if we need to force an interrupt.
 */
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
	u32 tail;

	if (hfi1_packet_present(rcd))
		return 1;

	/* fall back to a CSR read, correct independent of DMA_RTAIL */
	tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
	return hfi1_rcd_head(rcd) != tail;
}

/*
 * Common code for receive contexts interrupt handlers.
 * Update traces, increment kernel IRQ counter and
 * setup ASPM when needed.
 */
static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;

	trace_hfi1_receive_interrupt(dd, rcd);
	this_cpu_inc(*dd->int_counter);
	aspm_ctx_disable(rcd);
}

/*
 * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
 * when there are packets present in the queue. When calling
 * with interrupts enabled please use hfi1_rcd_eoi_intr.
 *
 * @rcd: valid receive context
 */
static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
{
	/* nothing to do if the receive queue was never set up */
	if (!rcd->rcvhdrq)
		return;
	clear_recv_intr(rcd);
	/* re-raise the IRQ if a packet slipped in after the clear */
	if (check_packet_present(rcd))
		force_recv_intr(rcd);
}

/**
 * hfi1_rcd_eoi_intr() - End of Interrupt processing action
 *
 * @rcd: Ptr to hfi1_ctxtdata of receive context
 *
 * Hold IRQs so we can safely clear the interrupt and
 * recheck for a packet that may have arrived after the previous
 * check and the interrupt clear. If a packet arrived, force another
 * interrupt. This routine can be called at the end of receive packet
 * processing in interrupt service routines, interrupt service thread
 * and softirqs
 */
static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
{
	unsigned long flags;

	local_irq_save(flags);
	__hfi1_rcd_eoi_intr(rcd);
	local_irq_restore(flags);
}

/**
 * hfi1_netdev_rx_napi - napi poll function to move eoi inline
 * @napi: pointer to napi object
 * @budget: netdev budget
 */
int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
{
	struct hfi1_netdev_rxq *rxq = container_of(napi,
			struct hfi1_netdev_rxq, napi);
	struct hfi1_ctxtdata *rcd = rxq->rcd;
	int work_done = 0;

	work_done = rcd->do_interrupt(rcd, budget);

	/* under budget: polling is complete, re-arm the interrupt */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		hfi1_rcd_eoi_intr(rcd);
	}

	return work_done;
}

/* Receive packet napi handler for netdevs AIP */
irqreturn_t receive_context_interrupt_napi(int irq, void *data)
{
	struct hfi1_ctxtdata *rcd = data;

	receive_interrupt_common(rcd);

	if (likely(rcd->napi)) {
		if (likely(napi_schedule_prep(rcd->napi)))
			__napi_schedule_irqoff(rcd->napi);
		else
			/* napi already scheduled - just end the interrupt */
			__hfi1_rcd_eoi_intr(rcd);
	} else {
		WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
			  rcd->ctxt);
		__hfi1_rcd_eoi_intr(rcd);
	}

	return IRQ_HANDLED;
}

/*
 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth). The
 * chip receive interrupt is *not* cleared down until this or the thread (if
 * invoked) is finished. The intent is to avoid extra interrupts while we
 * are processing packets anyway.
8489 */ 8490 irqreturn_t receive_context_interrupt(int irq, void *data) 8491 { 8492 struct hfi1_ctxtdata *rcd = data; 8493 int disposition; 8494 8495 receive_interrupt_common(rcd); 8496 8497 /* receive interrupt remains blocked while processing packets */ 8498 disposition = rcd->do_interrupt(rcd, 0); 8499 8500 /* 8501 * Too many packets were seen while processing packets in this 8502 * IRQ handler. Invoke the handler thread. The receive interrupt 8503 * remains blocked. 8504 */ 8505 if (disposition == RCV_PKT_LIMIT) 8506 return IRQ_WAKE_THREAD; 8507 8508 __hfi1_rcd_eoi_intr(rcd); 8509 return IRQ_HANDLED; 8510 } 8511 8512 /* 8513 * Receive packet thread handler. This expects to be invoked with the 8514 * receive interrupt still blocked. 8515 */ 8516 irqreturn_t receive_context_thread(int irq, void *data) 8517 { 8518 struct hfi1_ctxtdata *rcd = data; 8519 8520 /* receive interrupt is still blocked from the IRQ handler */ 8521 (void)rcd->do_interrupt(rcd, 1); 8522 8523 hfi1_rcd_eoi_intr(rcd); 8524 8525 return IRQ_HANDLED; 8526 } 8527 8528 /* ========================================================================= */ 8529 8530 u32 read_physical_state(struct hfi1_devdata *dd) 8531 { 8532 u64 reg; 8533 8534 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE); 8535 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT) 8536 & DC_DC8051_STS_CUR_STATE_PORT_MASK; 8537 } 8538 8539 u32 read_logical_state(struct hfi1_devdata *dd) 8540 { 8541 u64 reg; 8542 8543 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); 8544 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT) 8545 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK; 8546 } 8547 8548 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate) 8549 { 8550 u64 reg; 8551 8552 reg = read_csr(dd, DCC_CFG_PORT_CONFIG); 8553 /* clear current state, set new state */ 8554 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK; 8555 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT; 8556 write_csr(dd, DCC_CFG_PORT_CONFIG, reg); 8557 } 8558 8559 /* 
 * Use the 8051 to read a LCB CSR.
 */
static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	u32 regno;
	int ret;

	/* simulator: read directly while holding LCB access */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		if (acquire_lcb_access(dd, 0) == 0) {
			*data = read_csr(dd, addr);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Provide a cache for some of the LCB registers in case the LCB is
 * unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
	u32 off;	/* LCB CSR offset */
	u64 val;	/* last successfully read value */
};

static struct lcb_datum lcb_cache[] = {
	{ DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
	{ DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
	{ DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
};

/* Refresh every cached LCB value that can currently be read */
static void update_lcb_cache(struct hfi1_devdata *dd)
{
	int i;
	int ret;
	u64 val;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		ret = read_lcb_csr(dd, lcb_cache[i].off, &val);

		/* Update if we get good data */
		if (likely(ret != -EBUSY))
			lcb_cache[i].val = val;
	}
}

/* Look up @off in lcb_cache; 0 on hit, -1 if the offset is not cached */
static int read_lcb_cache(u32 off, u64 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
		if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}

/*
 * Read an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return read_lcb_via_8051(dd, addr, data);
	/* if going up or down, check the cache, otherwise, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
		if (read_lcb_cache(addr, data))
			return -EBUSY;
		return 0;
	}

	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}

/*
 * Use the 8051 to write a LCB CSR.
 */
static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	u32 regno;
	int ret;

	/* simulator, or firmware older than 0.20: write directly */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
	    (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
		if (acquire_lcb_access(dd, 0) == 0) {
			write_csr(dd, addr, data);
			release_lcb_access(dd, 0);
			return 0;
		}
		return -EBUSY;
	}

	/* register is an index of LCB registers: (offset - base) / 8 */
	regno = (addr - DC_LCB_CFG_RUN) >> 3;
	ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
	if (ret != HCMD_SUCCESS)
		return -EBUSY;
	return 0;
}

/*
 * Write an LCB CSR.  Access may not be in host control, so check.
 * Return 0 on success, -EBUSY on failure.
 */
int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
{
	struct hfi1_pportdata *ppd = dd->pport;

	/* if up, go through the 8051 for the value */
	if (ppd->host_link_state & HLS_UP)
		return write_lcb_via_8051(dd, addr, data);
	/* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;
	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}

/*
 * Issue a host command to the 8051 firmware and wait for completion.
 *
 * Returns:
 *	< 0 = Linux error, not able to get access
 *	> 0 = 8051 command RETURN_CODE
 */
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long timeout;

	hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);

	/* serialize all access to the 8051 command interface */
	mutex_lock(&dd->dc8051_lock);

	/* We can't send any commands to the 8051 if it's in reset */
	if (dd->dc_shutdown) {
		return_code = -ENODEV;
		goto fail;
	}

	/*
	 * If an 8051 host command timed out previously, then the 8051 is
	 * stuck.
	 *
	 * On first timeout, attempt to reset and restart the entire DC
	 * block (including 8051). (Is this too big of a hammer?)
	 *
	 * If the 8051 times out a second time, the reset did not bring it
	 * back to healthy life. In that case, fail any subsequent commands.
	 */
	if (dd->dc8051_timed_out) {
		if (dd->dc8051_timed_out > 1) {
			dd_dev_err(dd,
				   "Previous 8051 host command timed out, skipping command %u\n",
				   type);
			return_code = -ENXIO;
			goto fail;
		}
		_dc_shutdown(dd);
		_dc_start(dd);
	}

	/*
	 * If there is no timeout, then the 8051 command interface is
	 * waiting for a command.
	 */

	/*
	 * When writing a LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
	 * address in 7:0. Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
	 *
	 * Write data
	 *   39:00 -> in_data[47:8]
	 *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
	 *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
	 */
	if (type == HCMD_WRITE_LCB_CSR) {
		in_data |= ((*out_data) & 0xffffffffffull) << 8;
		/* must preserve COMPLETED - it is tied to hardware */
		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
		reg |= ((((*out_data) >> 40) & 0xff) <<
				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
		      | ((((*out_data) >> 48) & 0xffff) <<
				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
		write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
	}

	/*
	 * Do two writes: the first to stabilize the type and req_data, the
	 * second to activate.
	 */
	reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
		| (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
			<< DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
	reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);

	/* wait for completion, alternate: interrupt */
	timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
	while (1) {
		reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
		completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
		if (completed)
			break;
		if (time_after(jiffies, timeout)) {
			dd->dc8051_timed_out++;
			dd_dev_err(dd, "8051 host command %u timeout\n", type);
			if (out_data)
				*out_data = 0;
			return_code = -ETIMEDOUT;
			goto fail;
		}
		udelay(2);
	}

	if (out_data) {
		*out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
		if (type == HCMD_READ_LCB_CSR) {
			/* top 16 bits are in a different register */
			*out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
				& DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
				<< (48
				    - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
		}
	}
	return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
				& DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
	dd->dc8051_timed_out = 0;
	/*
	 * Clear command for next user.
	 */
	write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);

fail:
	mutex_unlock(&dd->dc8051_lock);
	return return_code;
}

/* Ask the 8051 to move the physical link to @state */
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
{
	return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}

/*
 * Load one 32-bit config value into an 8051 firmware field.
 * Returns the 8051 command result (HCMD_SUCCESS on success).
 */
int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
		     u8 lane_id, u32 config_data)
{
	u64 data;
	int ret;

	data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
		| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
		| (u64)config_data << LOAD_DATA_DATA_SHIFT;
	ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "load 8051 config: field id %d, lane %d, err %d\n",
			   (int)field_id, (int)lane_id, ret);
	}
	return ret;
}

/*
 * Read the 8051 firmware "registers".  Use the RAM directly.  Always
 * set the result, even on error.
 * Return 0 on success, -errno on failure
 */
int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
		     u32 *result)
{
	u64 big_data;
	u32 addr;
	int ret;

	/* address start depends on the lane_id */
	if (lane_id < 4)
		addr = (4 * NUM_GENERAL_FIELDS)
			+ (lane_id * 4 * NUM_LANE_FIELDS);
	else
		addr = 0;
	addr += field_id * 4;

	/* read is in 8-byte chunks, hardware will truncate the address down */
	ret = read_8051_data(dd, addr, 8, &big_data);

	if (ret == 0) {
		/* extract the 4 bytes we want */
		if (addr & 0x4)
			*result = (u32)(big_data >> 32);
		else
			*result = (u32)big_data;
	} else {
		*result = 0;
		dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
			   __func__, lane_id, field_id);
	}

	return ret;
}

/* Set our local PHY verify-capability fields in the 8051 */
static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
			      u8 continuous)
{
	u32 frame;

	frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
		| power_management << POWER_MANAGEMENT_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
				GENERAL_CONFIG, frame);
}

/* Set our local fabric verify-capability fields in the 8051 */
static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
				 u16 vl15buf, u8 crc_sizes)
{
	u32 frame;

	frame = (u32)vau << VAU_SHIFT
		| (u32)z << Z_SHIFT
		| (u32)vcu << VCU_SHIFT
		| (u32)vl15buf << VL15BUF_SHIFT
		| (u32)crc_sizes << CRC_SIZES_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
				GENERAL_CONFIG, frame);
}

/* Read back the local link mode fields from the 8051 */
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
			 &frame);
	*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
	*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}

/* Set the local link mode fields in the 8051 */
static int write_vc_local_link_mode(struct hfi1_devdata *dd,
				    u8 misc_bits,
				    u8 flag_bits,
				    u16 link_widths)
{
	u32 frame;

	frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
		| (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
		| (u32)link_widths << LINK_WIDTH_SHIFT;
	return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
				frame);
}

/* Give the 8051 our device id and revision to present to the peer */
static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
				 u8 device_rev)
{
	u32 frame;

	frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
		| ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
	return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
}

/* Fetch the peer's device id and revision from the 8051 */
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev)
{
	u32 frame;

	read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
	*device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
	*device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
			& REMOTE_DEVICE_REV_MASK;
}

/* Read-modify-write the host interface version field in the 8051 */
int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
{
	u32 frame;
	u32 mask;

	mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
	read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
	/* Clear, then set field */
	frame &= ~mask;
	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
	return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
				frame);
}

/* Report the 8051 firmware version: major, minor, and patch level */
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
		      u8 *ver_patch)
{
	u32 frame;

	read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
	*ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
		STS_FM_VERSION_MAJOR_MASK;
	*ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
		STS_FM_VERSION_MINOR_MASK;

	/* the patch level lives in a separate firmware field */
	read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
	*ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
		STS_FM_VERSION_PATCH_MASK;
}

/* Fetch the peer's PHY verify-capability fields */
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
	*power_management = (frame >> POWER_MANAGEMENT_SHIFT)
		& POWER_MANAGEMENT_MASK;
	*continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
		& CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
}

/* Fetch the peer's fabric verify-capability fields */
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
	*vau = (frame >> VAU_SHIFT) & VAU_MASK;
	*z = (frame >> Z_SHIFT) & Z_MASK;
	*vcu = (frame >> VCU_SHIFT) & VCU_MASK;
	*vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
	*crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
}

/* Fetch the peer's link widths and tx rate */
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate,
				      u16 *link_widths)
{
	u32 frame;

	read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
			 &frame);
	*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
		& REMOTE_TX_RATE_MASK;
	*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
}

/* Read the locally enabled receive lanes from the LNI info field */
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
{
	u32 frame;

	read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
	*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}

/* Last LNI state completed on the local side */
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
	read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
}

/* Last LNI state completed on the remote side */
static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
{
	read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
}

/* Report link quality; 0 if the link is not up or the read fails */
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
{
	u32 frame;
	int ret;

	*link_quality = 0;
	if (dd->pport->host_link_state & HLS_UP) {
		ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
				       &frame);
		if (ret == 0)
			*link_quality = (frame >> LINK_QUALITY_SHIFT)
						& LINK_QUALITY_MASK;
	}
}

/* Read the peer's planned-down remote reason code */
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
{
	u32 frame;

	read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
	*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}

/* Read the 8051 link down reason (low byte of the frame) */
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
{
	u32 frame;

	read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
	*ldr = (frame & 0xff);
}

/* Read the current tx settings; returns the read_8051_config() result */
static int read_tx_settings(struct hfi1_devdata *dd,
			    u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion,
			    u8 *max_rate)
{
	u32 frame;
	int ret;

	ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
	*enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
				& ENABLE_LANE_TX_MASK;
	*tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
				& TX_POLARITY_INVERSION_MASK;
	*rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
				& RX_POLARITY_INVERSION_MASK;
	*max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
	return ret;
}

/* Write the tx settings; returns the 8051 command result */
static int write_tx_settings(struct hfi1_devdata *dd,
			     u8 enable_lane_tx,
			     u8 tx_polarity_inversion,
			     u8 rx_polarity_inversion,
			     u8 max_rate)
{
	u32 frame;

	/* no need to mask, all variable sizes match field widths */
	frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
		| tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
		| rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
		| max_rate << MAX_RATE_SHIFT;
	return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}

/*
 * Read an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
	int ret;

	ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "read idle message: type %d, err %d\n",
			   (u32)type, ret);
		return -EINVAL;
	}
	dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
	/* return only the payload as we already know the type */
	*data_out >>= IDLE_PAYLOAD_SHIFT;
	return 0;
}

/*
 * Read an idle SMA message.  To be done in response to a notification from
 * the 8051.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
	return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
				 data);
}

/*
 * Send an idle LCB message.
 *
 * Returns 0 on success, -EINVAL on error
 */
static int send_idle_message(struct hfi1_devdata *dd, u64 data)
{
	int ret;

	dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
	ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
			   data, ret);
		return -EINVAL;
	}
	return 0;
}

/*
 * Send an idle SMA message.
 *
 * Returns 0 on success, -EINVAL on error
 */
int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
	u64 data;

	/* pack the payload and the SMA message type into one idle message */
	data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
		((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
	return send_idle_message(dd, data);
}

/*
 * Initialize the LCB then do a quick link up.  This may or may not be
 * in loopback.
 *
 * return 0 on success, -errno on error
 */
static int do_quick_linkup(struct hfi1_devdata *dd)
{
	int ret;

	lcb_shutdown(dd, 0);

	if (loopback) {
		/* LCB_CFG_LOOPBACK.VAL = 2 */
		/* LCB_CFG_LANE_WIDTH.VAL = 0 */
		write_csr(dd, DC_LCB_CFG_LOOPBACK,
			  IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
		write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
	}

	/* start the LCBs */
	/* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);

	/* simulator only loopback steps */
	if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		/* LCB_CFG_RUN.EN = 1 */
		write_csr(dd, DC_LCB_CFG_RUN,
			  1ull << DC_LCB_CFG_RUN_EN_SHIFT);

		ret = wait_link_transfer_active(dd, 10);
		if (ret)
			return ret;

		write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
			  1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
	}

	if (!loopback) {
		/*
		 * When doing quick linkup and not in loopback, both
		 * sides must be done with LCB set-up before either
		 * starts the quick linkup.  Put a delay here so that
		 * both sides can be started and have a chance to be
		 * done with LCB set up before resuming.
		 */
		dd_dev_err(dd,
			   "Pausing for peer to be finished with LCB set up\n");
		msleep(5000);
		dd_dev_err(dd, "Continuing with quick linkup\n");
	}

	write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
	set_8051_lcb_access(dd);

	/*
	 * State "quick" LinkUp request sets the physical link state to
	 * LinkUp without a verify capability sequence.
	 * This state is in simulator v37 and later.
	 */
	ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "%s: set physical link state to quick LinkUp failed with return %d\n",
			   __func__, ret);

		/* undo: give the LCB back to the host, watch for errors */
		set_host_lcb_access(dd);
		write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

		if (ret >= 0)
			ret = -EINVAL;
		return ret;
	}

	return 0; /* success */
}

/*
 * Do all special steps to set up loopback.
 */
static int init_loopback(struct hfi1_devdata *dd)
{
	dd_dev_info(dd, "Entering loopback mode\n");

	/* all loopbacks should disable self GUID check */
	write_csr(dd, DC_DC8051_CFG_MODE,
		  (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));

	/*
	 * The simulator has only one loopback option - LCB.  Switch
	 * to that option, which includes quick link up.
	 *
	 * Accept all valid loopback values.
	 */
	if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
	    (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
	     loopback == LOOPBACK_CABLE)) {
		loopback = LOOPBACK_LCB;
		quick_linkup = 1;
		return 0;
	}

	/*
	 * SerDes loopback init sequence is handled in set_local_link_attributes
	 */
	if (loopback == LOOPBACK_SERDES)
		return 0;

	/* LCB loopback - handled at poll time */
	if (loopback == LOOPBACK_LCB) {
		quick_linkup = 1; /* LCB is always quick linkup */

		/* not supported in emulation due to emulation RTL changes */
		if (dd->icode == ICODE_FPGA_EMULATION) {
			dd_dev_err(dd,
				   "LCB loopback not supported in emulation\n");
			return -EINVAL;
		}
		return 0;
	}

	/* external cable loopback requires no extra steps */
	if (loopback == LOOPBACK_CABLE)
		return 0;

	dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
	return -EINVAL;
}

/*
 * Translate from the OPA_LINK_WIDTH handed to
us by the FM to bits
 * used in the Verify Capability link width attribute.
 */
static u16 opa_to_vc_link_widths(u16 opa_widths)
{
	int i;
	u16 result = 0;

	/* map each OPA width flag to its VC link width bit */
	static const struct link_bits {
		u16 from;
		u16 to;
	} opa_link_xlate[] = {
		{ OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
		{ OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
		{ OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
		{ OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
	};

	for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
		if (opa_widths & opa_link_xlate[i].from)
			result |= opa_link_xlate[i].to;
	}
	return result;
}

/*
 * Set link attributes before moving to polling.
 */
static int set_local_link_attributes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u8 enable_lane_tx;
	u8 tx_polarity_inversion;
	u8 rx_polarity_inversion;
	int ret;
	u32 misc_bits = 0;

	/* reset our fabric serdes to clear any lingering problems */
	fabric_serdes_reset(dd);

	/* set the local tx rate - need to read-modify-write */
	ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
			       &rx_polarity_inversion, &ppd->local_tx_rate);
	if (ret)
		goto set_local_link_attributes_fail;

	if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
		/* set the tx rate to the fastest enabled */
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate = 1;
		else
			ppd->local_tx_rate = 0;
	} else {
		/* set the tx rate to all enabled */
		ppd->local_tx_rate = 0;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
			ppd->local_tx_rate |= 2;
		if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
			ppd->local_tx_rate |= 1;
	}

	enable_lane_tx = 0xF; /* enable all four lanes */
	ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
				rx_polarity_inversion, ppd->local_tx_rate);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "Failed to set host interface version, return 0x%x\n",
			   ret);
		goto set_local_link_attributes_fail;
	}

	/*
	 * DC supports continuous updates.
	 */
	ret = write_vc_local_phy(dd,
				 0 /* no power management */,
				 1 /* continuous updates */);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* z=1 in the next call: AU of 0 is not supported by the hardware */
	ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
				    ppd->port_crc_mode_enabled);
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/*
	 * SerDes loopback init sequence requires
	 * setting bit 0 of MISC_CONFIG_BITS
	 */
	if (loopback == LOOPBACK_SERDES)
		misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;

	/*
	 * An external device configuration request is used to reset the LCB
	 * to retry to obtain operational lanes when the first attempt is
	 * unsuccessful.
	 */
	if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
		misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;

	ret = write_vc_local_link_mode(dd, misc_bits, 0,
				       opa_to_vc_link_widths(
						ppd->link_width_enabled));
	if (ret != HCMD_SUCCESS)
		goto set_local_link_attributes_fail;

	/* let peer know who we are */
	ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
	if (ret == HCMD_SUCCESS)
		return 0;

set_local_link_attributes_fail:
	dd_dev_err(dd,
		   "Failed to set local link attributes, return 0x%x\n",
		   ret);
	return ret;
}

/*
 * Call this to start the link.
 * Do not do anything if the link is disabled.
 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
 */
int start_link(struct hfi1_pportdata *ppd)
{
	/*
	 * Tune the SerDes to a ballpark setting for optimal signal and bit
	 * error rate.  Needs to be done before starting the link.
	 */
	tune_serdes(ppd);

	if (!ppd->driver_link_ready) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because driver is not ready\n",
			    __func__);
		return 0;
	}

	/*
	 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
	 * pkey table can be configured properly if the HFI unit is connected
	 * to switch port with MgmtAllowed=NO
	 */
	clear_full_mgmt_pkey(ppd);

	return set_link_state(ppd, HLS_DN_POLL);
}

/* Wait for a plugged-in QSFP module to finish its power-on init */
static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;

	/*
	 * Some QSFP cables have a quirk that asserts the IntN line as a side
	 * effect of power up on plug-in.  We ignore this false positive
	 * interrupt until the module has finished powering up by waiting for
	 * a minimum timeout of the module inrush initialization time of
	 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
	 * module have stabilized.
	 */
	msleep(500);

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N))
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}

/* Enable or disable the QSFP IntN pin as an interrupt source */
static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable) {
		/*
		 * Clear the status register to avoid an immediate interrupt
		 * when we re-enable the IntN pin
		 */
		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
			  QSFP_HFI0_INT_N);
		mask |= (u64)QSFP_HFI0_INT_N;
	} else {
		mask &= ~(u64)QSFP_HFI0_INT_N;
	}
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}

/* Pulse the QSFP reset line, then wait for the module to re-initialize */
int reset_qsfp(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask, qsfp_mask;

	/* Disable INT_N from triggering QSFP interrupts */
	set_qsfp_int_n(ppd, 0);

	/* Reset the QSFP */
	mask = (u64)QSFP_HFI0_RESET_N;

	qsfp_mask = read_csr(dd,
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	qsfp_mask |= mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	wait_for_qsfp_init(ppd);

	/*
	 * Allow INT_N to trigger the QSFP interrupt to watch
	 * for alarms and warnings
	 */
	set_qsfp_int_n(ppd, 1);

	/*
	 * After the reset, AOC transmitters are enabled by default.  They need
	 * to be turned off to complete the QSFP setup before they can be
	 * enabled again.
	 */
	return set_qsfp_tx(ppd, 0);
}

/* Log any alarm/warning bits found in the QSFP interrupt status bytes */
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
					u8 *qsfp_interrupt_status)
{
	struct hfi1_devdata *dd = ppd->dd;

	if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
		dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
			   __func__);

	if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
	    (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
		dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
			   __func__);

	/*
	 * The remaining alarms/warnings don't matter if the link is down.
	 */
	if (ppd->host_link_state & HLS_DOWN)
		return 0;

	if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
		dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
			   __func__);

	if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
	    (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
		dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
			   __func__);

	/* Byte 2 is vendor specific */

	if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
			   __func__);

	if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
			   __func__);

	if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
			   __func__);

	if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
	    (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
			   __func__);

	if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
			   __func__);

	if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
			   __func__);

	if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
	    (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
		dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
			   __func__);

	/* Bytes 9-10 and 11-12 are reserved */
	/* Bytes 13-15 are vendor specific */

	return 0;
}

/* This routine will only be scheduled if the QSFP module present is asserted */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* Sanity check */
	if (!qsfp_mod_present(ppd))
		return;

	if (ppd->host_link_state == HLS_DN_DISABLE) {
		dd_dev_info(ppd->dd,
			    "%s: stopping link start because link is disabled\n",
			    __func__);
		return;
	}

	/*
	 * Turn DC back on after cable has been re-inserted.  Up until
	 * now, the DC has been in reset to save power.
	 */
	dc_start(dd);

	if (qd->cache_refresh_required) {
		set_qsfp_int_n(ppd, 0);

		wait_for_qsfp_init(ppd);

		/*
		 * Allow INT_N to trigger the QSFP interrupt to watch
		 * for alarms and warnings
		 */
		set_qsfp_int_n(ppd, 1);

		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				    "%s: Failed to read status of QSFP module\n",
				    __func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(
					ppd, qsfp_interrupt_status);
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}

/* One-time init of the QSFP interrupt sources and pin inversion */
void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask;

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* Clear current status to avoid spurious interrupts */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	set_qsfp_int_n(ppd, 0);

	/* Handle active low nature of INT_N and MODPRST_N pins */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);

	/* Enable the appropriate QSFP IRQ source */
	if (!dd->hfi1_id)
		set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
	else
		set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
}

/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* simulator does not correctly handle LCB cclk loopback, skip */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* the DC has been reset earlier in the driver load */

	/* set LCB for cclk loopback on the port */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}

/*
 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
 * on error.
 */
static int test_qsfp_read(struct hfi1_pportdata *ppd)
{
	int ret;
	u8 status;

	/*
	 * Report success if not a QSFP or, if it is a QSFP, but the cable is
	 * not present
	 */
	if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
		return 0;

	/* read byte 2, the status byte */
	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
	if (ret < 0)
		return ret;
	if (ret != 1)
		return -EIO;

	return 0; /* success */
}

/*
 * Values for QSFP retry.
 *
 * Give up after 10s (20 x 500ms).
 * The overall timeout was empirically
 * arrived at from experience on a large cluster.
 */
#define MAX_QSFP_RETRIES 20
#define QSFP_RETRY_WAIT 500 /* msec */

/*
 * Try a QSFP read. If it fails, schedule a retry for later.
 * Called on first link activation after driver load.
 */
static void try_start_link(struct hfi1_pportdata *ppd)
{
	if (test_qsfp_read(ppd)) {
		/* read failed */
		if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
			return;
		}
		dd_dev_info(ppd->dd,
			    "QSFP not responding, waiting and retrying %d\n",
			    (int)ppd->qsfp_retry_count);
		ppd->qsfp_retry_count++;
		queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
		return;
	}
	ppd->qsfp_retry_count = 0;

	start_link(ppd);
}

/*
 * Workqueue function to start the link after a delay.
 */
void handle_start_link(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  start_link_work.work);
	try_start_link(ppd);
}

/*
 * bringup_serdes - prepare the port and begin bringing the link up
 * @ppd: port to bring up
 *
 * Assigns a port GUID if not already set, performs the one-time LCB
 * init, configures loopback if requested, initializes the QSFP for
 * QSFP ports, then starts (or schedules) link bring-up.
 *
 * Return: 0 on success, negative errno from init_loopback() on failure.
 */
int bringup_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 guid;
	int ret;

	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

	/* derive the port GUID from the base GUID when none is set */
	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
	if (!guid) {
		if (dd->base_guid)
			guid = dd->base_guid + ppd->port - 1;
		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
	}

	/* Set linkinit_reason on power up per OPA spec */
	ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

	/* one-time init of the LCB */
	init_lcb(dd);

	if (loopback) {
		ret = init_loopback(dd);
		if (ret < 0)
			return ret;
	}

	get_port_type(ppd);
	if (ppd->port_type == PORT_TYPE_QSFP) {
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		set_qsfp_int_n(ppd, 1);
	}

	try_start_link(ppd);
	return 0;
}

/*
 * hfi1_quiet_serdes - take the link down and keep it down
 * @ppd: port to shut down
 */
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down.   First turn off that the
	 * driver wants to allow the link to be up (driver_link_ready).
	 * Then make sure the link is not automatically restarted
	 * (link_enabled).  Cancel any pending restart.  And finally
	 * go offline.
	 */
	ppd->driver_link_ready = 0;
	ppd->link_enabled = 0;

	ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
	flush_delayed_work(&ppd->start_link_work);
	cancel_delayed_work_sync(&ppd->start_link_work);

	ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
			     OPA_LINKDOWN_REASON_REBOOT);
	set_link_state(ppd, HLS_DN_OFFLINE);

	/* disable the port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
	cancel_work_sync(&ppd->freeze_work);
}

/*
 * Allocate the per-cpu RC ack/qack/delayed-completion counters for every
 * port.  Return 0 on success, -ENOMEM if any allocation fails.
 * NOTE(review): on failure, counters allocated so far are left in place —
 * presumably freed by the caller's teardown path; confirm before changing.
 */
static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	/* port data is laid out immediately after the device data */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
		if (!ppd->ibport_data.rvp.rc_acks ||
		    !ppd->ibport_data.rvp.rc_delayed_comp ||
		    !ppd->ibport_data.rvp.rc_qacks)
			return -ENOMEM;
	}

	return 0;
}

/*
 * hfi1_put_tid - write one receive array entry
 * @dd: the device data
 * @index: the index into the receive array
 * @type: receive array entry type (PT_* value)
 * @pa: physical address of the receive buffer
 * @order: encoded buffer size
 *
 * PT_INVALID / PT_INVALID_FLUSH entries are written with address and
 * size zeroed.  Writes go through the write-combining mapping; eager
 * and flush entries are flushed immediately, expected entries every
 * fourth write.
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;

	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
		pa = 0;
		order = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}
	trace_hfi1_put_tid(dd, index, type, pa, order);

#define RT_ADDR_SHIFT 12	/* 4KB kernel address boundary */
	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
					<< RCV_ARRAY_RT_ADDR_SHIFT;
	trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
	writeq(reg, dd->rcvarray_wc + (index * 8));

	if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
		/*
		 * Eager entries are written and flushed
		 *
		 * Expected entries are flushed every 4 writes
		 */
		flush_wc();
done:
	return;
}

/*
 * hfi1_clear_tids - invalidate all eager and expected receive array
 * entries belonging to a receive context
 * @rcd: the receive context
 */
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 i;

	/* this could be optimized */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

	for (i = rcd->expected_base;
	     i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}

/* names for the HFI1_IB_CFG_* values, indexed by value */
static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};

/* return the name for an HFI1_IB_CFG_* value, "invalid" if out of range */
static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}

/*
 * hfi1_get_ib_cfg - read the current value of an IB configuration item
 * @ppd: port to query
 * @which: an HFI1_IB_CFG_* selector
 *
 * Return: the item's value; 0 for unimplemented selectors (these are
 * logged when the PRINT_UNIMPL capability is set).
 */
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
	struct hfi1_devdata *dd = ppd->dd;
	int val = 0;

	switch (which) {
	case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
		val = ppd->link_width_enabled;
		break;
	case HFI1_IB_CFG_LWID: /* currently active Link-width */
		val = ppd->link_width_active;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		val = ppd->link_speed_enabled;
		break;
	case HFI1_IB_CFG_SPD: /* current Link speed */
		val = ppd->link_speed_active;
		break;

	case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
	case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
	case HFI1_IB_CFG_LINKLATENCY:
		goto unimplemented;

	case HFI1_IB_CFG_OP_VLS:
		val = ppd->actual_vls_operational;
		break;
	case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
		val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
		val = VL_ARB_LOW_PRIO_TABLE_SIZE;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		val = ppd->overrun_threshold;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		val = ppd->phy_error_threshold;
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		val = HLS_DEFAULT;
		break;

	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
unimplemented:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(
				dd,
				"%s: which %s: not implemented\n",
				__func__,
				ib_cfg_name(which));
		break;
	}

	return val;
}

/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048

/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device. This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all context will have the same value. Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}

/*
 * Set Send Length
 * @ppd: per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
	u32 maxvlmtu = dd->vld[15].mtu;
	/* pre-load VL15's limit into the SendLenCheck1 image */
	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
			      & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
		SEND_LEN_CHECK1_LEN_VL15_SHIFT;
	int i, j;
	u32 thres;

	/* VLs 0-3 go in SEND_LEN_CHECK0, VLs 4-7 in SEND_LEN_CHECK1 */
	for (i = 0; i < ppd->vls_supported; i++) {
		if (dd->vld[i].mtu > maxvlmtu)
			maxvlmtu = dd->vld[i].mtu;
		if (i <= 3)
			len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
				((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
		else
			len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
				 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
				((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
	}
	write_csr(dd, SEND_LEN_CHECK0, len1);
	write_csr(dd, SEND_LEN_CHECK1, len2);
	/* adjust kernel credit return thresholds based on new MTUs */
	/* all kernel receive contexts have the same hdrqentsize */
	for (i = 0; i < ppd->vls_supported; i++) {
		thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
			    sc_mtu_to_threshold(dd->vld[i].sc,
						dd->vld[i].mtu,
						get_hdrqentsize(dd->rcd[0])));
		for (j = 0; j < INIT_SC_PER_VL; j++)
			sc_set_cr_threshold(
					pio_select_send_context_vl(dd, j, i),
					    thres);
	}
	/* VL15 threshold handled separately from the data VL loop above */
	thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
		    sc_mtu_to_threshold(dd->vld[15].sc,
					dd->vld[15].mtu,
					dd->rcd[0]->rcvhdrqentsize));
	sc_set_cr_threshold(dd->vld[15].sc, thres);

	/* Adjust maximum MTU for the port in DC */
	dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
		(ilog2(maxvlmtu >> 8) + 1);
	len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
	len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
	len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
		DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}

/*
 * set_lidlmc - program the port's LID and LMC into the hardware
 * @ppd: the port
 *
 * Programs the DLID/mask in the DC port config, then the SLID check in
 * every send context and the SDMA engines.
 */
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
	u32 lid;

	/*
	 * Program 0 in CSR if port lid is extended. This prevents
	 * 9B packets being sent out for large lids.
	 */
	lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
	c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
		| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
	c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
			<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

	/*
	 * Iterate over all the send contexts and set their SLID check
	 */
	sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
			SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
	       (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
			SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

	for (i = 0; i < chip_send_contexts(dd); i++) {
		hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
	}

	/* Now we have to do the same thing for the sdma engines */
	sdma_update_lmc(dd, mask, lid);
}

/* name for an LNI state-complete frame's "state" field */
static const char *state_completed_string(u32 completed)
{
	static const char * const state_completed[] = {
		"EstablishComm",
		"OptimizeEQ",
		"VerifyCap"
	};

	if (completed < ARRAY_SIZE(state_completed))
		return state_completed[completed];

	return "unknown";
}

/* shared reason strings used by several state_complete_reasons entries */
static const char all_lanes_dead_timeout_expired[] =
	"All lanes were inactive – was the interconnect media removed?";
static const char tx_out_of_policy[] =
	"Passing lanes on local port do not meet the local link width policy";
static const char no_state_complete[] =
	"State timeout occurred before link partner completed the state";

/* LNI failure reason code -> description; gaps decode as "Reserved" */
static const char * const state_complete_reasons[] = {
	[0x00] = "Reason unknown",
	[0x01] = "Link was halted by driver, refer to LinkDownReason",
	[0x02] = "Link partner reported failure",
	[0x10] = "Unable to achieve frame sync on any lane",
	[0x11] =
	  "Unable to find a common bit rate with the link partner",
	[0x12] =
	  "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
	[0x13] =
	  "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
	[0x14] = no_state_complete,
	[0x15] =
	  "State timeout occurred before link partner identified equalization presets",
	[0x16] =
	  "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
	[0x17] = tx_out_of_policy,
	[0x20] = all_lanes_dead_timeout_expired,
	[0x21] =
	  "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
	[0x22] = no_state_complete,
	[0x23] =
	  "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
	[0x24] = tx_out_of_policy,
	[0x30] = all_lanes_dead_timeout_expired,
	[0x31] =
	  "State timeout occurred waiting for host to process received frames",
	[0x32] = no_state_complete,
	[0x33] =
	  "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
	[0x34] = tx_out_of_policy,
	[0x35] = "Negotiated link width is mutually exclusive",
	[0x36] =
	  "Timed out before receiving verifycap frames in VerifyCap.Exchange",
	[0x37] = "Unable to resolve secure data exchange",
};

/* map a reason code to its string; unknown/unassigned codes are "Reserved" */
static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
						     u32 code)
{
	const char *str = NULL;

	if (code < ARRAY_SIZE(state_complete_reasons))
		str = state_complete_reasons[code];

	if (str)
		return str;
	return "Reserved";
}

/* describe the given last state complete frame */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
				  const char *prefix)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 success;
	u32 state;
	u32 reason;
	u32 lanes;

	/*
	 * Decode frame:
	 *  [ 0: 0] - success
	 *  [ 3: 1] - state
	 *  [ 7: 4] - next state timeout
	 *  [15: 8] - reason code
	 *  [31:16] - lanes
	 */
	success = frame & 0x1;
	state = (frame >> 1) & 0x7;
	reason = (frame >> 8) & 0xff;
	lanes = (frame >> 16) & 0xffff;

	dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
		   prefix, frame);
	dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
		   state_completed_string(state), state);
	dd_dev_err(dd, "    state successfully completed: %s\n",
		   success ? "yes" : "no");
	dd_dev_err(dd, "    fail reason 0x%x: %s\n",
		   reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
}

/*
 * Read the last state complete frames and explain them.  This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI).  That is, anywhere between polling and link up.
10309 */ 10310 static void check_lni_states(struct hfi1_pportdata *ppd) 10311 { 10312 u32 last_local_state; 10313 u32 last_remote_state; 10314 10315 read_last_local_state(ppd->dd, &last_local_state); 10316 read_last_remote_state(ppd->dd, &last_remote_state); 10317 10318 /* 10319 * Don't report anything if there is nothing to report. A value of 10320 * 0 means the link was taken down while polling and there was no 10321 * training in-process. 10322 */ 10323 if (last_local_state == 0 && last_remote_state == 0) 10324 return; 10325 10326 decode_state_complete(ppd, last_local_state, "transmitted"); 10327 decode_state_complete(ppd, last_remote_state, "received"); 10328 } 10329 10330 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */ 10331 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms) 10332 { 10333 u64 reg; 10334 unsigned long timeout; 10335 10336 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */ 10337 timeout = jiffies + msecs_to_jiffies(wait_ms); 10338 while (1) { 10339 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE); 10340 if (reg) 10341 break; 10342 if (time_after(jiffies, timeout)) { 10343 dd_dev_err(dd, 10344 "timeout waiting for LINK_TRANSFER_ACTIVE\n"); 10345 return -ETIMEDOUT; 10346 } 10347 udelay(2); 10348 } 10349 return 0; 10350 } 10351 10352 /* called when the logical link state is not down as it should be */ 10353 static void force_logical_link_state_down(struct hfi1_pportdata *ppd) 10354 { 10355 struct hfi1_devdata *dd = ppd->dd; 10356 10357 /* 10358 * Bring link up in LCB loopback 10359 */ 10360 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); 10361 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 10362 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK); 10363 10364 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0); 10365 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0); 10366 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110); 10367 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2); 10368 10369 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0); 10370 (void)read_csr(dd, 
DC_LCB_CFG_TX_FIFOS_RESET); 10371 udelay(3); 10372 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1); 10373 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT); 10374 10375 wait_link_transfer_active(dd, 100); 10376 10377 /* 10378 * Bring the link down again. 10379 */ 10380 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1); 10381 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0); 10382 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0); 10383 10384 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); 10385 } 10386 10387 /* 10388 * Helper for set_link_state(). Do not call except from that routine. 10389 * Expects ppd->hls_mutex to be held. 10390 * 10391 * @rem_reason value to be sent to the neighbor 10392 * 10393 * LinkDownReasons only set if transition succeeds. 10394 */ 10395 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason) 10396 { 10397 struct hfi1_devdata *dd = ppd->dd; 10398 u32 previous_state; 10399 int offline_state_ret; 10400 int ret; 10401 10402 update_lcb_cache(dd); 10403 10404 previous_state = ppd->host_link_state; 10405 ppd->host_link_state = HLS_GOING_OFFLINE; 10406 10407 /* start offline transition */ 10408 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE); 10409 10410 if (ret != HCMD_SUCCESS) { 10411 dd_dev_err(dd, 10412 "Failed to transition to Offline link state, return %d\n", 10413 ret); 10414 return -EINVAL; 10415 } 10416 if (ppd->offline_disabled_reason == 10417 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)) 10418 ppd->offline_disabled_reason = 10419 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 10420 10421 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000); 10422 if (offline_state_ret < 0) 10423 return offline_state_ret; 10424 10425 /* Disabling AOC transmitters */ 10426 if (ppd->port_type == PORT_TYPE_QSFP && 10427 ppd->qsfp_info.limiting_active && 10428 qsfp_mod_present(ppd)) { 10429 int ret; 10430 10431 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT); 10432 if (ret == 0) { 10433 
set_qsfp_tx(ppd, 0); 10434 release_chip_resource(dd, qsfp_resource(dd)); 10435 } else { 10436 /* not fatal, but should warn */ 10437 dd_dev_err(dd, 10438 "Unable to acquire lock to turn off QSFP TX\n"); 10439 } 10440 } 10441 10442 /* 10443 * Wait for the offline.Quiet transition if it hasn't happened yet. It 10444 * can take a while for the link to go down. 10445 */ 10446 if (offline_state_ret != PLS_OFFLINE_QUIET) { 10447 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000); 10448 if (ret < 0) 10449 return ret; 10450 } 10451 10452 /* 10453 * Now in charge of LCB - must be after the physical state is 10454 * offline.quiet and before host_link_state is changed. 10455 */ 10456 set_host_lcb_access(dd); 10457 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */ 10458 10459 /* make sure the logical state is also down */ 10460 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000); 10461 if (ret) 10462 force_logical_link_state_down(ppd); 10463 10464 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ 10465 update_statusp(ppd, IB_PORT_DOWN); 10466 10467 /* 10468 * The LNI has a mandatory wait time after the physical state 10469 * moves to Offline.Quiet. The wait time may be different 10470 * depending on how the link went down. The 8051 firmware 10471 * will observe the needed wait time and only move to ready 10472 * when that is completed. The largest of the quiet timeouts 10473 * is 6s, so wait that long and then at least 0.5s more for 10474 * other transitions, and another 0.5s for a buffer. 10475 */ 10476 ret = wait_fm_ready(dd, 7000); 10477 if (ret) { 10478 dd_dev_err(dd, 10479 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n"); 10480 /* state is really offline, so make it so */ 10481 ppd->host_link_state = HLS_DN_OFFLINE; 10482 return ret; 10483 } 10484 10485 /* 10486 * The state is now offline and the 8051 is ready to accept host 10487 * requests. 
10488 * - change our state 10489 * - notify others if we were previously in a linkup state 10490 */ 10491 ppd->host_link_state = HLS_DN_OFFLINE; 10492 if (previous_state & HLS_UP) { 10493 /* went down while link was up */ 10494 handle_linkup_change(dd, 0); 10495 } else if (previous_state 10496 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 10497 /* went down while attempting link up */ 10498 check_lni_states(ppd); 10499 10500 /* The QSFP doesn't need to be reset on LNI failure */ 10501 ppd->qsfp_info.reset_needed = 0; 10502 } 10503 10504 /* the active link width (downgrade) is 0 on link down */ 10505 ppd->link_width_active = 0; 10506 ppd->link_width_downgrade_tx_active = 0; 10507 ppd->link_width_downgrade_rx_active = 0; 10508 ppd->current_egress_rate = 0; 10509 return 0; 10510 } 10511 10512 /* return the link state name */ 10513 static const char *link_state_name(u32 state) 10514 { 10515 const char *name; 10516 int n = ilog2(state); 10517 static const char * const names[] = { 10518 [__HLS_UP_INIT_BP] = "INIT", 10519 [__HLS_UP_ARMED_BP] = "ARMED", 10520 [__HLS_UP_ACTIVE_BP] = "ACTIVE", 10521 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF", 10522 [__HLS_DN_POLL_BP] = "POLL", 10523 [__HLS_DN_DISABLE_BP] = "DISABLE", 10524 [__HLS_DN_OFFLINE_BP] = "OFFLINE", 10525 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP", 10526 [__HLS_GOING_UP_BP] = "GOING_UP", 10527 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE", 10528 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN" 10529 }; 10530 10531 name = n < ARRAY_SIZE(names) ? names[n] : NULL; 10532 return name ? 
name : "unknown"; 10533 } 10534 10535 /* return the link state reason name */ 10536 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state) 10537 { 10538 if (state == HLS_UP_INIT) { 10539 switch (ppd->linkinit_reason) { 10540 case OPA_LINKINIT_REASON_LINKUP: 10541 return "(LINKUP)"; 10542 case OPA_LINKINIT_REASON_FLAPPING: 10543 return "(FLAPPING)"; 10544 case OPA_LINKINIT_OUTSIDE_POLICY: 10545 return "(OUTSIDE_POLICY)"; 10546 case OPA_LINKINIT_QUARANTINED: 10547 return "(QUARANTINED)"; 10548 case OPA_LINKINIT_INSUFIC_CAPABILITY: 10549 return "(INSUFIC_CAPABILITY)"; 10550 default: 10551 break; 10552 } 10553 } 10554 return ""; 10555 } 10556 10557 /* 10558 * driver_pstate - convert the driver's notion of a port's 10559 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*). 10560 * Return -1 (converted to a u32) to indicate error. 10561 */ 10562 u32 driver_pstate(struct hfi1_pportdata *ppd) 10563 { 10564 switch (ppd->host_link_state) { 10565 case HLS_UP_INIT: 10566 case HLS_UP_ARMED: 10567 case HLS_UP_ACTIVE: 10568 return IB_PORTPHYSSTATE_LINKUP; 10569 case HLS_DN_POLL: 10570 return IB_PORTPHYSSTATE_POLLING; 10571 case HLS_DN_DISABLE: 10572 return IB_PORTPHYSSTATE_DISABLED; 10573 case HLS_DN_OFFLINE: 10574 return OPA_PORTPHYSSTATE_OFFLINE; 10575 case HLS_VERIFY_CAP: 10576 return IB_PORTPHYSSTATE_TRAINING; 10577 case HLS_GOING_UP: 10578 return IB_PORTPHYSSTATE_TRAINING; 10579 case HLS_GOING_OFFLINE: 10580 return OPA_PORTPHYSSTATE_OFFLINE; 10581 case HLS_LINK_COOLDOWN: 10582 return OPA_PORTPHYSSTATE_OFFLINE; 10583 case HLS_DN_DOWNDEF: 10584 default: 10585 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", 10586 ppd->host_link_state); 10587 return -1; 10588 } 10589 } 10590 10591 /* 10592 * driver_lstate - convert the driver's notion of a port's 10593 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1 10594 * (converted to a u32) to indicate error. 
 */
u32 driver_lstate(struct hfi1_pportdata *ppd)
{
	/* any down HLS_* state maps to logical DOWN */
	if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
		return IB_PORT_DOWN;

	switch (ppd->host_link_state & HLS_UP) {
	case HLS_UP_INIT:
		return IB_PORT_INIT;
	case HLS_UP_ARMED:
		return IB_PORT_ARMED;
	case HLS_UP_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}

/*
 * set_link_down_reason - record the reasons for a link down event
 * @ppd: the port
 * @lcl_reason: local reason code
 * @neigh_reason: reason reported by the neighbor
 * @rem_reason: reason to report to the remote side
 *
 * Only the first set of reasons is recorded (both "latest" fields are
 * still zero); subsequent calls are no-ops until the fields are cleared.
 */
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
			  u8 neigh_reason, u8 rem_reason)
{
	if (ppd->local_link_down_reason.latest == 0 &&
	    ppd->neigh_link_down_reason.latest == 0) {
		ppd->local_link_down_reason.latest = lcl_reason;
		ppd->neigh_link_down_reason.latest = neigh_reason;
		ppd->remote_link_down_reason = rem_reason;
	}
}

/**
 * data_vls_operational() - Verify if data VL BCT credits and MTU
 *			    are both set.
 * @ppd: pointer to hfi1_pportdata structure
 *
 * Return: true - Ok, false -otherwise.
 */
static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
{
	int i;
	u64 reg;

	if (!ppd->actual_vls_operational)
		return false;

	/* each supported VL must have credits and MTU either both set
	 * or both clear
	 */
	for (i = 0; i < ppd->vls_supported; i++) {
		reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
		if ((reg && !ppd->dd->vld[i].mtu) ||
		    (!reg && ppd->dd->vld[i].mtu))
			return false;
	}

	return true;
}

/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
	struct hfi1_devdata *dd = ppd->dd;
	struct ib_event event = {.device = NULL};
	int ret1, ret = 0;
	int orig_new_state, poll_bounce;

	mutex_lock(&ppd->hls_lock);

	orig_new_state = state;
	if (state == HLS_DN_DOWNDEF)
		state = HLS_DEFAULT;

	/* interpret poll -> poll as a link bounce */
	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
		      state == HLS_DN_POLL;

	dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
		    link_state_name(ppd->host_link_state),
		    link_state_name(orig_new_state),
		    poll_bounce ? "(bounce) " : "",
		    link_state_reason_name(ppd, state));

	/*
	 * If we're going to a (HLS_*) link state that implies the logical
	 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
	 * reset is_sm_config_started to 0.
	 */
	if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
		ppd->is_sm_config_started = 0;

	/*
	 * Do nothing if the states match. Let a poll to poll link bounce
	 * go through.
	 */
	if (ppd->host_link_state == state && !poll_bounce)
		goto done;

	switch (state) {
	case HLS_UP_INIT:
		if (ppd->host_link_state == HLS_DN_POLL &&
		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
			/*
			 * Quick link up jumps from polling to here.
			 *
			 * Whether in normal or loopback mode, the
			 * simulator jumps from polling to link up.
			 * Accept that here.
			 */
			/* OK */
		} else if (ppd->host_link_state != HLS_GOING_UP) {
			goto unexpected;
		}

		/*
		 * Wait for Link_Up physical state.
		 * Physical and Logical states should already be
		 * transitioned to LinkUp and LinkInit respectively.
		 */
		ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: physical state did not change to LINK-UP\n",
				   __func__);
			break;
		}

		ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to INIT\n",
				   __func__);
			break;
		}

		/* clear old transient LINKINIT_REASON code */
		if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
			ppd->linkinit_reason =
				OPA_LINKINIT_REASON_LINKUP;

		/* enable the port */
		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

		handle_linkup_change(dd, 1);
		pio_kernel_linkup(dd);

		/*
		 * After link up, a new link width will have been set.
		 * Update the xmit counters with regards to the new
		 * link width.
		 */
		update_xmit_counters(ppd, ppd->link_width_active);

		ppd->host_link_state = HLS_UP_INIT;
		update_statusp(ppd, IB_PORT_INIT);
		break;
	case HLS_UP_ARMED:
		if (ppd->host_link_state != HLS_UP_INIT)
			goto unexpected;

		/* refuse to arm unless BCT credits and MTUs agree */
		if (!data_vls_operational(ppd)) {
			dd_dev_err(dd,
				   "%s: Invalid data VL credits or mtu\n",
				   __func__);
			ret = -EINVAL;
			break;
		}

		set_logical_state(dd, LSTATE_ARMED);
		ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to ARMED\n",
				   __func__);
			break;
		}
		ppd->host_link_state = HLS_UP_ARMED;
		update_statusp(ppd, IB_PORT_ARMED);
		/*
		 * The simulator does not currently implement SMA messages,
		 * so neighbor_normal is not set. Set it here when we first
		 * move to Armed.
		 */
		if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
			ppd->neighbor_normal = 1;
		break;
	case HLS_UP_ACTIVE:
		if (ppd->host_link_state != HLS_UP_ARMED)
			goto unexpected;

		set_logical_state(dd, LSTATE_ACTIVE);
		ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
		if (ret) {
			dd_dev_err(dd,
				   "%s: logical state did not change to ACTIVE\n",
				   __func__);
		} else {
			/* tell all engines to go running */
			sdma_all_running(dd);
			ppd->host_link_state = HLS_UP_ACTIVE;
			update_statusp(ppd, IB_PORT_ACTIVE);

			/* Signal the IB layer that the port has gone active */
			event.device = &dd->verbs_dev.rdi.ibdev;
			event.element.port_num = ppd->port;
			event.event = IB_EVENT_PORT_ACTIVE;
		}
		break;
	case HLS_DN_POLL:
		if ((ppd->host_link_state == HLS_DN_DISABLE ||
		     ppd->host_link_state == HLS_DN_OFFLINE) &&
		    dd->dc_shutdown)
			dc_start(dd);
		/* Hand LED control to the DC */
		write_csr(dd, DCC_CFG_LED_CNTRL, 0);

		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			u8 tmp = ppd->link_enabled;

			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret) {
				/* restore the enable on failure */
				ppd->link_enabled = tmp;
				break;
			}
			ppd->remote_link_down_reason = 0;

			if (ppd->driver_link_ready)
				ppd->link_enabled = 1;
		}

		set_all_slowpath(ppd->dd);
		ret = set_local_link_attributes(ppd);
		if (ret)
			break;

		ppd->port_error_action = 0;

		if (quick_linkup) {
			/* quick linkup does not go into polling */
			ret = do_quick_linkup(dd);
		} else {
			ret1 = set_physical_link_state(dd, PLS_POLLING);
			if (!ret1)
				ret1 = wait_phys_link_out_of_offline(ppd,
								     3000);
			/*
			 * NOTE(review): ret1 may originate from either
			 * set_physical_link_state() or
			 * wait_phys_link_out_of_offline(); both are checked
			 * against HCMD_SUCCESS here -- assumes the two
			 * share a success code. Confirm.
			 */
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Polling link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
			}
		}

		/*
		 * Change the host link state after requesting DC8051 to
		 * change its physical state so that we can ignore any
		 * interrupt with stale LNI(XX) error, which will not be
		 * cleared until DC8051 transitions to Polling state.
		 */
		ppd->host_link_state = HLS_DN_POLL;
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
		/*
		 * If an error occurred above, go back to offline. The
		 * caller may reschedule another attempt.
		 */
		if (ret)
			goto_offline(ppd, 0);
		else
			log_physical_state(ppd, PLS_POLLING);
		break;
	case HLS_DN_DISABLE:
		/* link is disabled */
		ppd->link_enabled = 0;

		/* allow any state to transition to disabled */

		/* must transition to offline first */
		if (ppd->host_link_state != HLS_DN_OFFLINE) {
			ret = goto_offline(ppd, ppd->remote_link_down_reason);
			if (ret)
				break;
			ppd->remote_link_down_reason = 0;
		}

		if (!dd->dc_shutdown) {
			ret1 = set_physical_link_state(dd, PLS_DISABLED);
			if (ret1 != HCMD_SUCCESS) {
				dd_dev_err(dd,
					   "Failed to transition to Disabled link state, return 0x%x\n",
					   ret1);
				ret = -EINVAL;
				break;
			}
			ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
			if (ret) {
				dd_dev_err(dd,
					   "%s: physical state did not change to DISABLED\n",
					   __func__);
				break;
			}
			dc_shutdown(dd);
		}
		ppd->host_link_state = HLS_DN_DISABLE;
		break;
	case HLS_DN_OFFLINE:
		if (ppd->host_link_state == HLS_DN_DISABLE)
			dc_start(dd);

		/* allow any state to transition to offline */
		ret = goto_offline(ppd, ppd->remote_link_down_reason);
		if (!ret)
			ppd->remote_link_down_reason = 0;
		break;
	case HLS_VERIFY_CAP:
		if (ppd->host_link_state != HLS_DN_POLL)
			goto unexpected;
		ppd->host_link_state = HLS_VERIFY_CAP;
		log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
		break;
	case HLS_GOING_UP:
		if (ppd->host_link_state != HLS_VERIFY_CAP)
			goto unexpected;

		ret1 = set_physical_link_state(dd, PLS_LINKUP);
		if (ret1 != HCMD_SUCCESS) {
			dd_dev_err(dd,
				   "Failed to transition to link up state, return 0x%x\n",
				   ret1);
			ret = -EINVAL;
			break;
		}
		ppd->host_link_state = HLS_GOING_UP;
		break;

	case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
	case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
	default:
		dd_dev_info(dd, "%s: state 0x%x: not supported\n",
			    __func__, state);
		ret = -EINVAL;
		break;
	}

	goto done;

unexpected:
	dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
		   __func__, link_state_name(ppd->host_link_state),
		   link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	/* dispatch outside the hls_lock; only set on the ACTIVE path */
	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}

/*
 * Apply one IB configuration item to the port. Returns 0 on success,
 * -EINVAL for unsupported values.
 */
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
	u64 reg;
	int ret = 0;

	switch (which) {
	case HFI1_IB_CFG_LIDLMC:
		set_lidlmc(ppd);
		break;
	case HFI1_IB_CFG_VL_HIGH_LIMIT:
		/*
		 * The VL Arbitrator high limit is sent in units of 4k
		 * bytes, while HFI stores it in units of 64 bytes.
		 */
		val *= 4096 / 64;
		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
			<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
		write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
		break;
	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
		/* HFI only supports POLL as the default link down state */
		if (val != HLS_DN_POLL)
			ret = -EINVAL;
		break;
	case HFI1_IB_CFG_OP_VLS:
		if (ppd->vls_operational != val) {
			ppd->vls_operational = val;
			if (!ppd->port)
				ret = -EINVAL;
		}
		break;
	/*
	 * For link width, link width downgrade, and speed enable, always AND
	 * the setting with what is actually supported. This has two benefits.
	 * First, enabled can't have unsupported values, no matter what the
	 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
	 * "fill in with your supported value" have all the bits in the
	 * field set, so simply ANDing with supported has the desired result.
	 */
	case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
		ppd->link_width_enabled = val & ppd->link_width_supported;
		break;
	case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
		ppd->link_width_downgrade_enabled =
				val & ppd->link_width_downgrade_supported;
		break;
	case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
		ppd->link_speed_enabled = val & ppd->link_speed_supported;
		break;
	case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
	case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs, save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;

	case HFI1_IB_CFG_MTU:
		set_send_length(ppd);
		break;

	case HFI1_IB_CFG_PKEYS:
		if (HFI1_CAP_IS_KSET(PKEY_CHECK))
			set_partition_keys(ppd);
		break;

	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(ppd->dd,
				    "%s: which %s, val 0x%x: not implemented\n",
				    __func__, ib_cfg_name(which), val);
		break;
	}
	return ret;
}

/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
	int i;

	/* the shared cache is sized for both tables; prove that holds */
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
		     VL_ARB_LOW_PRIO_TABLE_SIZE);
	BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
		     VL_ARB_HIGH_PRIO_TABLE_SIZE);

	/*
	 * Note that we always return values directly from the
	 * 'vl_arb_cache' (and do no CSR reads) in response to a
	 * 'Get(VLArbTable)'. This is obviously correct after a
	 * 'Set(VLArbTable)', since the cache will then be up to
	 * date. But it's also correct prior to any 'Set(VLArbTable)'
	 * since then both the cache, and the relevant h/w registers
	 * will be zeroed.
	 */

	for (i = 0; i < MAX_PRIO_TABLE; i++)
		spin_lock_init(&ppd->vl_arb_cache[i].lock);
}

/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
	/* only the two known tables are valid */
	if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
		return NULL;
	spin_lock(&ppd->vl_arb_cache[idx].lock);
	return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
	spin_unlock(&ppd->vl_arb_cache[idx].lock);
}

/* copy the cached table out into vl (VL_ARB_TABLE_SIZE entries) */
static void vl_arb_get_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* overwrite the cached table from vl (VL_ARB_TABLE_SIZE entries) */
static void vl_arb_set_cache(struct vl_arb_cache *cache,
			     struct ib_vl_weight_elem *vl)
{
	memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* return non-zero when vl matches the cached table exactly */
static int vl_arb_match_cache(struct vl_arb_cache *cache,
			      struct ib_vl_weight_elem *vl)
{
	return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* end functions related to vl arbitration table caching */

/*
 * Write a VL arbitration table (low or high priority, selected by
 * target) into hardware, draining the data VLs first when the link
 * is up on non-A0 hardware. Returns 0 on success, non-zero when the
 * VLs cannot be stopped/drained.
 */
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
			  u32 size, struct ib_vl_weight_elem *vl)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg;
	unsigned int i, is_up = 0;
	int drain, ret = 0;

	mutex_lock(&ppd->hls_lock);

	if (ppd->host_link_state & HLS_UP)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * Before adjusting VL arbitration weights, empty per-VL
		 * FIFOs, otherwise a packet whose VL weight is being
		 * set to 0 could get stuck in a FIFO with no chance to
		 * egress.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(
			dd,
			"%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
			__func__);
		goto err;
	}

	for (i = 0; i < size; i++, vl++) {
		/*
		 * NOTE: The low priority shift and mask are used here, but
		 * they are the same for both the low and high registers.
		 */
		reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
				<< SEND_LOW_PRIORITY_LIST_VL_SHIFT)
		      | (((u64)vl->weight
				& SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
				<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
		write_csr(dd, target + (i * 8), reg);
	}
	pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
			   struct vl_limit *vll)
{
	u64 reg = read_csr(dd, csr);

	/* limits are stored big-endian in the buffer_control structure */
	vll->dedicated = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
	vll->shared = cpu_to_be16(
		(reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
		& SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}

/*
 * Read the current credit merge limits.
11180 */ 11181 static int get_buffer_control(struct hfi1_devdata *dd, 11182 struct buffer_control *bc, u16 *overall_limit) 11183 { 11184 u64 reg; 11185 int i; 11186 11187 /* not all entries are filled in */ 11188 memset(bc, 0, sizeof(*bc)); 11189 11190 /* OPA and HFI have a 1-1 mapping */ 11191 for (i = 0; i < TXE_NUM_DATA_VL; i++) 11192 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); 11193 11194 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ 11195 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); 11196 11197 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 11198 bc->overall_shared_limit = cpu_to_be16( 11199 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) 11200 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK); 11201 if (overall_limit) 11202 *overall_limit = (reg 11203 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) 11204 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK; 11205 return sizeof(struct buffer_control); 11206 } 11207 11208 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp) 11209 { 11210 u64 reg; 11211 int i; 11212 11213 /* each register contains 16 SC->VLnt mappings, 4 bits each */ 11214 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0); 11215 for (i = 0; i < sizeof(u64); i++) { 11216 u8 byte = *(((u8 *)®) + i); 11217 11218 dp->vlnt[2 * i] = byte & 0xf; 11219 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4; 11220 } 11221 11222 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16); 11223 for (i = 0; i < sizeof(u64); i++) { 11224 u8 byte = *(((u8 *)®) + i); 11225 11226 dp->vlnt[16 + (2 * i)] = byte & 0xf; 11227 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4; 11228 } 11229 return sizeof(struct sc2vlnt); 11230 } 11231 11232 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems, 11233 struct ib_vl_weight_elem *vl) 11234 { 11235 unsigned int i; 11236 11237 for (i = 0; i < nelems; i++, vl++) { 11238 vl->vl = 0xf; 11239 vl->weight = 0; 11240 } 11241 } 11242 11243 static void set_sc2vlnt(struct hfi1_devdata *dd, struct 
sc2vlnt *dp) 11244 { 11245 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, 11246 DC_SC_VL_VAL(15_0, 11247 0, dp->vlnt[0] & 0xf, 11248 1, dp->vlnt[1] & 0xf, 11249 2, dp->vlnt[2] & 0xf, 11250 3, dp->vlnt[3] & 0xf, 11251 4, dp->vlnt[4] & 0xf, 11252 5, dp->vlnt[5] & 0xf, 11253 6, dp->vlnt[6] & 0xf, 11254 7, dp->vlnt[7] & 0xf, 11255 8, dp->vlnt[8] & 0xf, 11256 9, dp->vlnt[9] & 0xf, 11257 10, dp->vlnt[10] & 0xf, 11258 11, dp->vlnt[11] & 0xf, 11259 12, dp->vlnt[12] & 0xf, 11260 13, dp->vlnt[13] & 0xf, 11261 14, dp->vlnt[14] & 0xf, 11262 15, dp->vlnt[15] & 0xf)); 11263 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, 11264 DC_SC_VL_VAL(31_16, 11265 16, dp->vlnt[16] & 0xf, 11266 17, dp->vlnt[17] & 0xf, 11267 18, dp->vlnt[18] & 0xf, 11268 19, dp->vlnt[19] & 0xf, 11269 20, dp->vlnt[20] & 0xf, 11270 21, dp->vlnt[21] & 0xf, 11271 22, dp->vlnt[22] & 0xf, 11272 23, dp->vlnt[23] & 0xf, 11273 24, dp->vlnt[24] & 0xf, 11274 25, dp->vlnt[25] & 0xf, 11275 26, dp->vlnt[26] & 0xf, 11276 27, dp->vlnt[27] & 0xf, 11277 28, dp->vlnt[28] & 0xf, 11278 29, dp->vlnt[29] & 0xf, 11279 30, dp->vlnt[30] & 0xf, 11280 31, dp->vlnt[31] & 0xf)); 11281 } 11282 11283 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what, 11284 u16 limit) 11285 { 11286 if (limit != 0) 11287 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n", 11288 what, (int)limit, idx); 11289 } 11290 11291 /* change only the shared limit portion of SendCmGLobalCredit */ 11292 static void set_global_shared(struct hfi1_devdata *dd, u16 limit) 11293 { 11294 u64 reg; 11295 11296 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 11297 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK; 11298 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT; 11299 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 11300 } 11301 11302 /* change only the total credit limit portion of SendCmGLobalCredit */ 11303 static void set_global_limit(struct hfi1_devdata *dd, u16 limit) 11304 { 11305 u64 reg; 11306 11307 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 
11308 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK; 11309 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; 11310 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 11311 } 11312 11313 /* set the given per-VL shared limit */ 11314 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit) 11315 { 11316 u64 reg; 11317 u32 addr; 11318 11319 if (vl < TXE_NUM_DATA_VL) 11320 addr = SEND_CM_CREDIT_VL + (8 * vl); 11321 else 11322 addr = SEND_CM_CREDIT_VL15; 11323 11324 reg = read_csr(dd, addr); 11325 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK; 11326 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT; 11327 write_csr(dd, addr, reg); 11328 } 11329 11330 /* set the given per-VL dedicated limit */ 11331 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit) 11332 { 11333 u64 reg; 11334 u32 addr; 11335 11336 if (vl < TXE_NUM_DATA_VL) 11337 addr = SEND_CM_CREDIT_VL + (8 * vl); 11338 else 11339 addr = SEND_CM_CREDIT_VL15; 11340 11341 reg = read_csr(dd, addr); 11342 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK; 11343 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT; 11344 write_csr(dd, addr, reg); 11345 } 11346 11347 /* spin until the given per-VL status mask bits clear */ 11348 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask, 11349 const char *which) 11350 { 11351 unsigned long timeout; 11352 u64 reg; 11353 11354 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT); 11355 while (1) { 11356 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask; 11357 11358 if (reg == 0) 11359 return; /* success */ 11360 if (time_after(jiffies, timeout)) 11361 break; /* timed out */ 11362 udelay(1); 11363 } 11364 11365 dd_dev_err(dd, 11366 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n", 11367 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg); 11368 /* 11369 * If this occurs, it is likely there was a credit loss on the link. 
11370 * The only recovery from that is a link bounce. 11371 */ 11372 dd_dev_err(dd, 11373 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n"); 11374 } 11375 11376 /* 11377 * The number of credits on the VLs may be changed while everything 11378 * is "live", but the following algorithm must be followed due to 11379 * how the hardware is actually implemented. In particular, 11380 * Return_Credit_Status[] is the only correct status check. 11381 * 11382 * if (reducing Global_Shared_Credit_Limit or any shared limit changing) 11383 * set Global_Shared_Credit_Limit = 0 11384 * use_all_vl = 1 11385 * mask0 = all VLs that are changing either dedicated or shared limits 11386 * set Shared_Limit[mask0] = 0 11387 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0 11388 * if (changing any dedicated limit) 11389 * mask1 = all VLs that are lowering dedicated limits 11390 * lower Dedicated_Limit[mask1] 11391 * spin until Return_Credit_Status[mask1] == 0 11392 * raise Dedicated_Limits 11393 * raise Shared_Limits 11394 * raise Global_Shared_Credit_Limit 11395 * 11396 * lower = if the new limit is lower, set the limit to the new value 11397 * raise = if the new limit is higher than the current value (may be changed 11398 * earlier in the algorithm), set the new limit to the new value 11399 */ 11400 int set_buffer_control(struct hfi1_pportdata *ppd, 11401 struct buffer_control *new_bc) 11402 { 11403 struct hfi1_devdata *dd = ppd->dd; 11404 u64 changing_mask, ld_mask, stat_mask; 11405 int change_count; 11406 int i, use_all_mask; 11407 int this_shared_changing; 11408 int vl_count = 0, ret; 11409 /* 11410 * A0: add the variable any_shared_limit_changing below and in the 11411 * algorithm above. If removing A0 support, it can be removed. 
11412 */ 11413 int any_shared_limit_changing; 11414 struct buffer_control cur_bc; 11415 u8 changing[OPA_MAX_VLS]; 11416 u8 lowering_dedicated[OPA_MAX_VLS]; 11417 u16 cur_total; 11418 u32 new_total = 0; 11419 const u64 all_mask = 11420 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK 11421 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK 11422 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK 11423 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK 11424 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK 11425 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK 11426 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK 11427 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK 11428 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK; 11429 11430 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15) 11431 #define NUM_USABLE_VLS 16 /* look at VL15 and less */ 11432 11433 /* find the new total credits, do sanity check on unused VLs */ 11434 for (i = 0; i < OPA_MAX_VLS; i++) { 11435 if (valid_vl(i)) { 11436 new_total += be16_to_cpu(new_bc->vl[i].dedicated); 11437 continue; 11438 } 11439 nonzero_msg(dd, i, "dedicated", 11440 be16_to_cpu(new_bc->vl[i].dedicated)); 11441 nonzero_msg(dd, i, "shared", 11442 be16_to_cpu(new_bc->vl[i].shared)); 11443 new_bc->vl[i].dedicated = 0; 11444 new_bc->vl[i].shared = 0; 11445 } 11446 new_total += be16_to_cpu(new_bc->overall_shared_limit); 11447 11448 /* fetch the current values */ 11449 get_buffer_control(dd, &cur_bc, &cur_total); 11450 11451 /* 11452 * Create the masks we will use. 
11453 */ 11454 memset(changing, 0, sizeof(changing)); 11455 memset(lowering_dedicated, 0, sizeof(lowering_dedicated)); 11456 /* 11457 * NOTE: Assumes that the individual VL bits are adjacent and in 11458 * increasing order 11459 */ 11460 stat_mask = 11461 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK; 11462 changing_mask = 0; 11463 ld_mask = 0; 11464 change_count = 0; 11465 any_shared_limit_changing = 0; 11466 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) { 11467 if (!valid_vl(i)) 11468 continue; 11469 this_shared_changing = new_bc->vl[i].shared 11470 != cur_bc.vl[i].shared; 11471 if (this_shared_changing) 11472 any_shared_limit_changing = 1; 11473 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || 11474 this_shared_changing) { 11475 changing[i] = 1; 11476 changing_mask |= stat_mask; 11477 change_count++; 11478 } 11479 if (be16_to_cpu(new_bc->vl[i].dedicated) < 11480 be16_to_cpu(cur_bc.vl[i].dedicated)) { 11481 lowering_dedicated[i] = 1; 11482 ld_mask |= stat_mask; 11483 } 11484 } 11485 11486 /* bracket the credit change with a total adjustment */ 11487 if (new_total > cur_total) 11488 set_global_limit(dd, new_total); 11489 11490 /* 11491 * Start the credit change algorithm. 11492 */ 11493 use_all_mask = 0; 11494 if ((be16_to_cpu(new_bc->overall_shared_limit) < 11495 be16_to_cpu(cur_bc.overall_shared_limit)) || 11496 (is_ax(dd) && any_shared_limit_changing)) { 11497 set_global_shared(dd, 0); 11498 cur_bc.overall_shared_limit = 0; 11499 use_all_mask = 1; 11500 } 11501 11502 for (i = 0; i < NUM_USABLE_VLS; i++) { 11503 if (!valid_vl(i)) 11504 continue; 11505 11506 if (changing[i]) { 11507 set_vl_shared(dd, i, 0); 11508 cur_bc.vl[i].shared = 0; 11509 } 11510 } 11511 11512 wait_for_vl_status_clear(dd, use_all_mask ? 
all_mask : changing_mask, 11513 "shared"); 11514 11515 if (change_count > 0) { 11516 for (i = 0; i < NUM_USABLE_VLS; i++) { 11517 if (!valid_vl(i)) 11518 continue; 11519 11520 if (lowering_dedicated[i]) { 11521 set_vl_dedicated(dd, i, 11522 be16_to_cpu(new_bc-> 11523 vl[i].dedicated)); 11524 cur_bc.vl[i].dedicated = 11525 new_bc->vl[i].dedicated; 11526 } 11527 } 11528 11529 wait_for_vl_status_clear(dd, ld_mask, "dedicated"); 11530 11531 /* now raise all dedicated that are going up */ 11532 for (i = 0; i < NUM_USABLE_VLS; i++) { 11533 if (!valid_vl(i)) 11534 continue; 11535 11536 if (be16_to_cpu(new_bc->vl[i].dedicated) > 11537 be16_to_cpu(cur_bc.vl[i].dedicated)) 11538 set_vl_dedicated(dd, i, 11539 be16_to_cpu(new_bc-> 11540 vl[i].dedicated)); 11541 } 11542 } 11543 11544 /* next raise all shared that are going up */ 11545 for (i = 0; i < NUM_USABLE_VLS; i++) { 11546 if (!valid_vl(i)) 11547 continue; 11548 11549 if (be16_to_cpu(new_bc->vl[i].shared) > 11550 be16_to_cpu(cur_bc.vl[i].shared)) 11551 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); 11552 } 11553 11554 /* finally raise the global shared */ 11555 if (be16_to_cpu(new_bc->overall_shared_limit) > 11556 be16_to_cpu(cur_bc.overall_shared_limit)) 11557 set_global_shared(dd, 11558 be16_to_cpu(new_bc->overall_shared_limit)); 11559 11560 /* bracket the credit change with a total adjustment */ 11561 if (new_total < cur_total) 11562 set_global_limit(dd, new_total); 11563 11564 /* 11565 * Determine the actual number of operational VLS using the number of 11566 * dedicated and shared credits for each VL. 11567 */ 11568 if (change_count > 0) { 11569 for (i = 0; i < TXE_NUM_DATA_VL; i++) 11570 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || 11571 be16_to_cpu(new_bc->vl[i].shared) > 0) 11572 vl_count++; 11573 ppd->actual_vls_operational = vl_count; 11574 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? 
11575 ppd->actual_vls_operational : 11576 ppd->vls_operational, 11577 NULL); 11578 if (ret == 0) 11579 ret = pio_map_init(dd, ppd->port - 1, vl_count ? 11580 ppd->actual_vls_operational : 11581 ppd->vls_operational, NULL); 11582 if (ret) 11583 return ret; 11584 } 11585 return 0; 11586 } 11587 11588 /* 11589 * Read the given fabric manager table. Return the size of the 11590 * table (in bytes) on success, and a negative error code on 11591 * failure. 11592 */ 11593 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t) 11594 11595 { 11596 int size; 11597 struct vl_arb_cache *vlc; 11598 11599 switch (which) { 11600 case FM_TBL_VL_HIGH_ARB: 11601 size = 256; 11602 /* 11603 * OPA specifies 128 elements (of 2 bytes each), though 11604 * HFI supports only 16 elements in h/w. 11605 */ 11606 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE); 11607 vl_arb_get_cache(vlc, t); 11608 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE); 11609 break; 11610 case FM_TBL_VL_LOW_ARB: 11611 size = 256; 11612 /* 11613 * OPA specifies 128 elements (of 2 bytes each), though 11614 * HFI supports only 16 elements in h/w. 11615 */ 11616 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE); 11617 vl_arb_get_cache(vlc, t); 11618 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE); 11619 break; 11620 case FM_TBL_BUFFER_CONTROL: 11621 size = get_buffer_control(ppd->dd, t, NULL); 11622 break; 11623 case FM_TBL_SC2VLNT: 11624 size = get_sc2vlnt(ppd->dd, t); 11625 break; 11626 case FM_TBL_VL_PREEMPT_ELEMS: 11627 size = 256; 11628 /* OPA specifies 128 elements, of 2 bytes each */ 11629 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); 11630 break; 11631 case FM_TBL_VL_PREEMPT_MATRIX: 11632 size = 256; 11633 /* 11634 * OPA specifies that this is the same size as the VL 11635 * arbitration tables (i.e., 256 bytes). 11636 */ 11637 break; 11638 default: 11639 return -EINVAL; 11640 } 11641 return size; 11642 } 11643 11644 /* 11645 * Write the given fabric manager table. 
 */
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
{
	int ret = 0;
	struct vl_arb_cache *vlc;

	switch (which) {
	case FM_TBL_VL_HIGH_ARB:
		vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			/* no change - skip the hardware write */
			vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
			break;
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
				     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_VL_LOW_ARB:
		vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
		if (vl_arb_match_cache(vlc, t)) {
			/* no change - skip the hardware write */
			vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
			break;
		}
		vl_arb_set_cache(vlc, t);
		vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
		ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
				     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
		break;
	case FM_TBL_BUFFER_CONTROL:
		ret = set_buffer_control(ppd, t);
		break;
	case FM_TBL_SC2VLNT:
		set_sc2vlnt(ppd->dd, t);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Disable all data VLs.
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */
static int disable_data_vls(struct hfi1_devdata *dd)
{
	/* A0 hardware cannot disable the data VLs */
	if (is_ax(dd))
		return 1;

	pio_send_control(dd, PSC_DATA_VL_DISABLE);

	return 0;
}

/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11709 */ 11710 int open_fill_data_vls(struct hfi1_devdata *dd) 11711 { 11712 if (is_ax(dd)) 11713 return 1; 11714 11715 pio_send_control(dd, PSC_DATA_VL_ENABLE); 11716 11717 return 0; 11718 } 11719 11720 /* 11721 * drain_data_vls() - assumes that disable_data_vls() has been called, 11722 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA 11723 * engines to drop to 0. 11724 */ 11725 static void drain_data_vls(struct hfi1_devdata *dd) 11726 { 11727 sc_wait(dd); 11728 sdma_wait(dd); 11729 pause_for_credit_return(dd); 11730 } 11731 11732 /* 11733 * stop_drain_data_vls() - disable, then drain all per-VL fifos. 11734 * 11735 * Use open_fill_data_vls() to resume using data VLs. This pair is 11736 * meant to be used like this: 11737 * 11738 * stop_drain_data_vls(dd); 11739 * // do things with per-VL resources 11740 * open_fill_data_vls(dd); 11741 */ 11742 int stop_drain_data_vls(struct hfi1_devdata *dd) 11743 { 11744 int ret; 11745 11746 ret = disable_data_vls(dd); 11747 if (ret == 0) 11748 drain_data_vls(dd); 11749 11750 return ret; 11751 } 11752 11753 /* 11754 * Convert a nanosecond time to a cclock count. No matter how slow 11755 * the cclock, a non-zero ns will always have a non-zero result. 11756 */ 11757 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns) 11758 { 11759 u32 cclocks; 11760 11761 if (dd->icode == ICODE_FPGA_EMULATION) 11762 cclocks = (ns * 1000) / FPGA_CCLOCK_PS; 11763 else /* simulation pretends to be ASIC */ 11764 cclocks = (ns * 1000) / ASIC_CCLOCK_PS; 11765 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */ 11766 cclocks = 1; 11767 return cclocks; 11768 } 11769 11770 /* 11771 * Convert a cclock count to nanoseconds. Not matter how slow 11772 * the cclock, a non-zero cclocks will always have a non-zero result. 
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
{
	u32 ns;

	if (dd->icode == ICODE_FPGA_EMULATION)
		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
	else /* simulation pretends to be ASIC */
		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
	if (cclocks && !ns)	/* non-zero cclocks must yield at least 1 ns */
		ns = 1;
	return ns;
}

/*
 * Dynamically adjust the receive interrupt timeout for a context based on
 * incoming packet rate.
 *
 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
 */
static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 timeout = rcd->rcvavail_timeout;

	/*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt were less than or
	 * greater equal the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints it is possible to have an unchanging
	 * timeout.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * Not enough packets arrived before the timeout, adjust
		 * timeout downward.
		 */
		if (timeout < 2) /* already at minimum? */
			return;
		timeout >>= 1;
	} else {
		/*
		 * More than enough packets arrived before the timeout, adjust
		 * timeout upward.
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}

	rcd->rcvavail_timeout = timeout;
	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
	 * been verified to be in range
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}

/*
 * Update the RcvHdrHead (and, optionally, RcvEgrIndexHead) CSRs for
 * @rcd, optionally re-tuning the interrupt mitigation timeout first.
 */
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
		    u32 intr_adjust, u32 npkts)
{
	struct hfi1_devdata *dd = rcd->dd;
	u64 reg;
	u32 ctxt = rcd->ctxt;

	/*
	 * Need to write timeout register before updating RcvHdrHead to ensure
	 * that a new value is used when the HW decides to restart counting.
	 */
	if (intr_adjust)
		adjust_rcv_timeout(rcd, npkts);
	if (updegr) {
		reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
			<< RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
	}
	/* write the new head along with the interrupt mitigation count */
	reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
		(((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
		 << RCV_HDR_HEAD_HEAD_SHIFT);
	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
}

/*
 * Return non-zero (head == tail) if the receive header queue for @rcd
 * is empty.  The tail comes from the DMA'ed copy if one is mapped,
 * otherwise from the CSR.
 */
u32 hdrqempty(struct hfi1_ctxtdata *rcd)
{
	u32 head, tail;

	head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
		& RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;

	if (hfi1_rcvhdrtail_kvaddr(rcd))
		tail = get_rcvhdrtail(rcd);
	else
		tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);

	return head == tail;
}

/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB (Receive Array only)
 *	0x9   1 MB (Receive Array only)
 *	0xa   2 MB (Receive Array only)
 *
 *
0xB-0xF - reserved (Receive Array only) 11889 * 11890 * 11891 * This routine assumes that the value has already been sanity checked. 11892 */ 11893 static u32 encoded_size(u32 size) 11894 { 11895 switch (size) { 11896 case 4 * 1024: return 0x1; 11897 case 8 * 1024: return 0x2; 11898 case 16 * 1024: return 0x3; 11899 case 32 * 1024: return 0x4; 11900 case 64 * 1024: return 0x5; 11901 case 128 * 1024: return 0x6; 11902 case 256 * 1024: return 0x7; 11903 case 512 * 1024: return 0x8; 11904 case 1 * 1024 * 1024: return 0x9; 11905 case 2 * 1024 * 1024: return 0xa; 11906 } 11907 return 0x1; /* if invalid, go with the minimum size */ 11908 } 11909 11910 /** 11911 * encode_rcv_header_entry_size - return chip specific encoding for size 11912 * @size: size in dwords 11913 * 11914 * Convert a receive header entry size that to the encoding used in the CSR. 11915 * 11916 * Return a zero if the given size is invalid, otherwise the encoding. 11917 */ 11918 u8 encode_rcv_header_entry_size(u8 size) 11919 { 11920 /* there are only 3 valid receive header entry sizes */ 11921 if (size == 2) 11922 return 1; 11923 if (size == 16) 11924 return 2; 11925 if (size == 32) 11926 return 4; 11927 return 0; /* invalid */ 11928 } 11929 11930 /** 11931 * hfi1_validate_rcvhdrcnt - validate hdrcnt 11932 * @dd: the device data 11933 * @thecnt: the header count 11934 */ 11935 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt) 11936 { 11937 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 11938 dd_dev_err(dd, "Receive header queue count too small\n"); 11939 return -EINVAL; 11940 } 11941 11942 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { 11943 dd_dev_err(dd, 11944 "Receive header queue count cannot be greater than %u\n", 11945 HFI1_MAX_HDRQ_EGRBUF_CNT); 11946 return -EINVAL; 11947 } 11948 11949 if (thecnt % HDRQ_INCREMENT) { 11950 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n", 11951 thecnt, HDRQ_INCREMENT); 11952 return -EINVAL; 11953 } 11954 11955 return 0; 11956 } 11957 
/**
 * set_hdrq_regs - set header queue registers for context
 * @dd: the device data
 * @ctxt: the context
 * @entsize: the dword entry size
 * @hdrcnt: the number of header entries
 */
void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
{
	u64 reg;

	reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
	      RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
	reg = ((u64)encode_rcv_header_entry_size(entsize) &
	       RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
	      RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
	      RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);
}

/*
 * Apply the HFI1_RCVCTRL_* operations in @op to receive context @rcd:
 * reads RcvCtxtCtrl, sets/clears the requested bits (doing the extra
 * enable/disable bookkeeping where needed), then writes it back.
 * The CSR writes in the enable path are order-sensitive - see the
 * inline comments.
 */
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
		  struct hfi1_ctxtdata *rcd)
{
	u64 rcvctrl, reg;
	int did_enable = 0;
	u16 ctxt;

	if (!rcd)
		return;

	ctxt = rcd->ctxt;

	hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);

	rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
	/* if the context already enabled, don't do the extra steps */
	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
		/* reset the tail and hdr addresses, and sequence count */
		write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
				rcd->rcvhdrq_dma);
		if (hfi1_rcvhdrtail_kvaddr(rcd))
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					rcd->rcvhdrqtailaddr_dma);
		hfi1_set_seq_cnt(rcd, 1);

		/* reset the cached receive header queue head value */
		hfi1_set_rcd_head(rcd, 0);

		/*
		 * Zero the receive header queue so we don't get false
		 * positives when checking the sequence number.  The
		 * sequence numbers could land exactly on the same spot.
		 * E.g. a rcd restart before the receive header wrapped.
		 */
		memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));

		/* starting timeout */
		rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;

		/* enable the context */
		rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;

		/* clean the egr buffer size first */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
				& RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
			<< RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;

		/* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
		did_enable = 1;

		/* zero RcvEgrIndexHead */
		write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);

		/* set eager count and base index */
		reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
			& RCV_EGR_CTRL_EGR_CNT_MASK)
		       << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
			(((rcd->eager_base >> RCV_SHIFT)
			  & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
			 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);

		/*
		 * Set TID (expected) count and base index.
		 * rcd->expected_count is set to individual RcvArray entries,
		 * not pairs, and the CSR takes a pair-count in groups of
		 * four, so divide by 8.
		 */
		reg = (((rcd->expected_count >> RCV_SHIFT)
			& RCV_TID_CTRL_TID_PAIR_CNT_MASK)
		       << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
			(((rcd->expected_base >> RCV_SHIFT)
			  & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
			 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
		write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
		if (ctxt == HFI1_CTRL_CTXT)
			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
	}
	if (op & HFI1_RCVCTRL_CTXT_DIS) {
		write_csr(dd, RCV_VL15, 0);
		/*
		 * When receive context is being disabled turn on tail
		 * update with a dummy tail address and then disable
		 * receive context.
		 */
		if (dd->rcvhdrtail_dummy_dma) {
			write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
					dd->rcvhdrtail_dummy_dma);
			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
		}

		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
			      IS_RCVAVAIL_START + rcd->ctxt, true);
		rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	}
	if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
		set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
			      IS_RCVAVAIL_START + rcd->ctxt, false);
		rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
	}
	if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
		rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
		/* See comment on RcvCtxtCtrl.TailUpd above */
		if (!(op & HFI1_RCVCTRL_CTXT_DIS))
			rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
	}
	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
		rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
		/*
		 * In one-packet-per-eager mode, the size comes from
		 * the RcvArray entry.
		 */
		rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
		rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	}
	if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
		rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
		rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
	if (op & HFI1_RCVCTRL_URGENT_ENB)
		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
			      IS_RCVURGENT_START + rcd->ctxt, true);
	if (op & HFI1_RCVCTRL_URGENT_DIS)
		set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
			      IS_RCVURGENT_START + rcd->ctxt, false);

	hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx", ctxt, rcvctrl);
	write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);

	/* work around sticky RcvCtxtStatus.BlockedRHQFull */
	if (did_enable &&
	    (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
		reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
		if (reg != 0) {
			dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
				    ctxt, reg);
			/* nudge RcvHdrHead to clear the sticky status bit */
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
			write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
			read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
			reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
			dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
				    ctxt, reg, reg == 0 ? "not" : "still");
		}
	}

	if (did_enable) {
		/*
		 * The interrupt timeout and count must be set after
		 * the context is enabled to take effect.
		 */
		/* set interrupt timeout */
		write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
				(u64)rcd->rcvavail_timeout <<
				RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);

		/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
		reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
		write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
	}

	if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
		/*
		 * If the context has been disabled and the Tail Update has
		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
		 * so it doesn't contain an address that is invalid.
		 */
		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
				dd->rcvhdrtail_dummy_dma);
}

/*
 * With @namep set: return the counter-name blob and its length.
 * Otherwise: refresh every device counter into dd->cntrs, point
 * *@cntrp at it, and return the block size in bytes.
 */
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = dd->cntrnameslen;
		*namep = dd->cntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = (dd->ndevcntrs) * sizeof(u64);

		/* Get the start of the block of counters */
		*cntrp = dd->cntrs;

		/*
		 * Now go and fill in each counter in the block.
		 */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled");
			} else {
				if (entry->flags & CNTR_VL) {
					/* one slot per VL */
					hfi1_cdbg(CNTR, "\tPer VL");
					for (j = 0; j < C_VL_COUNT; j++) {
						val = entry->rw_cntr(entry,
								     dd, j,
								     CNTR_MODE_R,
								     0);
						hfi1_cdbg(
							CNTR,
							"\t\tRead 0x%llx for %d",
							val, j);
						dd->cntrs[entry->offset + j] =
							val;
					}
				} else if (entry->flags & CNTR_SDMA) {
					/* one slot per SDMA engine */
					hfi1_cdbg(CNTR,
						  "\t Per SDMA Engine");
					for (j = 0; j < chip_sdma_engines(dd);
					     j++) {
						val =
						entry->rw_cntr(entry, dd, j,
							       CNTR_MODE_R, 0);
						hfi1_cdbg(CNTR,
							  "\t\tRead 0x%llx for %d",
							  val, j);
						dd->cntrs[entry->offset + j] =
							val;
					}
				} else {
					val = entry->rw_cntr(entry, dd,
							     CNTR_INVALID_VL,
							     CNTR_MODE_R, 0);
					dd->cntrs[entry->offset] = val;
					hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
				}
			}
		}
	}
	return ret;
}

/*
 * Used by sysfs to create files for hfi stats to read.
 * Same dual behavior as hfi1_read_cntrs(), but for the per-port
 * counters of @ppd.
 */
u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
{
	int ret;
	u64 val = 0;

	if (namep) {
		ret = ppd->dd->portcntrnameslen;
		*namep = ppd->dd->portcntrnames;
	} else {
		const struct cntr_entry *entry;
		int i, j;

		ret = ppd->dd->nportcntrs * sizeof(u64);
		*cntrp = ppd->cntrs;

		for (i = 0; i < PORT_CNTR_LAST; i++) {
			entry = &port_cntrs[i];
			hfi1_cdbg(CNTR, "reading %s", entry->name);
			if (entry->flags & CNTR_DISABLED) {
				/* Nothing */
				hfi1_cdbg(CNTR, "\tDisabled");
				continue;
			}

			if (entry->flags & CNTR_VL) {
				hfi1_cdbg(CNTR, "\tPer VL");
				for (j = 0; j < C_VL_COUNT; j++) {
					val = entry->rw_cntr(entry, ppd, j,
							     CNTR_MODE_R,
							     0);
					hfi1_cdbg(
						CNTR,
						"\t\tRead 0x%llx for %d",
						val, j);
					ppd->cntrs[entry->offset + j] = val;
				}
			} else {
				val = entry->rw_cntr(entry, ppd,
						     CNTR_INVALID_VL,
						     CNTR_MODE_R,
						     0);
				ppd->cntrs[entry->offset] = val;
				hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
			}
		}
	}
	return ret;
}

/*
 * Tear down everything init_cntrs() set up: stop the synth timer and
 * its work, then free all per-port and per-device counter storage.
 * Safe to call on a partially initialized device (kfree/free_percpu
 * of NULL are no-ops; the workqueue is checked before destroy).
 */
static void free_cntrs(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	if (dd->synth_stats_timer.function)
		timer_delete_sync(&dd->synth_stats_timer);
	cancel_work_sync(&dd->update_cntr_work);
	/* per-port data is laid out immediately after the devdata */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		kfree(ppd->cntrs);
		kfree(ppd->scntrs);
		free_percpu(ppd->ibport_data.rvp.rc_acks);
		free_percpu(ppd->ibport_data.rvp.rc_qacks);
		free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
		ppd->cntrs = NULL;
		ppd->scntrs = NULL;
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_delayed_comp = NULL;
	}
	kfree(dd->portcntrnames);
	dd->portcntrnames = NULL;
	kfree(dd->cntrs);
	dd->cntrs = NULL;
	kfree(dd->scntrs);
	dd->scntrs = NULL;
	kfree(dd->cntrnames);
	dd->cntrnames = NULL;
	if (dd->update_cntr_wq) {
		destroy_workqueue(dd->update_cntr_wq);
		dd->update_cntr_wq = NULL;
	}
}

/*
 * Read one device/port counter, folding hardware wraps of synthetic
 * counters into the 64-bit shadow value at @psval.  Returns the
 * (possibly saturated) updated value.
 */
static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
			      u64 *psval, void *context, int vl)
{
	u64 val;
	u64 sval = *psval;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R,
			     0);

	/* If its a synthetic counter there is more work we need to do */
	if (entry->flags & CNTR_SYNTH) {
		if (sval == CNTR_MAX) {
			/* No need to read already saturated */
			return CNTR_MAX;
		}

		if (entry->flags & CNTR_32BIT) {
			/* 32bit counters can wrap multiple times */
			u64 upper = sval >> 32;
			u64 lower = (sval << 32) >> 32;

			if (lower > val) { /* hw wrapped */
				if (upper == CNTR_32BIT_MAX)
					val = CNTR_MAX;
				else
					upper++;
			}

			if (val != CNTR_MAX)
				val = (upper << 32) | val;

		} else {
			/* If we rolled we are saturated */
			if ((val < sval) || (val > CNTR_MAX))
				val = CNTR_MAX;
		}
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}

/*
 * Write one device/port counter.  For synthetic counters the shadow
 * value at @psval is updated; 32-bit h/w counters only get the low
 * 32 bits written, but the full 64-bit value is kept in the shadow.
 */
static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
			       struct cntr_entry *entry,
			       u64 *psval, void *context, int vl, u64 data)
{
	u64 val;

	if (entry->flags & CNTR_DISABLED) {
		dd_dev_err(dd, "Counter %s not enabled", entry->name);
		return 0;
	}

	hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);

	if (entry->flags & CNTR_SYNTH) {
		*psval = data;
		if (entry->flags & CNTR_32BIT) {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     (data << 32) >> 32);
			val = data; /* return the full 64bit value */
		} else {
			val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
					     data);
		}
	} else {
		val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
	}

	*psval = val;

	hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);

	return val;
}

/* read device counter @index (per-VL slot selected by @vl) */
u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return read_dev_port_cntr(dd, entry, sval, dd, vl);
}

/* write device counter @index (per-VL slot selected by @vl) */
u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &dev_cntrs[index];
	sval = dd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
}

/* read port counter @index (per-VL slot selected by @vl) */
u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
}

/* write port counter @index (per-VL slot selected by @vl) */
u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
{
	struct cntr_entry *entry;
	u64 *sval;

	entry = &port_cntrs[index];
	sval = ppd->scntrs + entry->offset;

	if (vl != CNTR_INVALID_VL)
		sval += vl;

	if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
	    (index <= C_RCV_HDR_OVF_LAST)) {
		/* We do not want to bother for disabled contexts */
		return 0;
	}

	return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
}

/*
 * Deferred work for the synthetic-counter timer: decide whether the
 * 32-bit h/w counters may have wrapped since the last pass and, if
 * so, re-read them all to update the 64-bit shadows.
 */
static void do_update_synth_timer(struct work_struct *work)
{
	u64 cur_tx;
	u64 cur_rx;
	u64 total_flits;
	u8 update = 0;
	int i, j, vl;
	struct hfi1_pportdata *ppd;
	struct cntr_entry *entry;
	struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
					       update_cntr_work);

	/*
	 * Rather than keep beating
	 * on the CSRs pick a minimal set that we can
	 * check to watch for potential roll over. We can do this by looking at
	 * the number of flits sent/recv. If the total flits exceeds 32bits then
	 * we have to iterate all the counters and update.
	 */
	entry = &dev_cntrs[C_DC_RCV_FLITS];
	cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	entry = &dev_cntrs[C_DC_XMIT_FLITS];
	cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);

	hfi1_cdbg(
		CNTR,
		"[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx",
		dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);

	if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
		/*
		 * May not be strictly necessary to update but it won't hurt and
		 * simplifies the logic here.
		 */
		update = 1;
		hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
			  dd->unit);
	} else {
		total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
		hfi1_cdbg(CNTR,
			  "[%d] total flits 0x%llx limit 0x%llx", dd->unit,
			  total_flits, (u64)CNTR_32BIT_MAX);
		if (total_flits >= CNTR_32BIT_MAX) {
			hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
				  dd->unit);
			update = 1;
		}
	}

	if (update) {
		hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
		/* reading a counter refreshes its 64-bit shadow value */
		for (i = 0; i < DEV_CNTR_LAST; i++) {
			entry = &dev_cntrs[i];
			if (entry->flags & CNTR_VL) {
				for (vl = 0; vl < C_VL_COUNT; vl++)
					read_dev_cntr(dd, i, vl);
			} else {
				read_dev_cntr(dd, i, CNTR_INVALID_VL);
			}
		}
		ppd = (struct hfi1_pportdata *)(dd + 1);
		for (i = 0; i < dd->num_pports; i++, ppd++) {
			for (j = 0; j < PORT_CNTR_LAST; j++) {
				entry = &port_cntrs[j];
				if (entry->flags & CNTR_VL) {
					for (vl = 0; vl < C_VL_COUNT; vl++)
						read_port_cntr(ppd, j, vl);
				} else {
					read_port_cntr(ppd, j, CNTR_INVALID_VL);
				}
			}
		}

		/*
		 * We want the value in the register. The goal is to keep track
		 * of the number of "ticks" not the counter value. In other
		 * words if the register rolls we want to notice it and go ahead
		 * and force an update.
		 */
		entry = &dev_cntrs[C_DC_XMIT_FLITS];
		dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		entry = &dev_cntrs[C_DC_RCV_FLITS];
		dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
					     CNTR_MODE_R, 0);

		hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
			  dd->unit, dd->last_tx, dd->last_rx);

	} else {
		hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
	}
}

/*
 * Periodic timer: kick the counter-update work off to a workqueue
 * (the update reads CSRs and must not run in timer context), then
 * re-arm for the next interval.
 */
static void update_synth_timer(struct timer_list *t)
{
	struct hfi1_devdata *dd = timer_container_of(dd, t, synth_stats_timer);

	queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}

#define C_MAX_NAME 16 /* 15 chars + one for /0 */
/*
 * Allocate and populate all device and port counter storage plus the
 * name blobs exposed through sysfs, then start the synthetic-counter
 * timer.  Returns 0 or -ENOMEM (everything is freed on failure).
 */
static int init_cntrs(struct hfi1_devdata *dd)
{
	int i, rcv_ctxts, j;
	size_t sz;
	char *p;
	char name[C_MAX_NAME];
	struct hfi1_pportdata *ppd;
	const char *bit_type_32 = ",32";
	const int bit_type_32_sz = strlen(bit_type_32);
	u32 sdma_engines = chip_sdma_engines(dd);

	/* set up the stats timer; the add_timer is done at the end */
	timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);

	/***********************/
	/* per device counters */
	/***********************/

	/* size names and determine how many we have*/
	dd->ndevcntrs = 0;
	sz = 0;

	for (i = 0; i < DEV_CNTR_LAST; i++) {
		if (dev_cntrs[i].flags & CNTR_DISABLED) {
			hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
			continue;
		}

		if
(dev_cntrs[i].flags & CNTR_VL) { 12617 dev_cntrs[i].offset = dd->ndevcntrs; 12618 for (j = 0; j < C_VL_COUNT; j++) { 12619 snprintf(name, C_MAX_NAME, "%s%d", 12620 dev_cntrs[i].name, vl_from_idx(j)); 12621 sz += strlen(name); 12622 /* Add ",32" for 32-bit counters */ 12623 if (dev_cntrs[i].flags & CNTR_32BIT) 12624 sz += bit_type_32_sz; 12625 sz++; 12626 dd->ndevcntrs++; 12627 } 12628 } else if (dev_cntrs[i].flags & CNTR_SDMA) { 12629 dev_cntrs[i].offset = dd->ndevcntrs; 12630 for (j = 0; j < sdma_engines; j++) { 12631 snprintf(name, C_MAX_NAME, "%s%d", 12632 dev_cntrs[i].name, j); 12633 sz += strlen(name); 12634 /* Add ",32" for 32-bit counters */ 12635 if (dev_cntrs[i].flags & CNTR_32BIT) 12636 sz += bit_type_32_sz; 12637 sz++; 12638 dd->ndevcntrs++; 12639 } 12640 } else { 12641 /* +1 for newline. */ 12642 sz += strlen(dev_cntrs[i].name) + 1; 12643 /* Add ",32" for 32-bit counters */ 12644 if (dev_cntrs[i].flags & CNTR_32BIT) 12645 sz += bit_type_32_sz; 12646 dev_cntrs[i].offset = dd->ndevcntrs; 12647 dd->ndevcntrs++; 12648 } 12649 } 12650 12651 /* allocate space for the counter values */ 12652 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), 12653 GFP_KERNEL); 12654 if (!dd->cntrs) 12655 goto bail; 12656 12657 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); 12658 if (!dd->scntrs) 12659 goto bail; 12660 12661 /* allocate space for the counter names */ 12662 dd->cntrnameslen = sz; 12663 dd->cntrnames = kmalloc(sz, GFP_KERNEL); 12664 if (!dd->cntrnames) 12665 goto bail; 12666 12667 /* fill in the names */ 12668 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { 12669 if (dev_cntrs[i].flags & CNTR_DISABLED) { 12670 /* Nothing */ 12671 } else if (dev_cntrs[i].flags & CNTR_VL) { 12672 for (j = 0; j < C_VL_COUNT; j++) { 12673 snprintf(name, C_MAX_NAME, "%s%d", 12674 dev_cntrs[i].name, 12675 vl_from_idx(j)); 12676 memcpy(p, name, strlen(name)); 12677 p += strlen(name); 12678 12679 /* Counter is 32 bits */ 12680 if 
(dev_cntrs[i].flags & CNTR_32BIT) { 12681 memcpy(p, bit_type_32, bit_type_32_sz); 12682 p += bit_type_32_sz; 12683 } 12684 12685 *p++ = '\n'; 12686 } 12687 } else if (dev_cntrs[i].flags & CNTR_SDMA) { 12688 for (j = 0; j < sdma_engines; j++) { 12689 snprintf(name, C_MAX_NAME, "%s%d", 12690 dev_cntrs[i].name, j); 12691 memcpy(p, name, strlen(name)); 12692 p += strlen(name); 12693 12694 /* Counter is 32 bits */ 12695 if (dev_cntrs[i].flags & CNTR_32BIT) { 12696 memcpy(p, bit_type_32, bit_type_32_sz); 12697 p += bit_type_32_sz; 12698 } 12699 12700 *p++ = '\n'; 12701 } 12702 } else { 12703 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name)); 12704 p += strlen(dev_cntrs[i].name); 12705 12706 /* Counter is 32 bits */ 12707 if (dev_cntrs[i].flags & CNTR_32BIT) { 12708 memcpy(p, bit_type_32, bit_type_32_sz); 12709 p += bit_type_32_sz; 12710 } 12711 12712 *p++ = '\n'; 12713 } 12714 } 12715 12716 /*********************/ 12717 /* per port counters */ 12718 /*********************/ 12719 12720 /* 12721 * Go through the counters for the overflows and disable the ones we 12722 * don't need. This varies based on platform so we need to do it 12723 * dynamically here. 
12724 */ 12725 rcv_ctxts = dd->num_rcv_contexts; 12726 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts; 12727 i <= C_RCV_HDR_OVF_LAST; i++) { 12728 port_cntrs[i].flags |= CNTR_DISABLED; 12729 } 12730 12731 /* size port counter names and determine how many we have*/ 12732 sz = 0; 12733 dd->nportcntrs = 0; 12734 for (i = 0; i < PORT_CNTR_LAST; i++) { 12735 if (port_cntrs[i].flags & CNTR_DISABLED) { 12736 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name); 12737 continue; 12738 } 12739 12740 if (port_cntrs[i].flags & CNTR_VL) { 12741 port_cntrs[i].offset = dd->nportcntrs; 12742 for (j = 0; j < C_VL_COUNT; j++) { 12743 snprintf(name, C_MAX_NAME, "%s%d", 12744 port_cntrs[i].name, vl_from_idx(j)); 12745 sz += strlen(name); 12746 /* Add ",32" for 32-bit counters */ 12747 if (port_cntrs[i].flags & CNTR_32BIT) 12748 sz += bit_type_32_sz; 12749 sz++; 12750 dd->nportcntrs++; 12751 } 12752 } else { 12753 /* +1 for newline */ 12754 sz += strlen(port_cntrs[i].name) + 1; 12755 /* Add ",32" for 32-bit counters */ 12756 if (port_cntrs[i].flags & CNTR_32BIT) 12757 sz += bit_type_32_sz; 12758 port_cntrs[i].offset = dd->nportcntrs; 12759 dd->nportcntrs++; 12760 } 12761 } 12762 12763 /* allocate space for the counter names */ 12764 dd->portcntrnameslen = sz; 12765 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); 12766 if (!dd->portcntrnames) 12767 goto bail; 12768 12769 /* fill in port cntr names */ 12770 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { 12771 if (port_cntrs[i].flags & CNTR_DISABLED) 12772 continue; 12773 12774 if (port_cntrs[i].flags & CNTR_VL) { 12775 for (j = 0; j < C_VL_COUNT; j++) { 12776 snprintf(name, C_MAX_NAME, "%s%d", 12777 port_cntrs[i].name, vl_from_idx(j)); 12778 memcpy(p, name, strlen(name)); 12779 p += strlen(name); 12780 12781 /* Counter is 32 bits */ 12782 if (port_cntrs[i].flags & CNTR_32BIT) { 12783 memcpy(p, bit_type_32, bit_type_32_sz); 12784 p += bit_type_32_sz; 12785 } 12786 12787 *p++ = '\n'; 12788 } 12789 } else { 12790 memcpy(p, 
port_cntrs[i].name,
			       strlen(port_cntrs[i].name));
			p += strlen(port_cntrs[i].name);

			/* Counter is 32 bits */
			if (port_cntrs[i].flags & CNTR_32BIT) {
				memcpy(p, bit_type_32, bit_type_32_sz);
				p += bit_type_32_sz;
			}

			*p++ = '\n';
		}
	}

	/* allocate per port storage for counter values */
	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->cntrs)
			goto bail;

		ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
		if (!ppd->scntrs)
			goto bail;
	}

	/* CPU counters need to be allocated and zeroed */
	if (init_cpu_counters(dd))
		goto bail;

	dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
						     WQ_MEM_RECLAIM, dd->unit);
	if (!dd->update_cntr_wq)
		goto bail;

	INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);

	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
	return 0;
bail:
	/* free_cntrs() unwinds all partial allocations made above */
	free_cntrs(dd);
	return -ENOMEM;
}

/*
 * Map a chip logical link state to the corresponding IB logical port
 * state.  An unknown value is reported as IB_PORT_DOWN, with an error
 * message logged.
 */
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
{
	switch (chip_lstate) {
	case LSTATE_DOWN:
		return IB_PORT_DOWN;
	case LSTATE_INIT:
		return IB_PORT_INIT;
	case LSTATE_ARMED:
		return IB_PORT_ARMED;
	case LSTATE_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(dd,
			   "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
			   chip_lstate);
		return IB_PORT_DOWN;
	}
}

/*
 * Map a chip physical link state to the corresponding OPA/IB physical
 * port state.  Only the upper nibble (the HFI meta-state) is examined,
 * so all substates of a meta-state map to the same value.  An unknown
 * value is reported as IB_PORTPHYSSTATE_DISABLED, with an error message
 * logged.
 */
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
{
	/* look at the HFI meta-states only */
	switch (chip_pstate & 0xf0) {
	case PLS_DISABLED:
		return IB_PORTPHYSSTATE_DISABLED;
	case PLS_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case PLS_POLLING:
		return IB_PORTPHYSSTATE_POLLING;
	case PLS_CONFIGPHY:
		return IB_PORTPHYSSTATE_TRAINING;
	case PLS_LINKUP:
		return IB_PORTPHYSSTATE_LINKUP;
	case PLS_PHYTEST:
		return IB_PORTPHYSSTATE_PHY_TEST;
	default:
		dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
			   chip_pstate);
		return IB_PORTPHYSSTATE_DISABLED;
	}
}

/* return the OPA port physical state name */
const char *opa_pstate_name(u32 pstate)
{
	static const char * const port_physical_names[] = {
		"PHYS_NOP",
		"reserved1",
		"PHYS_POLL",
		"PHYS_DISABLED",
		"PHYS_TRAINING",
		"PHYS_LINKUP",
		"PHYS_LINK_ERR_RECOVER",
		"PHYS_PHY_TEST",
		"reserved8",
		"PHYS_OFFLINE",
		"PHYS_GANGED",
		"PHYS_TEST",
	};

	if (pstate < ARRAY_SIZE(port_physical_names))
		return port_physical_names[pstate];
	/* out-of-range states have no name */
	return "unknown";
}

/**
 * update_statusp - Update userspace status flag
 * @ppd: Port data structure
 * @state: port state information
 *
 * Actual port status is determined by the host_link_state value
 * in the ppd.
 *
 * host_link_state MUST be updated before updating the user space
 * statusp.
 */
static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
{
	/*
	 * Set port status flags in the page mapped into userspace
	 * memory. Do it here to ensure a reliable state - this is
	 * the only function called by all state handling code.
	 * Always set the flags due to the fact that the cache value
	 * might have been changed explicitly outside of this
	 * function.
	 */
	if (ppd->statusp) {
		switch (state) {
		case IB_PORT_DOWN:
		case IB_PORT_INIT:
			*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
					   HFI1_STATUS_IB_READY);
			break;
		case IB_PORT_ARMED:
			*ppd->statusp |= HFI1_STATUS_IB_CONF;
			break;
		case IB_PORT_ACTIVE:
			*ppd->statusp |= HFI1_STATUS_IB_READY;
			break;
		}
	}
}

/**
 * wait_logical_linkstate - wait for an IB link state change to occur
 * @ppd: port device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for IB link state change to occur.
 * For now, take the easy polling route.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs)
{
	unsigned long timeout;
	u32 new_state;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		new_state = chip_to_opa_lstate(ppd->dd,
					       read_logical_state(ppd->dd));
		if (new_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for link state 0x%x\n",
				   state);
			return -ETIMEDOUT;
		}
		msleep(20);
	}

	return 0;
}

/* Log a physical link state change in OPA terms. */
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
{
	u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);

	dd_dev_info(ppd->dd,
		    "physical state changed to %s (0x%x), phy 0x%x\n",
		    opa_pstate_name(ib_pstate), ib_pstate, state);
}

/*
 * Read the physical hardware link state and check if it matches the host
 * driver's anticipated state.
 */
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
{
	u32 read_state = read_physical_state(ppd->dd);

	if (read_state == state) {
		log_state_transition(ppd, state);
	} else {
		dd_dev_err(ppd->dd,
			   "anticipated phy link state 0x%x, read 0x%x\n",
			   state, read_state);
	}
}

/*
 * wait_physical_linkstate - wait for an physical link state change to occur
 * @ppd: port device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for physical link state change to occur.
 * Returns 0 if state reached, otherwise -ETIMEDOUT.
 */
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs)
{
	u32 read_state;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		read_state = read_physical_state(ppd->dd);
		if (read_state == state)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for phy link state 0x%x\n",
				   state);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	log_state_transition(ppd, state);
	return 0;
}

/*
 * wait_phys_link_offline_quiet_substates - wait for any offline substate
 * @ppd: port device
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for any offline physical link
 * state change to occur.
 * Returns the physical state that was read (non-negative) once an
 * offline substate is reached, otherwise -ETIMEDOUT.
 */
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs)
{
	u32 read_state;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		read_state = read_physical_state(ppd->dd);
		/* only the meta-state (upper nibble) must match offline */
		if ((read_state & 0xF0) == PLS_OFFLINE)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
				   read_state, msecs);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	log_state_transition(ppd, read_state);
	return read_state;
}

/*
 * wait_phys_link_out_of_offline - wait for any out of offline state
 * @ppd: port device
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for any out of offline physical link
 * state change to occur.
 * Returns the physical state that was read (non-negative) once the link
 * has left the offline meta-state, otherwise -ETIMEDOUT.
 */
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs)
{
	u32 read_state;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(msecs);
	while (1) {
		read_state = read_physical_state(ppd->dd);
		if ((read_state & 0xF0) != PLS_OFFLINE)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(ppd->dd,
				   "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
				   read_state, msecs);
			return -ETIMEDOUT;
		}
		usleep_range(1950, 2050); /* sleep 2ms-ish */
	}

	log_state_transition(ppd, read_state);
	return read_state;
}

#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
	(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

#define SET_STATIC_RATE_CONTROL_SMASK(r) \
	(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)

/*
 * Enable or disable static PBC rate control on a send context according
 * to the relevant (user vs kernel) STATIC_RATE_CTRL capability.  Note the
 * SMASK bit is a "disallow" bit, so it is cleared to enable the feature.
 */
void hfi1_init_ctxt(struct send_context *sc)
{
	if (sc) {
		struct hfi1_devdata *dd = sc->dd;
		u64 reg;
		u8 set = (sc->type == SC_USER ?
			  HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
			  HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
		reg = read_kctxt_csr(dd, sc->hw_context,
				     SEND_CTXT_CHECK_ENABLE);
		if (set)
			CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
		else
			SET_STATIC_RATE_CONTROL_SMASK(reg);
		write_kctxt_csr(dd, sc->hw_context,
				SEND_CTXT_CHECK_ENABLE, reg);
	}
}

/*
 * Read the thermal sensor registers into @temp.  Only supported on RTL
 * silicon; returns -EINVAL otherwise.
 */
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
{
	int ret = 0;
	u64 reg;

	if (dd->icode != ICODE_RTL_SILICON) {
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd, "%s: tempsense not supported by HW\n",
				    __func__);
		return -EINVAL;
	}
	reg = read_csr(dd, ASIC_STS_THERM);
	temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
		      ASIC_STS_THERM_CURR_TEMP_MASK);
	temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
			ASIC_STS_THERM_LO_TEMP_MASK);
	temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
			ASIC_STS_THERM_HI_TEMP_MASK);
	temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
			  ASIC_STS_THERM_CRIT_TEMP_MASK);
	/* triggers is a 3-bit value - 1 bit per trigger. */
	temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);

	return ret;
}

/* ========================================================================= */

/**
 * read_mod_write() - Calculate the IRQ register index and set/clear the bits
 * @dd: valid devdata
 * @src: IRQ source to determine register index from
 * @bits: the bits to set or clear
 * @set: true == set the bits, false == clear the bits
 *
 * The read-modify-write of CCE_INT_MASK is serialized by irq_src_lock so
 * concurrent callers do not lose each other's updates.
 */
static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
			   bool set)
{
	u64 reg;
	u16 idx = src / BITS_PER_REGISTER;
	unsigned long flags;

	spin_lock_irqsave(&dd->irq_src_lock, flags);
	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
	if (set)
		reg |= bits;
	else
		reg &= ~bits;
	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
	spin_unlock_irqrestore(&dd->irq_src_lock, flags);
}

/**
 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
 * @dd: valid devdata
 * @first: first IRQ source to set/clear
 * @last: last IRQ source (inclusive) to set/clear
 * @set: true == set the bits, false == clear the bits
 *
 * If first == last, set the exact source.
 * Returns 0 on success, -EINVAL/-ERANGE on a bad range.
 */
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
{
	u64 bits = 0;
	u64 bit;
	u16 src;

	if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
		return -EINVAL;

	if (last < first)
		return -ERANGE;

	for (src = first; src <= last; src++) {
		bit = src % BITS_PER_REGISTER;
		/* wrapped to next register?
 */
		if (!bit && bits) {
			/* flush the bits accumulated for the previous register */
			read_mod_write(dd, src - 1, bits, set);
			bits = 0;
		}
		bits |= BIT_ULL(bit);
	}
	/* flush the final (possibly partial) register's worth of bits */
	read_mod_write(dd, last, bits, set);

	return 0;
}

/*
 * Clear all interrupt sources on the chip.
 */
static void clear_all_interrupts(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);

	write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
	write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
	write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
	write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);

	write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
	write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
	write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}

/*
 * Remap the interrupt source from the general handler to the given MSI-X
 * interrupt.
 */
void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
	u64 reg;
	int m, n;

	/* clear from the handled mask of the general interrupt */
	m = isrc / 64;
	n = isrc % 64;
	if (likely(m < CCE_NUM_INT_CSRS)) {
		dd->gi_mask[m] &= ~((u64)1 << n);
	} else {
		dd_dev_err(dd, "remap interrupt err\n");
		return;
	}

	/* direct the chip source to the given MSI-X interrupt */
	m = isrc / 8;
	n = isrc % 8;
	reg = read_csr(dd, CCE_INT_MAP + (8 * m));
	reg &= ~((u64)0xff << (8 * n));
	reg |= ((u64)msix_intr & 0xff) << (8 * n);
	write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}

/* Route all three per-engine SDMA interrupt sources to one MSI-X vector. */
void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
{
	/*
	 * SDMA engine interrupt sources grouped by type, rather than
	 * engine. Per-engine interrupts are as follows:
	 *	SDMA
	 *	SDMAProgress
	 *	SDMAIdle
	 */
	remap_intr(dd, IS_SDMA_START + engine, msix_intr);
	remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
	remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
}

/*
 * Set the general handler to accept all interrupts, remap all
 * chip interrupts back to MSI-X 0.
 */
void reset_interrupts(struct hfi1_devdata *dd)
{
	int i;

	/* all interrupts handled by the general handler */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		dd->gi_mask[i] = ~(u64)0;

	/* all chip interrupts map to MSI-X 0 */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}

/**
 * set_up_interrupts() - Initialize the IRQ resources and state
 * @dd: valid devdata
 *
 * Masks and clears all sources, resets the MSI-X mappings, then
 * allocates and requests the MSI-X vectors.  Returns 0 on success or a
 * negative errno from MSI-X setup.
 */
static int set_up_interrupts(struct hfi1_devdata *dd)
{
	int ret;

	/* mask all interrupts */
	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);

	/* clear all pending interrupts */
	clear_all_interrupts(dd);

	/* reset general handler mask, chip MSI-X mappings */
	reset_interrupts(dd);

	/* ask for MSI-X interrupts */
	ret = msix_initialize(dd);
	if (ret)
		return ret;

	ret = msix_request_irqs(dd);
	if (ret)
		msix_clean_up_interrupts(dd);

	return ret;
}

/*
 * Set up context values in dd.
Sets:
 *
 *	num_rcv_contexts - number of contexts being used
 *	n_krcv_queues - number of kernel contexts
 *	first_dyn_alloc_ctxt - first dynamically allocated context
 *			       in array of contexts
 *	freectxts - number of free user contexts
 *	num_send_contexts - number of PIO send contexts being used
 *	num_netdev_contexts - number of contexts reserved for netdev
 */
static int set_up_context_variables(struct hfi1_devdata *dd)
{
	unsigned long num_kernel_contexts;
	u16 num_netdev_contexts;
	int ret;
	unsigned ngroups;
	int rmt_count;
	u32 n_usr_ctxts;
	u32 send_contexts = chip_send_contexts(dd);
	u32 rcv_contexts = chip_rcv_contexts(dd);

	/*
	 * Kernel receive contexts:
	 * - Context 0 - control context (VL15/multicast/error)
	 * - Context 1 - first kernel context
	 * - Context 2 - second kernel context
	 * ...
	 */
	if (n_krcvqs)
		/*
		 * n_krcvqs is the sum of module parameter kernel receive
		 * contexts, krcvqs[].  It does not include the control
		 * context, so add that.
		 */
		num_kernel_contexts = n_krcvqs + 1;
	else
		num_kernel_contexts = DEFAULT_KRCVQS + 1;
	/*
	 * Every kernel receive context needs an ACK send context.
	 * one send context is allocated for each VL{0-7} and VL15
	 */
	if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
		dd_dev_err(dd,
			   "Reducing # kernel rcv contexts to: %d, from %lu\n",
			   send_contexts - num_vls - 1,
			   num_kernel_contexts);
		num_kernel_contexts = send_contexts - num_vls - 1;
	}

	/*
	 * User contexts:
	 *	- default to 1 user context per real (non-HT) CPU core if
	 *	  num_user_contexts is negative
	 */
	if (num_user_contexts < 0)
		n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
	else
		n_usr_ctxts = num_user_contexts;

	/*
	 * Adjust the counts given a global max.
	 */
	if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
		dd_dev_err(dd,
			   "Reducing # user receive contexts to: %u, from %u\n",
			   (u32)(rcv_contexts - num_kernel_contexts),
			   n_usr_ctxts);
		/* recalculate */
		n_usr_ctxts = rcv_contexts - num_kernel_contexts;
	}

	/* netdev contexts come out of whatever receive contexts remain */
	num_netdev_contexts =
		hfi1_num_netdev_contexts(dd, rcv_contexts -
					 (num_kernel_contexts + n_usr_ctxts),
					 &node_affinity.real_cpu_mask);
	/*
	 * RMT entries are allocated as follows:
	 * 1. QOS (0 to 128 entries)
	 * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts +
	 *          num_netdev_contexts [b])
	 * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
	 *
	 * Notes:
	 * [a] Kernel contexts (except control) are included in FECN if kernel
	 *     TID_RDMA is active.
	 * [b] Netdev and user contexts are randomly allocated from the same
	 *     context pool, so FECN must cover all contexts in the pool.
	 */
	rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
		    + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
						  : 0)
		    + n_usr_ctxts
		    + num_netdev_contexts
		    + NUM_NETDEV_MAP_ENTRIES;
	if (rmt_count > NUM_MAP_ENTRIES) {
		int over = rmt_count - NUM_MAP_ENTRIES;
		/* try to squish user contexts, minimum of 1 */
		if (over >= n_usr_ctxts) {
			dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
			return -EINVAL;
		}
		dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
			   n_usr_ctxts, n_usr_ctxts - over);
		n_usr_ctxts -= over;
	}

	/* the first N are kernel contexts, the rest are user/netdev contexts */
	dd->num_rcv_contexts =
		num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
	dd->n_krcv_queues = num_kernel_contexts;
	dd->first_dyn_alloc_ctxt = num_kernel_contexts;
	dd->num_netdev_contexts = num_netdev_contexts;
	dd->num_user_contexts = n_usr_ctxts;
	dd->freectxts = n_usr_ctxts;
	dd_dev_info(dd,
		    "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
		    rcv_contexts,
		    (int)dd->num_rcv_contexts,
		    (int)dd->n_krcv_queues,
		    dd->num_netdev_contexts,
		    dd->num_user_contexts);

	/*
	 * Receive array allocation:
	 *   All RcvArray entries are divided into groups of 8. This
	 *   is required by the hardware and will speed up writes to
	 *   consecutive entries by using write-combining of the entire
	 *   cacheline.
	 *
	 *   The number of groups are evenly divided among all contexts.
	 *   any left over groups will be given to the first N user
	 *   contexts.
	 */
	dd->rcv_entries.group_size = RCV_INCREMENT;
	ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
	dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
	dd->rcv_entries.nctxt_extra = ngroups -
		(dd->num_rcv_contexts * dd->rcv_entries.ngroups);
	dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
		    dd->rcv_entries.ngroups,
		    dd->rcv_entries.nctxt_extra);
	/* cap the per-context group count at the eager-entry limit */
	if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
	    MAX_EAGER_ENTRIES * 2) {
		dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
			dd->rcv_entries.group_size;
		dd_dev_info(dd,
			    "RcvArray group count too high, change to %u\n",
			    dd->rcv_entries.ngroups);
		dd->rcv_entries.nctxt_extra = 0;
	}
	/*
	 * PIO send contexts
	 */
	ret = init_sc_pools_and_sizes(dd);
	if (ret >= 0) {	/* success */
		dd->num_send_contexts = ret;
		dd_dev_info(
			dd,
			"send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
			send_contexts,
			dd->num_send_contexts,
			dd->sc_sizes[SC_KERNEL].count,
			dd->sc_sizes[SC_ACK].count,
			dd->sc_sizes[SC_USER].count,
			dd->sc_sizes[SC_VL15].count);
		ret = 0;	/* success */
	}

	return ret;
}

/*
 * Set the device/port partition key table. The MAD code
 * will ensure that, at least, the partial management
 * partition key is present in the table.
 */
static void set_partition_keys(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	u64 reg = 0;
	int i;

	dd_dev_info(dd, "Setting partition keys\n");
	for (i = 0; i < hfi1_get_npkeys(dd); i++) {
		reg |= (ppd->pkeys[i] &
			RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
			((i % 4) *
			 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
		/* Each register holds 4 PKey values.
 */
		if ((i % 4) == 3) {
			write_csr(dd, RCV_PARTITION_KEY +
				  ((i - 3) * 2), reg);
			reg = 0;
		}
	}

	/* Always enable HW pkeys check when pkeys table is set */
	add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
}

/*
 * These CSRs and memories are uninitialized on reset and must be
 * written before reading to set the ECC/parity bits.
 *
 * NOTE: All user context CSRs that are not mmaped write-only
 * (e.g. the TID flows) must be initialized even if the driver never
 * reads them.
 */
static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
{
	int i, j;

	/* CceIntMap */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP + (8 * i), 0);

	/* SendCtxtCreditReturnAddr */
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);

	/* PIO Send buffers */
	/* SDMA Send buffers */
	/*
	 * These are not normally read, and (presently) have no method
	 * to be read, so are not pre-initialized
	 */

	/* RcvHdrAddr */
	/* RcvHdrTailAddr */
	/* RcvTidFlowTable */
	for (i = 0; i < chip_rcv_contexts(dd); i++) {
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
			write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
	}

	/* RcvArray */
	for (i = 0; i < chip_rcv_array_count(dd); i++)
		hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);

	/* RcvQPMapTable */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
}

/*
 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
 */
static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
			     u64 ctrl_bits)
{
	unsigned long timeout;
	u64 reg;

	/* is the condition present? */
	reg = read_csr(dd, CCE_STATUS);
	if ((reg & status_bits) == 0)
		return;

	/* clear the condition */
	write_csr(dd, CCE_CTRL, ctrl_bits);

	/* wait for the condition to clear */
	timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
	while (1) {
		reg = read_csr(dd, CCE_STATUS);
		if ((reg & status_bits) == 0)
			return;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
				   status_bits, reg & status_bits);
			return;
		}
		udelay(1);
	}
}

/* set CCE CSRs to chip reset defaults */
static void reset_cce_csrs(struct hfi1_devdata *dd)
{
	int i;

	/* CCE_REVISION read-only */
	/* CCE_REVISION2 read-only */
	/* CCE_CTRL - bits clear automatically */
	/* CCE_STATUS read-only, use CceCtrl to clear */
	clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
	clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
	clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
	for (i = 0; i < CCE_NUM_SCRATCH; i++)
		write_csr(dd, CCE_SCRATCH + (8 * i), 0);
	/* CCE_ERR_STATUS read-only */
	write_csr(dd, CCE_ERR_MASK, 0);
	write_csr(dd, CCE_ERR_CLEAR, ~0ull);
	/* CCE_ERR_FORCE leave alone */
	for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
	/* CCE_PCIE_CTRL leave alone */
	for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
		write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
		write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
			  CCE_MSIX_TABLE_UPPER_RESETCSR);
	}
	for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
		/* CCE_MSIX_PBA read-only */
		write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
		write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
	}
	/*
	 * NOTE(review): no "+ (8 * i)" offset here, so the same register
	 * (CCE_INT_MAP[0]) is written every iteration - verify whether an
	 * indexed write was intended (compare CceIntMap loop in
	 * write_uninitialized_csrs_and_memories()).
	 */
	for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
		write_csr(dd, CCE_INT_MAP, 0);
	for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
		/* CCE_INT_STATUS read-only */
		write_csr(dd, CCE_INT_MASK + (8 * i), 0);
		write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
		/* CCE_INT_FORCE leave alone */
		/* CCE_INT_BLOCKED read-only */
	}
	for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
		write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
}

/* set MISC CSRs to chip reset defaults */
static void reset_misc_csrs(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < 32; i++) {
		write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
		write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
	}
	/*
	 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
	 * only be written 128-byte chunks
	 */
	/* init RSA engine to clear lingering errors */
	write_csr(dd, MISC_CFG_RSA_CMD, 1);
	write_csr(dd, MISC_CFG_RSA_MU, 0);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
	/* MISC_STS_8051_DIGEST read-only */
	/* MISC_STS_SBM_DIGEST read-only */
	/* MISC_STS_PCIE_DIGEST read-only */
	/* MISC_STS_FAB_DIGEST read-only */
	/* MISC_ERR_STATUS read-only */
	write_csr(dd, MISC_ERR_MASK, 0);
	write_csr(dd, MISC_ERR_CLEAR, ~0ull);
	/* MISC_ERR_FORCE leave alone */
}

/* set TXE CSRs to chip reset defaults */
static void reset_txe_csrs(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * TXE Kernel CSRs
	 */
	write_csr(dd, SEND_CTRL, 0);
	__cm_reset(dd, 0);	/* reset CM internal state */
	/* SEND_CONTEXTS read-only */
	/* SEND_DMA_ENGINES
read-only */ 13688 /* SEND_PIO_MEM_SIZE read-only */ 13689 /* SEND_DMA_MEM_SIZE read-only */ 13690 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0); 13691 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */ 13692 /* SEND_PIO_ERR_STATUS read-only */ 13693 write_csr(dd, SEND_PIO_ERR_MASK, 0); 13694 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull); 13695 /* SEND_PIO_ERR_FORCE leave alone */ 13696 /* SEND_DMA_ERR_STATUS read-only */ 13697 write_csr(dd, SEND_DMA_ERR_MASK, 0); 13698 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull); 13699 /* SEND_DMA_ERR_FORCE leave alone */ 13700 /* SEND_EGRESS_ERR_STATUS read-only */ 13701 write_csr(dd, SEND_EGRESS_ERR_MASK, 0); 13702 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull); 13703 /* SEND_EGRESS_ERR_FORCE leave alone */ 13704 write_csr(dd, SEND_BTH_QP, 0); 13705 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0); 13706 write_csr(dd, SEND_SC2VLT0, 0); 13707 write_csr(dd, SEND_SC2VLT1, 0); 13708 write_csr(dd, SEND_SC2VLT2, 0); 13709 write_csr(dd, SEND_SC2VLT3, 0); 13710 write_csr(dd, SEND_LEN_CHECK0, 0); 13711 write_csr(dd, SEND_LEN_CHECK1, 0); 13712 /* SEND_ERR_STATUS read-only */ 13713 write_csr(dd, SEND_ERR_MASK, 0); 13714 write_csr(dd, SEND_ERR_CLEAR, ~0ull); 13715 /* SEND_ERR_FORCE read-only */ 13716 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++) 13717 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0); 13718 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++) 13719 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0); 13720 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++) 13721 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0); 13722 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++) 13723 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0); 13724 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++) 13725 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0); 13726 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR); 13727 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR); 13728 /* SEND_CM_CREDIT_USED_STATUS read-only */ 13729 write_csr(dd, 
SEND_CM_TIMER_CTRL, 0); 13730 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0); 13731 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0); 13732 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0); 13733 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0); 13734 for (i = 0; i < TXE_NUM_DATA_VL; i++) 13735 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 13736 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 13737 /* SEND_CM_CREDIT_USED_VL read-only */ 13738 /* SEND_CM_CREDIT_USED_VL15 read-only */ 13739 /* SEND_EGRESS_CTXT_STATUS read-only */ 13740 /* SEND_EGRESS_SEND_DMA_STATUS read-only */ 13741 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull); 13742 /* SEND_EGRESS_ERR_INFO read-only */ 13743 /* SEND_EGRESS_ERR_SOURCE read-only */ 13744 13745 /* 13746 * TXE Per-Context CSRs 13747 */ 13748 for (i = 0; i < chip_send_contexts(dd); i++) { 13749 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0); 13750 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0); 13751 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0); 13752 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0); 13753 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0); 13754 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull); 13755 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0); 13756 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0); 13757 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0); 13758 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0); 13759 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0); 13760 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0); 13761 } 13762 13763 /* 13764 * TXE Per-SDMA CSRs 13765 */ 13766 for (i = 0; i < chip_sdma_engines(dd); i++) { 13767 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0); 13768 /* SEND_DMA_STATUS read-only */ 13769 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0); 13770 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0); 13771 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0); 13772 /* SEND_DMA_HEAD read-only */ 13773 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0); 13774 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0); 13775 /* 
SEND_DMA_IDLE_CNT read-only */
		write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
		write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
		/* SEND_DMA_DESC_FETCHED_CNT read-only */
		/* SEND_DMA_ENG_ERR_STATUS read-only */
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
		write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
		/* SEND_DMA_ENG_ERR_FORCE leave alone */
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
		write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
		write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
	}
}

/*
 * Re-initialize the receive buffers (RBufs) after waiting for any
 * in-progress receive DMA to drain.
 *
 * Expect on entry:
 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
 */
static void init_rbufs(struct hfi1_devdata *dd)
{
	u64 reg;
	int count;

	/*
	 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
	 * clear.
	 */
	count = 0;
	while (1) {
		reg = read_csr(dd, RCV_STATUS);
		if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
			    | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
			break;
		/*
		 * Give up after 1ms - maximum wait time.
		 *
		 * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
		 * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
		 *	136 KB / (66% * 250MB/s) = 844us
		 */
		if (count++ > 500) {
			dd_dev_err(dd,
				   "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
				   __func__, reg);
			break;
		}
		udelay(2); /* pause between polls - do not hammer the CSR */
	}

	/* start the init - expect RcvCtrl to be 0 */
	write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);

	/*
	 * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
	 * period after the write before RcvStatus.RxRbufInitDone is valid.
	 * The delay in the first run through the loop below is sufficient and
	 * required before the first read of RcvStatus.RxRbufInitDone.
	 */
	read_csr(dd, RCV_CTRL);

	/* wait for the init to finish */
	count = 0;
	while (1) {
		/* delay is required first time through - see above */
		udelay(2); /* pause between polls - do not hammer the CSR */
		reg = read_csr(dd, RCV_STATUS);
		if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
			break;

		/* give up after 100us - slowest possible at 33MHz is 73us */
		if (count++ > 50) {
			dd_dev_err(dd,
				   "%s: RcvStatus.RxRbufInit not set, continuing\n",
				   __func__);
			break;
		}
	}
}

/* set RXE CSRs to chip reset defaults */
static void reset_rxe_csrs(struct hfi1_devdata *dd)
{
	int i, j;

	/*
	 * RXE Kernel CSRs
	 */
	write_csr(dd, RCV_CTRL, 0);
	init_rbufs(dd);
	/* RCV_STATUS read-only */
	/* RCV_CONTEXTS read-only */
	/* RCV_ARRAY_CNT read-only */
	/* RCV_BUF_SIZE read-only */
	write_csr(dd, RCV_BTH_QP, 0);
	write_csr(dd, RCV_MULTICAST, 0);
	write_csr(dd, RCV_BYPASS, 0);
	write_csr(dd, RCV_VL15, 0);
	/* this is a clear-down */
	write_csr(dd, RCV_ERR_INFO,
		  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
	/* RCV_ERR_STATUS read-only */
	write_csr(dd, RCV_ERR_MASK, 0);
	write_csr(dd, RCV_ERR_CLEAR, ~0ull);
	/* RCV_ERR_FORCE leave alone */
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
	for (i = 0; i < 4; i++)
		write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
	for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
		write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
	for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
		clear_rsm_rule(dd, i);
	for (i = 0; i < 32; i++)
		write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);

	/*
	 * RXE Kernel and User Per-Context CSRs
	 */
	for (i = 0; i < chip_rcv_contexts(dd); i++) {
		/* kernel */
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
		/* RCV_CTXT_STATUS read-only */
		write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
		write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
		write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
		write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);

		/* user */
		/* RCV_HDR_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
		/* RCV_EGR_INDEX_TAIL read-only */
		write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
		/* RCV_EGR_OFFSET_TAIL read-only */
		for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
			write_uctxt_csr(dd, i,
					RCV_TID_FLOW_TABLE + (8 * j), 0);
		}
	}
}

/*
 * Set sc2vl tables.
13929 * 13930 * They power on to zeros, so to avoid send context errors 13931 * they need to be set: 13932 * 13933 * SC 0-7 -> VL 0-7 (respectively) 13934 * SC 15 -> VL 15 13935 * otherwise 13936 * -> VL 0 13937 */ 13938 static void init_sc2vl_tables(struct hfi1_devdata *dd) 13939 { 13940 int i; 13941 /* init per architecture spec, constrained by hardware capability */ 13942 13943 /* HFI maps sent packets */ 13944 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL( 13945 0, 13946 0, 0, 1, 1, 13947 2, 2, 3, 3, 13948 4, 4, 5, 5, 13949 6, 6, 7, 7)); 13950 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL( 13951 1, 13952 8, 0, 9, 0, 13953 10, 0, 11, 0, 13954 12, 0, 13, 0, 13955 14, 0, 15, 15)); 13956 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL( 13957 2, 13958 16, 0, 17, 0, 13959 18, 0, 19, 0, 13960 20, 0, 21, 0, 13961 22, 0, 23, 0)); 13962 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL( 13963 3, 13964 24, 0, 25, 0, 13965 26, 0, 27, 0, 13966 28, 0, 29, 0, 13967 30, 0, 31, 0)); 13968 13969 /* DC maps received packets */ 13970 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL( 13971 15_0, 13972 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 13973 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15)); 13974 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL( 13975 31_16, 13976 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 13977 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0)); 13978 13979 /* initialize the cached sc2vl values consistently with h/w */ 13980 for (i = 0; i < 32; i++) { 13981 if (i < 8 || i == 15) 13982 *((u8 *)(dd->sc2vl) + i) = (u8)i; 13983 else 13984 *((u8 *)(dd->sc2vl) + i) = 0; 13985 } 13986 } 13987 13988 /* 13989 * Read chip sizes and then reset parts to sane, disabled, values. We cannot 13990 * depend on the chip going through a power-on reset - a driver may be loaded 13991 * and unloaded many times. 13992 * 13993 * Do not write any CSR values to the chip in this routine - there may be 13994 * a reset following the (possible) FLR in this routine. 
 *
 */
static int init_chip(struct hfi1_devdata *dd)
{
	int i;
	int ret = 0;

	/*
	 * Put the HFI CSRs in a known state.
	 * Combine this with a DC reset.
	 *
	 * Stop the device from doing anything while we do a
	 * reset.  We know there are no other active users of
	 * the device since we are now in charge.  Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */

	/* disable send contexts and SDMA engines */
	write_csr(dd, SEND_CTRL, 0);
	for (i = 0; i < chip_send_contexts(dd); i++)
		write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
	for (i = 0; i < chip_sdma_engines(dd); i++)
		write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
	/* disable port (turn off RXE inbound traffic) and contexts */
	write_csr(dd, RCV_CTRL, 0);
	/*
	 * NOTE(review): this loop rewrites the same base CSR every pass and
	 * never uses 'i'; write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0) looks
	 * intended - confirm against the register spec before changing.
	 */
	for (i = 0; i < chip_rcv_contexts(dd); i++)
		write_csr(dd, RCV_CTXT_CTRL, 0);
	/* mask all interrupt sources */
	for (i = 0; i < CCE_NUM_INT_CSRS; i++)
		write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);

	/*
	 * DC Reset: do a full DC reset before the register clear.
	 * A recommended length of time to hold is one CSR read,
	 * so reread the CceDcCtrl.  Then, hold the DC in reset
	 * across the clear.
	 */
	write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
	(void)read_csr(dd, CCE_DC_CTRL);

	if (use_flr) {
		/*
		 * A FLR will reset the SPC core and part of the PCIe.
		 * The parts that need to be restored have already been
		 * saved.
		 */
		dd_dev_info(dd, "Resetting CSRs with FLR\n");

		/* do the FLR, the DC reset will remain */
		pcie_flr(dd->pcidev);

		/* restore command and BARs */
		ret = restore_pci_variables(dd);
		if (ret) {
			dd_dev_err(dd, "%s: Could not restore PCI variables\n",
				   __func__);
			return ret;
		}

		if (is_ax(dd)) {
			/* A0 hardware: repeat the FLR and PCI restore */
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			pcie_flr(dd->pcidev);
			ret = restore_pci_variables(dd);
			if (ret) {
				dd_dev_err(dd, "%s: Could not restore PCI variables\n",
					   __func__);
				return ret;
			}
		}
	} else {
		dd_dev_info(dd, "Resetting CSRs with writes\n");
		reset_cce_csrs(dd);
		reset_txe_csrs(dd);
		reset_rxe_csrs(dd);
		reset_misc_csrs(dd);
	}
	/* clear the DC reset */
	write_csr(dd, CCE_DC_CTRL, 0);

	/* Set the LED off */
	setextled(dd, 0);

	/*
	 * Clear the QSFP reset.
	 * An FLR enforces a 0 on all out pins.  The driver does not touch
	 * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
	 * anything plugged constantly in reset, if it pays attention
	 * to RESET_N.
	 * Prime examples of this are optical cables.  Set all pins high.
	 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
	write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
	write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
	return ret;
}

/* set early per-device variables derived from chip capabilities */
static void init_early_variables(struct hfi1_devdata *dd)
{
	int i;

	/* assign link credit variables */
	dd->vau = CM_VAU;
	dd->link_credits = CM_GLOBAL_CREDITS;
	if (is_ax(dd))
		dd->link_credits--;
	dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
	dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
	if (dd->vl15_init > dd->link_credits)
		dd->vl15_init = dd->link_credits;

	write_uninitialized_csrs_and_memories(dd);

	if (HFI1_CAP_IS_KSET(PKEY_CHECK))
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_pportdata *ppd = &dd->pport[i];

			set_partition_keys(ppd);
		}
	init_sc2vl_tables(dd);
}

/* program the KDETH QP prefix into the send and receive BTH QP CSRs */
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
	write_csr(dd, SEND_BTH_QP,
		  (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
		  SEND_BTH_QP_KDETH_QP_SHIFT);

	write_csr(dd, RCV_BTH_QP,
		  (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
		  RCV_BTH_QP_KDETH_QP_SHIFT);
}

/**
 * hfi1_get_qp_map - get qp map
 * @dd: device data
 * @idx: index to read
 *
 * Returns the byte at position @idx of the RcvQpMapTable (each 64-bit
 * register holds 8 one-byte entries); the u8 return truncates the
 * shifted register value to the selected byte.
 */
u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
{
	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);

	reg >>= (idx % 8) * 8;
	return reg;
}

/**
 * init_qpmap_table - init qp map
 * @dd: device data
 * @first_ctxt: first context
 * @last_ctxt: last context
 *
 * This routine sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * from first_ctxt to last_ctxt.
 *
 * The first/last looks ahead to having specialized
 * receive contexts for mgmt and bypass.  Normal
 * verbs traffic will assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
			     u32 first_ctxt,
			     u32 last_ctxt)
{
	u64 reg = 0;
	u64 regno = RCV_QP_MAP_TABLE;
	int i;
	u64 ctxt = first_ctxt;

	for (i = 0; i < 256; i++) {
		/* pack 8 one-byte entries per 64-bit register */
		reg |= ctxt << (8 * (i % 8));
		ctxt++;
		if (ctxt > last_ctxt)
			ctxt = first_ctxt;
		if (i % 8 == 7) {
			write_csr(dd, regno, reg);
			reg = 0;
			regno += 8;
		}
	}

	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
		    | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}

struct rsm_map_table {
	u64 map[NUM_MAP_REGS];	/* RSM map image; 8 one-byte entries per reg */
	unsigned int used;	/* number of map entries consumed so far */
};

/* arguments for one RSM rule - see add_rsm_rule() for the CSR layout */
struct rsm_rule_data {
	u8 offset;		/* offset added to the extracted map index */
	u8 pkt_type;		/* packet type this rule applies to */
	u32 field1_off;		/* select/match pair 1 field offset */
	u32 field2_off;		/* select/match pair 2 field offset */
	u32 index1_off;		/* index 1 extraction offset */
	u32 index1_width;	/* index 1 extraction width */
	u32 index2_off;		/* index 2 extraction offset */
	u32 index2_width;	/* index 2 extraction width */
	u32 mask1;		/* match 1 mask */
	u32 value1;		/* match 1 value */
	u32 mask2;		/* match 2 mask */
	u32 value2;		/* match 2 value */
};

/*
 * Return an initialized RMT map table for users to fill in.  OK if it
 * returns NULL, indicating no table.
 */
static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */

	rmt = kmalloc_obj(*rmt);
	if (rmt) {
		memset(rmt->map, rxcontext, sizeof(rmt->map));
		rmt->used = 0;
	}

	return rmt;
}

/*
 * Write the final RMT map table to the chip and enable RSM.  OK if
 * table is NULL.  The caller owns and frees the table.
 */
static void complete_rsm_map_table(struct hfi1_devdata *dd,
				   struct rsm_map_table *rmt)
{
	int i;

	if (rmt) {
		/* write table to chip */
		for (i = 0; i < NUM_MAP_REGS; i++)
			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);

		/* enable RSM */
		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
	}
}

/* Is a receive side mapping rule */
static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
}

/*
 * Add a receive side mapping rule.
 */
static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
			 struct rsm_rule_data *rrd)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
		  (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
		  1ull << rule_index | /* enable bit */
		  (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
		  (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
		  (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
		  (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
		  (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
		  (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
		  (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
		  (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
		  (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
		  (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}

/*
 * Clear a receive side mapping rule.
 */
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
}

/*
 * Return the number of RSM map table entries that will be used for QOS,
 * or 0 if QOS is not in use.  Optionally returns the number of qpn bits
 * (*mp) and vl bits (*np) chosen.
 */
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
			   unsigned int *np)
{
	int i;
	unsigned int m, n;
	uint max_by_vl = 0;

	/* is QOS active at all? */
	if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
	    num_vls == 1 ||
	    krcvqsset <= 1)
		goto no_qos;

	/* determine bits for qpn */
	for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
		if (krcvqs[i] > max_by_vl)
			max_by_vl = krcvqs[i];
	if (max_by_vl > 32)
		goto no_qos;
	m = ilog2(__roundup_pow_of_two(max_by_vl));

	/* determine bits for vl */
	n = ilog2(__roundup_pow_of_two(num_vls));

	/* reject if too much is used */
	if ((m + n) > 7)
		goto no_qos;

	if (mp)
		*mp = m;
	if (np)
		*np = n;

	return 1 << (m + n);

no_qos:
	if (mp)
		*mp = 0;
	if (np)
		*np = 0;
	return 0;
}

/**
 * init_qos - init RX qos
 * @dd: device data
 * @rmt: RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	unsigned int rmt_entries;
	u64 reg;

	if (!rmt)
		goto bail;
	rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
	if (rmt_entries == 0)
		goto bail;
	qpns_per_vl = 1 << m;

	/* enough room in the map table? */
	rmt_entries = 1 << (m + n);
	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
		goto bail;

	/* add qos entries to the RSM map table */
	for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
		unsigned tctxt;

		for (qpn = 0, tctxt = ctxt;
		     krcvqs[i] && qpn < qpns_per_vl; qpn++) {
			unsigned idx, regoff, regidx;

			/* generate the index the hardware will produce */
			idx = rmt->used + ((qpn << n) ^ i);
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
			/* replace default with context number */
			reg = rmt->map[regidx];
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
				 << regoff);
			reg |= (u64)(tctxt++) << regoff;
			rmt->map[regidx] = reg;
			/* wrap back to the VL's first context */
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}

	rrd.offset = rmt->used;
	rrd.pkt_type = 2;
	rrd.field1_off = LRH_BTH_MATCH_OFFSET;
	rrd.field2_off = LRH_SC_MATCH_OFFSET;
	rrd.index1_off = LRH_SC_SELECT_OFFSET;
	rrd.index1_width = n;
	rrd.index2_off = QPN_SELECT_OFFSET;
	rrd.index2_width = m + n;
	rrd.mask1 = LRH_BTH_MASK;
	rrd.value1 = LRH_BTH_VALUE;
	rrd.mask2 = LRH_SC_MASK;
	rrd.value2 = LRH_SC_VALUE;

	/* add rule 0 */
	add_rsm_rule(dd, RSM_INS_VERBS, &rrd);

	/* mark RSM map entries as used */
	rmt->used += rmt_entries;
	/* map everything else to the mcast/err/vl15 context */
	init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
	dd->qos_shift = n + 1;
	return;
bail:
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}

/*
 * Set up the RSM rule that steers Expected FECN packets to the context
 * block start..num_rcv_contexts-1, using identity entries in @rmt.
 */
static void init_fecn_handling(struct hfi1_devdata *dd,
			       struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	u64 reg;
	int i, idx, regoff, regidx, start;
	u8 offset;
	u32 total_cnt;

	if (HFI1_CAP_IS_KSET(TID_RDMA))
		/* Exclude context 0 */
		start = 1;
	else
		start = dd->first_dyn_alloc_ctxt;

	total_cnt = dd->num_rcv_contexts - start;

	/* there needs to be enough room in the map table */
	if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
		dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
		return;
	}

	/*
	 * RSM will extract the destination context as an index into the
	 * map table.  The destination contexts are a sequential block
	 * in the range start...num_rcv_contexts-1 (inclusive).
	 * Map entries are accessed as offset + extracted value.  Adjust
	 * the added offset so this sequence can be placed anywhere in
	 * the table - as long as the entries themselves do not wrap.
	 * There are only enough bits in offset for the table size, so
	 * start with that to allow for a "negative" offset.
	 */
	offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);

	for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
	     i++, idx++) {
		/* replace with identity mapping */
		regoff = (idx % 8) * 8;
		regidx = idx / 8;
		reg = rmt->map[regidx];
		reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
		reg |= (u64)i << regoff;
		rmt->map[regidx] = reg;
	}

	/*
	 * For RSM intercept of Expected FECN packets:
	 * o packet type 0 - expected
	 * o match on F (bit 95), using select/match 1, and
	 * o match on SH (bit 133), using select/match 2.
	 *
	 * Use index 1 to extract the 8-bit receive context from DestQP
	 * (start at bit 64).  Use that as the RSM map table index.
	 */
	rrd.offset = offset;
	rrd.pkt_type = 0;
	rrd.field1_off = 95;
	rrd.field2_off = 133;
	rrd.index1_off = 64;
	rrd.index1_width = 8;
	rrd.index2_off = 0;
	rrd.index2_width = 0;
	rrd.mask1 = 1;
	rrd.value1 = 1;
	rrd.mask2 = 1;
	rrd.value2 = 1;

	/* add rule 1 */
	add_rsm_rule(dd, RSM_INS_FECN, &rrd);

	rmt->used += total_cnt;
}

/* true if @spare more entries starting at @start would overflow the RMT */
static inline bool hfi1_is_rmt_full(int start, int spare)
{
	return (start + spare) > NUM_MAP_ENTRIES;
}

/*
 * Fill the next NUM_NETDEV_MAP_ENTRIES of the on-chip RSM map table with
 * the netdev receive contexts (round-robin).  Returns false only when the
 * table has no room; returns true if the entries were written or already
 * present.
 */
static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
{
	u8 i, j;
	u8 ctx_id = 0;
	u64 reg;
	u32 regoff;
	int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
	int ctxt_count = hfi1_netdev_ctxt_count(dd);

	/* We already have contexts mapped in RMT */
	if (has_rsm_rule(dd, RSM_INS_AIP)) {
		dd_dev_info(dd, "Contexts are already mapped in RMT\n");
		return true;
	}

	if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
		dd_dev_err(dd, "Not enough RMT entries used = %d\n",
			   rmt_start);
		return false;
	}

	dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
		rmt_start,
		rmt_start + NUM_NETDEV_MAP_ENTRIES);

	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
	regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
	reg = read_csr(dd, regoff);
	for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
		/* Update map register with netdev context */
		j = (rmt_start + i) % 8;
		reg &= ~(0xffllu << (j * 8));
		reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
		/* Wrap up netdev ctx index */
		ctx_id %= ctxt_count;
		/* Write back map register */
		if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
			dev_dbg(&(dd)->pcidev->dev,
				"RMT[%d] =0x%llx\n",
				regoff - RCV_RSM_MAP_TABLE, reg);

			write_csr(dd, regoff, reg);
			regoff += 8;
			/* read-modify-write the next register, if any */
			if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
				reg = read_csr(dd, regoff);
		}
	}

	return true;
}

/* install @rrd as RSM rule @rule after mapping the netdev contexts */
static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
				 int rule, struct rsm_rule_data *rrd)
{
	if (!hfi1_netdev_update_rmt(dd)) {
		dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
		return;
	}

	add_rsm_rule(dd, rule, rrd);
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}

void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
{
	/*
	 * go through with the initialisation only if this rule actually doesn't
	 * exist yet
	 */
	if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
		int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
		struct rsm_rule_data rrd = {
			.offset = rmt_start,
			.pkt_type = IB_PACKET_TYPE,
			.field1_off = LRH_BTH_MATCH_OFFSET,
			.mask1 = LRH_BTH_MASK,
			.value1 = LRH_BTH_VALUE,
			.field2_off = BTH_DESTQP_MATCH_OFFSET,
			.mask2 = BTH_DESTQP_MASK,
			.value2 = BTH_DESTQP_VALUE,
			.index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
				      ilog2(NUM_NETDEV_MAP_ENTRIES),
			.index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
			.index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
			.index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
		};

		hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
	}
}

void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
{
	/* only actually clear the rule if it's the last user asking to do so */
	if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
		clear_rsm_rule(dd, RSM_INS_AIP);
}

static int init_rxe(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
	u64 val;

	/*
enable all receive errors */ 14594 write_csr(dd, RCV_ERR_MASK, ~0ull); 14595 14596 rmt = alloc_rsm_map_table(dd); 14597 if (!rmt) 14598 return -ENOMEM; 14599 14600 /* set up QOS, including the QPN map table */ 14601 init_qos(dd, rmt); 14602 init_fecn_handling(dd, rmt); 14603 complete_rsm_map_table(dd, rmt); 14604 /* record number of used rsm map entries for netdev */ 14605 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); 14606 kfree(rmt); 14607 14608 /* 14609 * make sure RcvCtrl.RcvWcb <= PCIe Device Control 14610 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config 14611 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one 14612 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and 14613 * Max_PayLoad_Size set to its minimum of 128. 14614 * 14615 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0 14616 * (64 bytes). Max_Payload_Size is possibly modified upward in 14617 * tune_pcie_caps() which is called after this routine. 14618 */ 14619 14620 /* Have 16 bytes (4DW) of bypass header available in header queue */ 14621 val = read_csr(dd, RCV_BYPASS); 14622 val &= ~RCV_BYPASS_HDR_SIZE_SMASK; 14623 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << 14624 RCV_BYPASS_HDR_SIZE_SHIFT); 14625 write_csr(dd, RCV_BYPASS, val); 14626 return 0; 14627 } 14628 14629 static void init_other(struct hfi1_devdata *dd) 14630 { 14631 /* enable all CCE errors */ 14632 write_csr(dd, CCE_ERR_MASK, ~0ull); 14633 /* enable *some* Misc errors */ 14634 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK); 14635 /* enable all DC errors, except LCB */ 14636 write_csr(dd, DCC_ERR_FLG_EN, ~0ull); 14637 write_csr(dd, DC_DC8051_ERR_EN, ~0ull); 14638 } 14639 14640 /* 14641 * Fill out the given AU table using the given CU. A CU is defined in terms 14642 * AUs. The table is a an encoding: given the index, how many AUs does that 14643 * represent? 14644 * 14645 * NOTE: Assumes that the register layout is the same for the 14646 * local and remote tables. 
14647 */ 14648 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu, 14649 u32 csr0to3, u32 csr4to7) 14650 { 14651 write_csr(dd, csr0to3, 14652 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT | 14653 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT | 14654 2ull * cu << 14655 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT | 14656 4ull * cu << 14657 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT); 14658 write_csr(dd, csr4to7, 14659 8ull * cu << 14660 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT | 14661 16ull * cu << 14662 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT | 14663 32ull * cu << 14664 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT | 14665 64ull * cu << 14666 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT); 14667 } 14668 14669 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu) 14670 { 14671 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3, 14672 SEND_CM_LOCAL_AU_TABLE4_TO7); 14673 } 14674 14675 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu) 14676 { 14677 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3, 14678 SEND_CM_REMOTE_AU_TABLE4_TO7); 14679 } 14680 14681 static void init_txe(struct hfi1_devdata *dd) 14682 { 14683 int i; 14684 14685 /* enable all PIO, SDMA, general, and Egress errors */ 14686 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull); 14687 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull); 14688 write_csr(dd, SEND_ERR_MASK, ~0ull); 14689 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull); 14690 14691 /* enable all per-context and per-SDMA engine errors */ 14692 for (i = 0; i < chip_send_contexts(dd); i++) 14693 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull); 14694 for (i = 0; i < chip_sdma_engines(dd); i++) 14695 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull); 14696 14697 /* set the local CU to AU mapping */ 14698 assign_local_cm_au_table(dd, dd->vcu); 14699 14700 /* 14701 * Set reasonable default for Credit Return Timer 14702 * Don't set on 
Simulator - causes it to choke. 14703 */ 14704 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) 14705 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE); 14706 } 14707 14708 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, 14709 u16 jkey) 14710 { 14711 u8 hw_ctxt; 14712 u64 reg; 14713 14714 if (!rcd || !rcd->sc) 14715 return -EINVAL; 14716 14717 hw_ctxt = rcd->sc->hw_context; 14718 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */ 14719 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) << 14720 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT); 14721 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */ 14722 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) 14723 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK; 14724 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg); 14725 /* 14726 * Enable send-side J_KEY integrity check, unless this is A0 h/w 14727 */ 14728 if (!is_ax(dd)) { 14729 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14730 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 14731 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14732 } 14733 14734 /* Enable J_KEY check on receive context. */ 14735 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK | 14736 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) << 14737 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT); 14738 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); 14739 14740 return 0; 14741 } 14742 14743 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) 14744 { 14745 u8 hw_ctxt; 14746 u64 reg; 14747 14748 if (!rcd || !rcd->sc) 14749 return -EINVAL; 14750 14751 hw_ctxt = rcd->sc->hw_context; 14752 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0); 14753 /* 14754 * Disable send-side J_KEY integrity check, unless this is A0 h/w. 14755 * This check would not have been enabled for A0 h/w, see 14756 * set_ctxt_jkey(). 
14757 */ 14758 if (!is_ax(dd)) { 14759 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14760 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 14761 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14762 } 14763 /* Turn off the J_KEY on the receive side */ 14764 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); 14765 14766 return 0; 14767 } 14768 14769 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd, 14770 u16 pkey) 14771 { 14772 u8 hw_ctxt; 14773 u64 reg; 14774 14775 if (!rcd || !rcd->sc) 14776 return -EINVAL; 14777 14778 hw_ctxt = rcd->sc->hw_context; 14779 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) << 14780 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT; 14781 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg); 14782 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14783 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; 14784 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK; 14785 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14786 14787 return 0; 14788 } 14789 14790 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) 14791 { 14792 u8 hw_ctxt; 14793 u64 reg; 14794 14795 if (!ctxt || !ctxt->sc) 14796 return -EINVAL; 14797 14798 hw_ctxt = ctxt->sc->hw_context; 14799 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE); 14800 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK; 14801 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg); 14802 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0); 14803 14804 return 0; 14805 } 14806 14807 /* 14808 * Start doing the clean up the chip. Our clean up happens in multiple 14809 * stages and this is just the first. 
14810 */ 14811 void hfi1_start_cleanup(struct hfi1_devdata *dd) 14812 { 14813 aspm_exit(dd); 14814 free_cntrs(dd); 14815 free_rcverr(dd); 14816 finish_chip_resources(dd); 14817 } 14818 14819 #define HFI_BASE_GUID(dev) \ 14820 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT)) 14821 14822 /* 14823 * Information can be shared between the two HFIs on the same ASIC 14824 * in the same OS. This function finds the peer device and sets 14825 * up a shared structure. 14826 */ 14827 static int init_asic_data(struct hfi1_devdata *dd) 14828 { 14829 unsigned long index; 14830 struct hfi1_devdata *peer; 14831 struct hfi1_asic_data *asic_data; 14832 int ret = 0; 14833 14834 /* pre-allocate the asic structure in case we are the first device */ 14835 asic_data = kzalloc_obj(*dd->asic_data); 14836 if (!asic_data) 14837 return -ENOMEM; 14838 14839 xa_lock_irq(&hfi1_dev_table); 14840 /* Find our peer device */ 14841 xa_for_each(&hfi1_dev_table, index, peer) { 14842 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) && 14843 dd->unit != peer->unit) 14844 break; 14845 } 14846 14847 if (peer) { 14848 /* use already allocated structure */ 14849 dd->asic_data = peer->asic_data; 14850 kfree(asic_data); 14851 } else { 14852 dd->asic_data = asic_data; 14853 mutex_init(&dd->asic_data->asic_resource_mutex); 14854 } 14855 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ 14856 xa_unlock_irq(&hfi1_dev_table); 14857 14858 /* first one through - set up i2c devices */ 14859 if (!peer) 14860 ret = set_up_i2c(dd, dd->asic_data); 14861 14862 return ret; 14863 } 14864 14865 /* 14866 * Set dd->boardname. Use a generic name if a name is not returned from 14867 * EFI variable space. 14868 * 14869 * Return 0 on success, -ENOMEM if space could not be allocated. 
14870 */ 14871 static int obtain_boardname(struct hfi1_devdata *dd) 14872 { 14873 /* generic board description */ 14874 const char generic[] = 14875 "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series"; 14876 unsigned long size; 14877 int ret; 14878 14879 ret = read_hfi1_efi_var(dd, "description", &size, 14880 (void **)&dd->boardname); 14881 if (ret) { 14882 dd_dev_info(dd, "Board description not found\n"); 14883 /* use generic description */ 14884 dd->boardname = kstrdup(generic, GFP_KERNEL); 14885 if (!dd->boardname) 14886 return -ENOMEM; 14887 } 14888 return 0; 14889 } 14890 14891 /* 14892 * Check the interrupt registers to make sure that they are mapped correctly. 14893 * It is intended to help user identify any mismapping by VMM when the driver 14894 * is running in a VM. This function should only be called before interrupt 14895 * is set up properly. 14896 * 14897 * Return 0 on success, -EINVAL on failure. 14898 */ 14899 static int check_int_registers(struct hfi1_devdata *dd) 14900 { 14901 u64 reg; 14902 u64 all_bits = ~(u64)0; 14903 u64 mask; 14904 14905 /* Clear CceIntMask[0] to avoid raising any interrupts */ 14906 mask = read_csr(dd, CCE_INT_MASK); 14907 write_csr(dd, CCE_INT_MASK, 0ull); 14908 reg = read_csr(dd, CCE_INT_MASK); 14909 if (reg) 14910 goto err_exit; 14911 14912 /* Clear all interrupt status bits */ 14913 write_csr(dd, CCE_INT_CLEAR, all_bits); 14914 reg = read_csr(dd, CCE_INT_STATUS); 14915 if (reg) 14916 goto err_exit; 14917 14918 /* Set all interrupt status bits */ 14919 write_csr(dd, CCE_INT_FORCE, all_bits); 14920 reg = read_csr(dd, CCE_INT_STATUS); 14921 if (reg != all_bits) 14922 goto err_exit; 14923 14924 /* Restore the interrupt mask */ 14925 write_csr(dd, CCE_INT_CLEAR, all_bits); 14926 write_csr(dd, CCE_INT_MASK, mask); 14927 14928 return 0; 14929 err_exit: 14930 write_csr(dd, CCE_INT_MASK, mask); 14931 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n"); 14932 return -EINVAL; 14933 } 14934 14935 /** 14936 * 
hfi1_init_dd() - Initialize most of the dd structure.
 * @dd: the dd device
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 *
 * The ordering of the steps below matters; several calls carry
 * before/after requirements that are noted inline.
 */
int hfi1_init_dd(struct hfi1_devdata *dd)
{
	struct pci_dev *pdev = dd->pcidev;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
	static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
		"RTL FPGA emulation",
		"Functional simulator"
	};
	/* upstream bridge; NULL when the driver runs in a VM */
	struct pci_dev *parent = pdev->bus->self;
	u32 sdma_engines = chip_sdma_engines(dd);

	/* per-port initialization */
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;
		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
		ppd->link_width_supported =
			OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
			OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_supported =
			ppd->link_width_supported;
		/* start out enabling only 4X */
		ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
		ppd->link_width_downgrade_enabled =
					ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

		/* clamp the num_vls module parameter to the supported range */
		if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
		    num_vls > HFI1_MAX_VLS_SUPPORTED) {
			dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
				   num_vls, HFI1_MAX_VLS_SUPPORTED);
			num_vls = HFI1_MAX_VLS_SUPPORTED;
		}
		ppd->vls_supported = num_vls;
		ppd->vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
		for (vl = 0; vl < num_vls; vl++)
			dd->vld[vl].mtu = hfi1_max_mtu;
		/* VL15 is reserved for management traffic */
		dd->vld[15].mtu = MAX_MAD_PACKET;
		/*
		 * Set the initial values to reasonable default, will be set
		 * for real when link is up.
		 */
		ppd->overrun_threshold = 0x4;
		ppd->phy_error_threshold = 0xf;
		ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
		ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
		ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
		ppd->host_link_state = HLS_DN_OFFLINE;
		init_vl_arb_caches(ppd);
	}

	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
	ret = hfi1_pcie_ddinit(dd, pdev);
	if (ret < 0)
		goto bail_free;

	/* Save PCI space registers to rewrite after device reset */
	ret = save_pci_variables(dd);
	if (ret < 0)
		goto bail_cleanup;

	/* extract chip major/minor revision from the revision CSR */
	dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MAJOR_MASK;
	dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component. In this case, it is likely that the driver
	 * is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}

	/*
	 * obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration
	 */
	reg = read_csr(dd, CCE_REVISION2);
	dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
					& CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
	dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
	dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
	dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
		    dd->icode < ARRAY_SIZE(inames) ?
		    inames[dd->icode] : "unknown", (int)dd->irev);

	/* speeds the hardware can support */
	dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
	dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
	dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

	/* fix up link widths for emulation _p */
	ppd = dd->pport;
	if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
		ppd->link_width_supported =
			ppd->link_width_enabled =
			ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}
	/* insure num_vls isn't larger than number of sdma engines */
	if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
		dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
			   num_vls, sdma_engines);
		num_vls = sdma_engines;
		ppd->vls_supported = sdma_engines;
		ppd->vls_operational = ppd->vls_supported;
	}

	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds. If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
	dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
	if (dd->rcv_intr_timeout_csr >
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
		dd->rcv_intr_timeout_csr =
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
	else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
		dd->rcv_intr_timeout_csr = 1;

	/* needs to be done before we look for the peer device */
	read_guid(dd);

	/* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;

	/* obtain chip sizes, reset chip CSRs */
	ret = init_chip(dd);
	if (ret)
		goto bail_cleanup;

	/* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;

	/* call before get_platform_config(), after init_chip_resources() */
	ret = eprom_init(dd);
	if (ret)
		/*
		 * NOTE(review): this lands on bail_free_rcverr, which frees
		 * rcverr and cntrs even though init_rcverr()/init_cntrs()
		 * have not run yet - presumably those free routines tolerate
		 * an uninitialized state; confirm.
		 */
		goto bail_free_rcverr;

	/* Needs to be called before hfi1_firmware_init */
	get_platform_config(dd);

	/* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()  - the chip will not initiate any PCIe transactions
	 * - pcie_speeds() - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *			    downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * This should probably occur in hfi1_pcie_init(), but historically
	 * occurs after the do_pcie_gen3_transition() code.
	 */
	tune_pcie_caps(dd);

	/* start setting dd values and adjusting CSRs */
	init_early_variables(dd);

	parse_platform_config(dd);

	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;

	snprintf(dd->boardversion, BOARD_VERS_MAX,
		 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 (u32)dd->majrev,
		 (u32)dd->minrev,
		 (dd->revision >> CCE_REVISION_SW_SHIFT)
		    & CCE_REVISION_SW_MASK);

	/* alloc AIP rx data */
	ret = hfi1_alloc_rx(dd);
	if (ret)
		goto bail_cleanup;

	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial RXE CSRs */
	ret = init_rxe(dd);
	if (ret)
		goto bail_cleanup;

	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);

	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;

	/* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_create_kctxts(dd);
	if (ret)
		goto bail_cleanup;

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
	aspm_init(dd);

	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;

	/* sdma init */
	for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}

	/* use contexts created by hfi1_create_kctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;

	ret = hfi1_comp_vectors_set_up(dd);
	if (ret)
		goto bail_clear_intr;

	/* set up LCB access - must be after set_up_interrupts() */
	init_lcb_access(dd);

	/*
	 * Serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
	snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
		 (dd->base_guid & 0xFFFFFF) |
		     ((dd->base_guid >> 11) & 0xF000000));

	/* the OUI is the top three bytes of the base guid */
	dd->oui1 = dd->base_guid >> 56 & 0xFF;
	dd->oui2 = dd->base_guid >> 48 & 0xFF;
	dd->oui3 = dd->base_guid >> 40 & 0xFF;

	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
	if (ret)
		goto bail_clear_intr;

	thermal_init(dd);

	ret = init_cntrs(dd);
	if (ret)
		goto bail_clear_intr;

	ret = init_rcverr(dd);
	if (ret)
		goto bail_free_cntrs;

	init_completion(&dd->user_comp);

	/* The user refcount starts with one to inidicate an active device */
	refcount_set(&dd->user_refcount, 1);

	goto bail;

	/* unwind in reverse order of acquisition */
bail_free_rcverr:
	free_rcverr(dd);
bail_free_cntrs:
	free_cntrs(dd);
bail_clear_intr:
	hfi1_comp_vectors_clean_up(dd);
	msix_clean_up_interrupts(dd);
bail_cleanup:
	hfi1_free_rx(dd);
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
bail:
	return ret;
}

/*
 * Compute the static-rate-control delay, in egress cycles, needed to
 * slow a packet of dw_len dwords from the port's current egress rate
 * down to desired_egress_rate. Returns 0 when no slow-down is needed
 * (or when the desired rate is the -1 "unset" sentinel).
 */
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

	if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */

	if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */

	/* extra cycles the slower rate needs for dw_len * 4 bytes */
	delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
		       egress_cycles(dw_len * 4, current_egress_rate);

	return (u16)delta_cycles;
}

/**
 * 
create_pbc - build a pbc for transmission 15292 * @ppd: info of physical Hfi port 15293 * @flags: special case flags or-ed in built pbc 15294 * @srate_mbs: static rate 15295 * @vl: vl 15296 * @dw_len: dword length (header words + data words + pbc words) 15297 * 15298 * Create a PBC with the given flags, rate, VL, and length. 15299 * 15300 * NOTE: The PBC created will not insert any HCRC - all callers but one are 15301 * for verbs, which does not use this PSM feature. The lone other caller 15302 * is for the diagnostic interface which calls this if the user does not 15303 * supply their own PBC. 15304 */ 15305 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl, 15306 u32 dw_len) 15307 { 15308 u64 pbc, delay = 0; 15309 15310 if (unlikely(srate_mbs)) 15311 delay = delay_cycles(ppd, srate_mbs, dw_len); 15312 15313 pbc = flags 15314 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT) 15315 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT) 15316 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT 15317 | (dw_len & PBC_LENGTH_DWS_MASK) 15318 << PBC_LENGTH_DWS_SHIFT; 15319 15320 return pbc; 15321 } 15322 15323 #define SBUS_THERMAL 0x4f 15324 #define SBUS_THERM_MONITOR_MODE 0x1 15325 15326 #define THERM_FAILURE(dev, ret, reason) \ 15327 dd_dev_err((dd), \ 15328 "Thermal sensor initialization failed: %s (%d)\n", \ 15329 (reason), (ret)) 15330 15331 /* 15332 * Initialize the thermal sensor. 15333 * 15334 * After initialization, enable polling of thermal sensor through 15335 * SBus interface. In order for this to work, the SBus Master 15336 * firmware has to be loaded due to the fact that the HW polling 15337 * logic uses SBus interrupts, which are not supported with 15338 * default firmware. Otherwise, no data will be returned through 15339 * the ASIC_STS_THERM CSR. 
15340 */ 15341 static int thermal_init(struct hfi1_devdata *dd) 15342 { 15343 int ret = 0; 15344 15345 if (dd->icode != ICODE_RTL_SILICON || 15346 check_chip_resource(dd, CR_THERM_INIT, NULL)) 15347 return ret; 15348 15349 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT); 15350 if (ret) { 15351 THERM_FAILURE(dd, ret, "Acquire SBus"); 15352 return ret; 15353 } 15354 15355 dd_dev_info(dd, "Initializing thermal sensor\n"); 15356 /* Disable polling of thermal readings */ 15357 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0); 15358 msleep(100); 15359 /* Thermal Sensor Initialization */ 15360 /* Step 1: Reset the Thermal SBus Receiver */ 15361 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15362 RESET_SBUS_RECEIVER, 0); 15363 if (ret) { 15364 THERM_FAILURE(dd, ret, "Bus Reset"); 15365 goto done; 15366 } 15367 /* Step 2: Set Reset bit in Thermal block */ 15368 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15369 WRITE_SBUS_RECEIVER, 0x1); 15370 if (ret) { 15371 THERM_FAILURE(dd, ret, "Therm Block Reset"); 15372 goto done; 15373 } 15374 /* Step 3: Write clock divider value (100MHz -> 2MHz) */ 15375 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1, 15376 WRITE_SBUS_RECEIVER, 0x32); 15377 if (ret) { 15378 THERM_FAILURE(dd, ret, "Write Clock Div"); 15379 goto done; 15380 } 15381 /* Step 4: Select temperature mode */ 15382 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3, 15383 WRITE_SBUS_RECEIVER, 15384 SBUS_THERM_MONITOR_MODE); 15385 if (ret) { 15386 THERM_FAILURE(dd, ret, "Write Mode Sel"); 15387 goto done; 15388 } 15389 /* Step 5: De-assert block reset and start conversion */ 15390 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0, 15391 WRITE_SBUS_RECEIVER, 0x2); 15392 if (ret) { 15393 THERM_FAILURE(dd, ret, "Write Reset Deassert"); 15394 goto done; 15395 } 15396 /* Step 5.1: Wait for first conversion (21.5ms per spec) */ 15397 msleep(22); 15398 15399 /* Enable polling of thermal readings */ 15400 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1); 15401 15402 /* Set initialized flag */ 
15403 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0); 15404 if (ret) 15405 THERM_FAILURE(dd, ret, "Unable to set thermal init flag"); 15406 15407 done: 15408 release_chip_resource(dd, CR_SBUS); 15409 return ret; 15410 } 15411 15412 static void handle_temp_err(struct hfi1_devdata *dd) 15413 { 15414 struct hfi1_pportdata *ppd = &dd->pport[0]; 15415 /* 15416 * Thermal Critical Interrupt 15417 * Put the device into forced freeze mode, take link down to 15418 * offline, and put DC into reset. 15419 */ 15420 dd_dev_emerg(dd, 15421 "Critical temperature reached! Forcing device into freeze mode!\n"); 15422 dd->flags |= HFI1_FORCED_FREEZE; 15423 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT); 15424 /* 15425 * Shut DC down as much and as quickly as possible. 15426 * 15427 * Step 1: Take the link down to OFFLINE. This will cause the 15428 * 8051 to put the Serdes in reset. However, we don't want to 15429 * go through the entire link state machine since we want to 15430 * shutdown ASAP. Furthermore, this is not a graceful shutdown 15431 * but rather an attempt to save the chip. 15432 * Code below is almost the same as quiet_serdes() but avoids 15433 * all the extra work and the sleeps. 15434 */ 15435 ppd->driver_link_ready = 0; 15436 ppd->link_enabled = 0; 15437 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) | 15438 PLS_OFFLINE); 15439 /* 15440 * Step 2: Shutdown LCB and 8051 15441 * After shutdown, do not restore DC_CFG_RESET value. 15442 */ 15443 dc_shutdown(dd); 15444 } 15445