1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 /* 28 * File : ecore_int.c 29 */ 30 #include <sys/cdefs.h> 31 #include "bcm_osal.h" 32 #include "ecore.h" 33 #include "ecore_spq.h" 34 #include "reg_addr.h" 35 #include "ecore_gtt_reg_addr.h" 36 #include "ecore_init_ops.h" 37 #include "ecore_rt_defs.h" 38 #include "ecore_int.h" 39 #include "reg_addr.h" 40 #include "ecore_hw.h" 41 #include "ecore_sriov.h" 42 #include "ecore_vf.h" 43 #include "ecore_hw_defs.h" 44 #include "ecore_hsi_common.h" 45 #include "ecore_mcp.h" 46 #include "ecore_dbg_fw_funcs.h" 47 48 #ifdef DIAG 49 /* This is nasty, but diag is using the drv_dbg_fw_funcs.c [non-ecore flavor], 50 * and so the functions are lacking ecore prefix. 51 * If there would be other clients needing this [or if the content that isn't 52 * really optional there would increase], we'll need to re-think this. 53 */ 54 enum dbg_status dbg_read_attn(struct ecore_hwfn *dev, 55 struct ecore_ptt *ptt, 56 enum block_id block, 57 enum dbg_attn_type attn_type, 58 bool clear_status, 59 struct dbg_attn_block_result *results); 60 61 enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev, 62 struct dbg_attn_block_result *results); 63 64 const char* dbg_get_status_str(enum dbg_status status); 65 66 #define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \ 67 dbg_read_attn(hwfn, ptt, id, type, clear, results) 68 #define ecore_dbg_parse_attn(hwfn, results) \ 69 dbg_parse_attn(hwfn, results) 70 #define ecore_dbg_get_status_str(status) \ 71 dbg_get_status_str(status) 72 #endif 73 74 struct ecore_pi_info { 75 ecore_int_comp_cb_t comp_cb; 76 void *cookie; /* Will be sent to the completion callback function */ 77 }; 78 79 struct ecore_sb_sp_info { 80 struct ecore_sb_info sb_info; 81 /* per protocol index data */ 82 struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4]; 83 }; 84 85 enum ecore_attention_type { 86 ECORE_ATTN_TYPE_ATTN, 87 ECORE_ATTN_TYPE_PARITY, 88 }; 89 90 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 91 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 92 93 struct aeu_invert_reg_bit { 94 char bit_name[30]; 95 96 #define ATTENTION_PARITY (1 << 0) 97 98 #define ATTENTION_LENGTH_MASK (0x00000ff0) 99 #define ATTENTION_LENGTH_SHIFT (4) 100 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 101 ATTENTION_LENGTH_SHIFT) 102 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 103 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 104 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 105 ATTENTION_PARITY) 106 107 /* Multiple bits start with this offset */ 108 #define ATTENTION_OFFSET_MASK (0x000ff000) 109 #define ATTENTION_OFFSET_SHIFT (12) 110 111 #define ATTENTION_BB_MASK (0x00700000) 112 #define ATTENTION_BB_SHIFT (20) 113 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 114 #define ATTENTION_BB_DIFFERENT (1 << 23) 115 116 #define ATTENTION_CLEAR_ENABLE (1 << 28) 117 unsigned int flags; 118 119 /* Callback to call if attention will be triggered */ 120 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); 121 122 enum block_id block_index; 123 }; 124 125 struct aeu_invert_reg { 126 struct aeu_invert_reg_bit bits[32]; 127 }; 128 129 #define MAX_ATTN_GRPS (8) 130 #define NUM_ATTN_REGS (9) 131 132 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) 133 { 134 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 135 136 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 137 tmp); 138 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 139 0xffffffff); 140 141 return ECORE_SUCCESS; 142 } 143 144 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) 145 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) 146 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) 147 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) 148 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) 149 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) 150 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) 151 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) 152 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) 153 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) 154 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) 155 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 156 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 157 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 158 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) 159 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 160 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) 161 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 162 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) 163 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 164 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) 165 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 166 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) 167 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 168 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) 169 { 170 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID); 171 172 /* Disabled VF access */ 173 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { 174 u32 addr, data; 175 176 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 177 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); 178 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 179 PSWHST_REG_VF_DISABLED_ERROR_DATA); 180 DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n", 181 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >> 182 ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), 183 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >> 184 ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), 185 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> 186 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), 187 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> 188 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), 189 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> 190 ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), 191 addr); 192 } 193 194 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 195 PSWHST_REG_INCORRECT_ACCESS_VALID); 196 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { 197 u32 addr, data, length; 198 199 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 200 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 201 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 202 PSWHST_REG_INCORRECT_ACCESS_DATA); 203 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 204 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 205 206 DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 207 addr, length, 208 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> 209 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), 210 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> 211 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), 212 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> 213 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), 214 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> 215 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), 216 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> 217 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), 218 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> 219 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), 220 data); 221 } 222 223 /* TODO - We know 'some' of these are legal due to virtualization, 224 * but is it true for all of them? 225 */ 226 return ECORE_SUCCESS; 227 } 228 229 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) 230 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) 231 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) 232 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) 233 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) 234 #define ECORE_GRC_ATTENTION_PF_MASK (0xf) 235 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) 236 #define ECORE_GRC_ATTENTION_VF_SHIFT (4) 237 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) 238 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) 239 #define ECORE_GRC_ATTENTION_PRIV_VF (0) 240 static const char* grc_timeout_attn_master_to_str(u8 master) 241 { 242 switch(master) { 243 case 1: return "PXP"; 244 case 2: return "MCP"; 245 case 3: return "MSDM"; 246 case 4: return "PSDM"; 247 case 5: return "YSDM"; 248 case 6: return "USDM"; 249 case 7: return "TSDM"; 250 case 8: return "XSDM"; 251 case 9: return "DBU"; 252 case 10: return "DMAE"; 253 default: 254 return "Unkown"; 255 } 256 } 257 258 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) 259 { 260 u32 tmp, tmp2; 261 262 /* We've already cleared the timeout interrupt register, so we learn 263 * of interrupts via the validity register 264 */ 265 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 266 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 267 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) 268 goto out; 269 270 /* Read the GRC timeout information */ 271 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 272 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 273 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 274 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 275 276 DP_NOTICE(p_hwfn->p_dev, false, 277 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 278 tmp2, tmp, 279 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" 280 : "Read from", 281 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, 282 grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >> 283 ECORE_GRC_ATTENTION_MASTER_SHIFT), 284 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), 285 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> 286 ECORE_GRC_ATTENTION_PRIV_SHIFT) == 287 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)", 288 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 289 ECORE_GRC_ATTENTION_VF_SHIFT); 290 291 out: 292 /* Regardles of anything else, clean the validity bit */ 293 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 294 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 295 return ECORE_SUCCESS; 296 } 297 298 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 299 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 300 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 301 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 302 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 303 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 304 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 305 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 306 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 307 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 308 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 309 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 310 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 311 312 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 313 struct ecore_ptt *p_ptt) 314 { 315 u32 tmp; 316 317 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 318 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 319 u32 addr_lo, addr_hi, details; 320 321 addr_lo = ecore_rd(p_hwfn, p_ptt, 322 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 323 addr_hi = ecore_rd(p_hwfn, p_ptt, 324 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 325 details = ecore_rd(p_hwfn, p_ptt, 326 PGLUE_B_REG_TX_ERR_WR_DETAILS); 327 328 DP_NOTICE(p_hwfn, false, 329 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 330 addr_hi, addr_lo, details, 331 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 332 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 333 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 334 tmp, 335 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0), 336 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0), 337 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); 338 } 339 340 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 341 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 342 u32 addr_lo, addr_hi, details; 343 344 addr_lo = ecore_rd(p_hwfn, p_ptt, 345 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 346 addr_hi = ecore_rd(p_hwfn, p_ptt, 347 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 348 details = ecore_rd(p_hwfn, p_ptt, 349 PGLUE_B_REG_TX_ERR_RD_DETAILS); 350 351 DP_NOTICE(p_hwfn, false, 352 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 353 addr_hi, addr_lo, details, 354 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 355 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 356 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 357 tmp, 358 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0), 359 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0), 360 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); 361 } 362 363 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 364 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 365 DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp); 366 367 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 368 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 369 u32 addr_hi, addr_lo; 370 371 addr_lo = ecore_rd(p_hwfn, p_ptt, 372 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 373 addr_hi = ecore_rd(p_hwfn, p_ptt, 374 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 375 376 DP_NOTICE(p_hwfn, false, 377 "ICPL error - %08x [Address %08x:%08x]\n", 378 tmp, addr_hi, addr_lo); 379 } 380 381 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 382 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 383 u32 addr_hi, addr_lo, details; 384 385 addr_lo = ecore_rd(p_hwfn, p_ptt, 386 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 387 addr_hi = ecore_rd(p_hwfn, p_ptt, 388 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 389 details = ecore_rd(p_hwfn, p_ptt, 390 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 391 392 DP_NOTICE(p_hwfn, false, 393 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 394 details, tmp, addr_hi, addr_lo); 395 } 396 397 /* Clear the indications */ 398 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 399 400 return ECORE_SUCCESS; 401 } 402 403 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 404 { 405 return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 406 } 407 408 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 409 { 410 DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 411 412 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 413 414 return ECORE_INVAL; 415 } 416 417 static enum _ecore_status_t 418 ecore_general_attention_35(struct ecore_hwfn *p_hwfn) 419 { 420 DP_INFO(p_hwfn, "General attention 35!\n"); 421 422 return ECORE_SUCCESS; 423 } 424 425 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 426 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 427 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 428 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 429 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 430 431 #define ECORE_DB_REC_COUNT 10 432 #define ECORE_DB_REC_INTERVAL 100 433 434 /* assumes sticky overflow indication was set for this PF */ 435 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn, 436 struct ecore_ptt *p_ptt) 437 { 438 u8 count = ECORE_DB_REC_COUNT; 439 u32 usage = 1; 440 441 /* wait for usage to zero or count to run out. This is necessary since 442 * EDPM doorbell transactions can take multiple 64b cycles, and as such 443 * can "split" over the pci. Possibly, the doorbell drop can happen with 444 * half an EDPM in the queue and other half dropped. Another EDPM 445 * doorbell to the same address (from doorbell recovery mechanism or 446 * from the doorbelling entity) could have first half dropped and second 447 * half interperted as continuation of the first. To prevent such 448 * malformed doorbells from reaching the device, flush the queue before 449 * releaseing the overflow sticky indication. 450 */ 451 while (count-- && usage) { 452 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 453 OSAL_UDELAY(ECORE_DB_REC_INTERVAL); 454 } 455 456 /* should have been depleted by now */ 457 if (usage) { 458 DP_NOTICE(p_hwfn->p_dev, false, 459 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 460 ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage); 461 return ECORE_TIMEOUT; 462 } 463 464 /* flush any pending (e)dpm as they may never arrive */ 465 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 466 467 /* release overflow sticky indication (stop silently dropping everything) */ 468 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 469 470 /* repeat all last doorbells (doorbell drop recovery) */ 471 ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); 472 473 return ECORE_SUCCESS; 474 } 475 476 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) 477 { 478 u32 int_sts, first_drop_reason, details, address, overflow, 479 all_drops_reason; 480 struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt; 481 enum _ecore_status_t rc; 482 483 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 484 DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n", 485 int_sts); 486 487 /* int_sts may be zero since all PFs were interrupted for doorbell 488 * overflow but another one already handled it. Can abort here. If 489 * This PF also requires overflow recovery we will be interrupted again. 490 * The masked almost full indication may also be set. Ignoring. 491 */ 492 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) 493 return ECORE_SUCCESS; 494 495 /* check if db_drop or overflow happened */ 496 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 497 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 498 /* obtain data about db drop/overflow */ 499 first_drop_reason = ecore_rd(p_hwfn, p_ptt, 500 DORQ_REG_DB_DROP_REASON) & 501 ECORE_DORQ_ATTENTION_REASON_MASK; 502 details = ecore_rd(p_hwfn, p_ptt, 503 DORQ_REG_DB_DROP_DETAILS); 504 address = ecore_rd(p_hwfn, p_ptt, 505 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 506 overflow = ecore_rd(p_hwfn, p_ptt, 507 DORQ_REG_PF_OVFL_STICKY); 508 all_drops_reason = ecore_rd(p_hwfn, p_ptt, 509 DORQ_REG_DB_DROP_DETAILS_REASON); 510 511 /* log info */ 512 DP_NOTICE(p_hwfn->p_dev, false, 513 "Doorbell drop occurred\n" 514 "Address\t\t0x%08x\t(second BAR address)\n" 515 "FID\t\t0x%04x\t\t(Opaque FID)\n" 516 "Size\t\t0x%04x\t\t(in bytes)\n" 517 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 518 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n" 519 "Overflow\t0x%x\t\t(a per PF indication)\n", 520 address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE), 521 GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4, 522 first_drop_reason, all_drops_reason, overflow); 523 524 /* if this PF caused overflow, initiate recovery */ 525 if (overflow) { 526 rc = ecore_db_rec_attn(p_hwfn, p_ptt); 527 if (rc != ECORE_SUCCESS) 528 return rc; 529 } 530 531 /* clear the doorbell drop details and prepare for next drop */ 532 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 533 534 /* mark interrupt as handeld (note: even if drop was due to a diffrent 535 * reason than overflow we mark as handled) 536 */ 537 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR, 538 DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 539 540 /* if there are no indications otherthan drop indications, success */ 541 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 542 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 543 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 544 return ECORE_SUCCESS; 545 } 546 547 /* some other indication was present - non recoverable */ 548 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 549 550 return ECORE_INVAL; 551 } 552 553 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) 554 { 555 #ifndef ASIC_ONLY 556 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { 557 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 558 TM_REG_INT_STS_1); 559 560 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | 561 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 562 return ECORE_INVAL; 563 564 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | 565 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 566 DP_INFO(p_hwfn, "TM attention on emulation - most likely results of clock-ratios\n"); 567 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); 568 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | 569 TM_REG_INT_MASK_1_PEND_TASK_SCAN; 570 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); 571 572 return ECORE_SUCCESS; 573 } 574 #endif 575 576 return ECORE_INVAL; 577 } 578 579 /* Instead of major changes to the data-structure, we have a some 'special' 580 * identifiers for sources that changed meaning between adapters. 581 */ 582 enum aeu_invert_reg_special_type { 583 AEU_INVERT_REG_SPECIAL_CNIG_0, 584 AEU_INVERT_REG_SPECIAL_CNIG_1, 585 AEU_INVERT_REG_SPECIAL_CNIG_2, 586 AEU_INVERT_REG_SPECIAL_CNIG_3, 587 AEU_INVERT_REG_SPECIAL_MAX, 588 }; 589 590 static struct aeu_invert_reg_bit 591 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 592 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 593 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 594 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 595 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 596 }; 597 598 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 599 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = 600 { 601 { 602 { /* After Invert 1 */ 603 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 604 } 605 }, 606 607 { 608 { /* After Invert 2 */ 609 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 610 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 611 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, 612 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 613 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 614 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 615 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 616 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 617 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS}, 618 } 619 }, 620 621 { 622 { /* After Invert 3 */ 623 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 624 } 625 }, 626 627 { 628 { /* After Invert 4 */ 629 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID}, 630 {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 631 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID}, 632 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 633 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) , OSAL_NULL, BLOCK_NWS}, 634 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 635 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS}, 636 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 637 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM}, 638 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 639 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM}, 640 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 641 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 642 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 643 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 644 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 645 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 646 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 647 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 648 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 649 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 650 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 651 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 652 } 653 }, 654 655 { 656 { /* After Invert 5 */ 657 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 658 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 659 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 660 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 661 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 662 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 663 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 664 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 665 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 666 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 667 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 668 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 669 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 670 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 671 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 672 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 673 } 674 }, 675 676 { 677 { /* After Invert 6 */ 678 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 679 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 680 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 681 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 682 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 683 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 684 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 685 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 686 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 687 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 688 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 689 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 690 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 691 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 692 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 693 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 694 } 695 }, 696 697 { 698 { /* After Invert 7 */ 699 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 700 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 701 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 702 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 703 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 704 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 705 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 706 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 707 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 708 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 709 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 710 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 711 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 712 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 713 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 714 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 715 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 716 } 717 }, 718 719 { 720 { /* After Invert 8 */ 721 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 722 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 723 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 724 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 725 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 726 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 727 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 728 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 729 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 730 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 731 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 732 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 733 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 734 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 735 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 736 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 737 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 738 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 739 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 740 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 741 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 742 } 743 }, 744 745 { 746 { /* After Invert 9 */ 747 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 748 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 749 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 750 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 751 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 752 } 753 }, 754 755 }; 756 757 static struct aeu_invert_reg_bit * 758 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, 759 struct aeu_invert_reg_bit *p_bit) 760 { 761 if (!ECORE_IS_BB(p_hwfn->p_dev)) 762 return p_bit; 763 764 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 765 return p_bit; 766 767 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 768 ATTENTION_BB_SHIFT]; 769 } 770 771 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, 772 struct aeu_invert_reg_bit *p_bit) 773 { 774 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & 775 ATTENTION_PARITY); 776 } 777 778 #define ATTN_STATE_BITS (0xfff) 779 #define ATTN_BITS_MASKABLE (0x3ff) 780 struct ecore_sb_attn_info { 781 /* Virtual & Physical address of the SB */ 782 struct atten_status_block *sb_attn; 783 dma_addr_t sb_phys; 784 785 /* Last seen running index */ 786 u16 index; 787 788 /* A mask of the AEU bits resulting in a parity error */ 789 u32 parity_mask[NUM_ATTN_REGS]; 790 791 /* A pointer to the attention description structure */ 792 struct aeu_invert_reg *p_aeu_desc; 793 794 /* Previously asserted attentions, which are still unasserted */ 795 u16 known_attn; 796 797 /* Cleanup address for the link's general hw attention */ 798 u32 mfw_attn_addr; 799 }; 800 801 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 802 struct ecore_sb_attn_info *p_sb_desc) 803 { 804 u16 rc = 0, index; 805 806 OSAL_MMIOWB(p_hwfn->p_dev); 807 808 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 809 if (p_sb_desc->index != index) { 810 p_sb_desc->index = index; 811 rc = ECORE_SB_ATT_IDX; 812 } 813 814 OSAL_MMIOWB(p_hwfn->p_dev); 815 816 return rc; 817 } 818 819 /** 820 * @brief ecore_int_assertion - handles asserted attention bits 821 * 822 * @param p_hwfn 823 * @param asserted_bits newly asserted bits 824 * @return enum _ecore_status_t 825 */ 826 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 827 u16 asserted_bits) 828 { 829 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 830 u32 igu_mask; 831 832 /* Mask the source of the attention in the IGU */ 833 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 834 IGU_REG_ATTENTION_ENABLE); 835 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 836 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 837 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 838 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 839 840 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 841 "inner known ATTN state: 0x%04x --> 0x%04x\n", 842 sb_attn_sw->known_attn, 843 sb_attn_sw->known_attn | asserted_bits); 844 sb_attn_sw->known_attn |= asserted_bits; 845 846 /* Handle MCP events */ 847 if (asserted_bits & 0x100) { 848 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 849 /* Clean the MCP attention */ 850 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 851 sb_attn_sw->mfw_attn_addr, 0); 852 } 853 854 /* FIXME - this will change once we'll have GOOD gtt definitions */ 855 DIRECT_REG_WR(p_hwfn, 856 (u8 OSAL_IOMEM*)p_hwfn->regview + 857 GTT_BAR0_MAP_REG_IGU_CMD + 858 ((IGU_CMD_ATTN_BIT_SET_UPPER - 859 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); 860 861 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", 862 asserted_bits); 863 864 return ECORE_SUCCESS; 865 } 866 867 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, 868 enum block_id id, enum dbg_attn_type type, 869 bool b_clear) 870 { 871 struct dbg_attn_block_result attn_results; 872 enum dbg_status status; 873 874 OSAL_MEMSET(&attn_results, 0, sizeof(attn_results)); 875 876 status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 877 b_clear, &attn_results); 878 #ifdef ATTN_DESC 879 if (status != DBG_STATUS_OK) 880 DP_NOTICE(p_hwfn, true, 881 "Failed to parse attention information [status: %s]\n", 882 ecore_dbg_get_status_str(status)); 883 else 884 ecore_dbg_parse_attn(p_hwfn, &attn_results); 885 #else 886 if (status != DBG_STATUS_OK) 887 DP_NOTICE(p_hwfn, true, 888 "Failed to parse attention information [status: %d]\n", 889 status); 890 else 891 ecore_dbg_print_attn(p_hwfn, &attn_results); 892 #endif 893 } 894 895 /** 896 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single 897 * cause of the attention 898 * 899 * @param p_hwfn 900 * @param p_aeu - descriptor of an AEU bit which caused the attention 901 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 902 * this bit to this group. 903 * @param bit_index - index of this bit in the aeu_en_reg 904 * 905 * @return enum _ecore_status_t 906 */ 907 static enum _ecore_status_t 908 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, 909 struct aeu_invert_reg_bit *p_aeu, 910 u32 aeu_en_reg, 911 const char *p_bit_name, 912 u32 bitmask) 913 { 914 enum _ecore_status_t rc = ECORE_INVAL; 915 bool b_fatal = false; 916 917 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 918 p_bit_name, bitmask); 919 920 /* Call callback before clearing the interrupt status */ 921 if (p_aeu->cb) { 922 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 923 p_bit_name); 924 rc = p_aeu->cb(p_hwfn); 925 } 926 927 if (rc != ECORE_SUCCESS) 928 b_fatal = true; 929 930 /* Print HW block interrupt registers */ 931 if (p_aeu->block_index != MAX_BLOCK_ID) 932 ecore_int_attn_print(p_hwfn, p_aeu->block_index, 933 ATTN_TYPE_INTERRUPT, !b_fatal); 934 935 /* Reach assertion if attention is fatal */ 936 if (b_fatal) { 937 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", 938 p_bit_name); 939 940 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 941 } 942 943 /* Prevent this Attention from being asserted in the future */ 944 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || 945 p_hwfn->p_dev->attn_clr_en) { 946 u32 val; 947 u32 mask = ~bitmask; 948 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 949 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); 950 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 951 p_bit_name); 952 } 953 954 return rc; 955 } 956 957 /** 958 * @brief ecore_int_deassertion_parity - handle a single parity AEU source 959 * 960 * @param p_hwfn 961 * @param p_aeu - descriptor of an AEU bit which caused the parity 962 * @param aeu_en_reg - address of the AEU enable register 963 * @param bit_index 964 */ 965 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, 966 struct aeu_invert_reg_bit *p_aeu, 967 u32 aeu_en_reg, u8 bit_index) 968 { 969 u32 block_id = p_aeu->block_index, mask, val; 970 971 DP_NOTICE(p_hwfn->p_dev, false, 972 "%s parity attention is set [address 0x%08x, bit %d]\n", 973 p_aeu->bit_name, aeu_en_reg, bit_index); 974 975 if (block_id != MAX_BLOCK_ID) { 976 ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 977 978 /* In A0, there's a single parity bit for several blocks */ 979 if (block_id == BLOCK_BTB) { 980 ecore_int_attn_print(p_hwfn, BLOCK_OPTE, 981 ATTN_TYPE_PARITY, false); 982 ecore_int_attn_print(p_hwfn, BLOCK_MCP, 983 ATTN_TYPE_PARITY, false); 984 } 985 } 986 987 /* Prevent this parity error from being re-asserted */ 988 mask = ~(0x1 << bit_index); 989 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 990 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 991 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 992 p_aeu->bit_name); 993 } 994 995 /** 996 * @brief - handles deassertion of previously asserted attentions. 997 * 998 * @param p_hwfn 999 * @param deasserted_bits - newly deasserted bits 1000 * @return enum _ecore_status_t 1001 * 1002 */ 1003 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, 1004 u16 deasserted_bits) 1005 { 1006 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1007 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1008 u8 i, j, k, bit_idx; 1009 enum _ecore_status_t rc = ECORE_SUCCESS; 1010 1011 /* Read the attention registers in the AEU */ 1012 for (i = 0; i < NUM_ATTN_REGS; i++) { 1013 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1014 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1015 i * 0x4); 1016 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1017 "Deasserted bits [%d]: %08x\n", 1018 i, aeu_inv_arr[i]); 1019 } 1020 1021 /* Handle parity attentions first */ 1022 for (i = 0; i < NUM_ATTN_REGS; i++) 1023 { 1024 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1025 u32 parities; 1026 1027 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1028 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1029 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1030 1031 /* Skip register in which no parity bit is currently set */ 1032 if (!parities) 1033 continue; 1034 1035 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1036 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1037 1038 if (ecore_int_is_parity_flag(p_hwfn, p_bit) && 1039 !!(parities & (1 << bit_idx))) 1040 ecore_int_deassertion_parity(p_hwfn, p_bit, 1041 aeu_en, bit_idx); 1042 1043 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1044 } 1045 } 1046 1047 /* Find non-parity cause for attention and act */ 1048 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1049 struct aeu_invert_reg_bit *p_aeu; 1050 1051 /* Handle only groups whose attention is currently deasserted */ 1052 if (!(deasserted_bits & (1 << k))) 1053 continue; 1054 1055 for (i = 0; i < NUM_ATTN_REGS; i++) { 1056 u32 bits; 1057 1058 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1059 i * sizeof(u32) + 1060 k * sizeof(u32) * NUM_ATTN_REGS; 1061 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1062 bits = aeu_inv_arr[i] & en; 1063 1064 /* Skip if no bit from this group is currently set */ 1065 if (!bits) 1066 continue; 1067 1068 /* Find all set bits from current register which belong 1069 * to current group, making them responsible for the 1070 * previous assertion. 1071 */ 1072 for (j = 0, bit_idx = 0; bit_idx < 32; j++) 1073 { 1074 long unsigned int bitmask; 1075 u8 bit, bit_len; 1076 1077 /* Need to account bits with changed meaning */ 1078 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1079 p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu); 1080 1081 bit = bit_idx; 1082 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1083 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) { 1084 /* Skip Parity */ 1085 bit++; 1086 bit_len--; 1087 } 1088 1089 /* Find the bits relating to HW-block, then 1090 * shift so they'll become LSB. 1091 */ 1092 bitmask = bits & (((1 << bit_len) - 1) << bit); 1093 bitmask >>= bit; 1094 1095 if (bitmask) { 1096 u32 flags = p_aeu->flags; 1097 char bit_name[30]; 1098 u8 num; 1099 1100 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask, 1101 bit_len); 1102 1103 /* Some bits represent more than a 1104 * a single interrupt. Correctly print 1105 * their name. 1106 */ 1107 if (ATTENTION_LENGTH(flags) > 2 || 1108 ((flags & ATTENTION_PAR_INT) && 1109 ATTENTION_LENGTH(flags) > 1)) 1110 OSAL_SNPRINTF(bit_name, 30, 1111 p_aeu->bit_name, 1112 num); 1113 else 1114 OSAL_STRNCPY(bit_name, 1115 p_aeu->bit_name, 1116 30); 1117 1118 /* We now need to pass bitmask in its 1119 * correct position. 1120 */ 1121 bitmask <<= bit; 1122 1123 /* Handle source of the attention */ 1124 ecore_int_deassertion_aeu_bit(p_hwfn, 1125 p_aeu, 1126 aeu_en, 1127 bit_name, 1128 bitmask); 1129 } 1130 1131 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1132 } 1133 } 1134 } 1135 1136 /* Clear IGU indication for the deasserted bits */ 1137 /* FIXME - this will change once we'll have GOOD gtt definitions */ 1138 DIRECT_REG_WR(p_hwfn, 1139 (u8 OSAL_IOMEM*)p_hwfn->regview + 1140 GTT_BAR0_MAP_REG_IGU_CMD + 1141 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1142 IGU_CMD_INT_ACK_BASE) << 3), 1143 ~((u32)deasserted_bits)); 1144 1145 /* Unmask deasserted attentions in IGU */ 1146 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1147 IGU_REG_ATTENTION_ENABLE); 1148 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1149 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1150 1151 /* Clear deassertion from inner state */ 1152 sb_attn_sw->known_attn &= ~deasserted_bits; 1153 1154 return rc; 1155 } 1156 1157 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) 1158 { 1159 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1160 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1161 u16 index = 0, asserted_bits, deasserted_bits; 1162 u32 attn_bits = 0, attn_acks = 0; 1163 enum _ecore_status_t rc = ECORE_SUCCESS; 1164 1165 /* Read current attention bits/acks - safeguard against attentions 1166 * by guaranting work on a synchronized timeframe 1167 */ 1168 do { 1169 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); 1170 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); 1171 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); 1172 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); 1173 p_sb_attn->sb_index = index; 1174 1175 /* Attention / Deassertion are meaningful (and in correct state) 1176 * only when they differ and consistent with known state - deassertion 1177 * when previous attention & current ack, and assertion when current 1178 * attention with no previous attention 1179 */ 1180 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1181 ~p_sb_attn_sw->known_attn; 1182 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1183 p_sb_attn_sw->known_attn; 1184 1185 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) 1186 DP_INFO(p_hwfn, 1187 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1188 index, attn_bits, attn_acks, asserted_bits, 1189 deasserted_bits, p_sb_attn_sw->known_attn); 1190 else if (asserted_bits == 0x100) 1191 DP_INFO(p_hwfn, 1192 "MFW indication via attention\n"); 1193 else 1194 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1195 "MFW indication [deassertion]\n"); 1196 1197 if (asserted_bits) { 1198 rc = ecore_int_assertion(p_hwfn, asserted_bits); 1199 if (rc) 1200 return rc; 1201 } 1202 1203 if (deasserted_bits) 1204 rc = ecore_int_deassertion(p_hwfn, deasserted_bits); 1205 1206 return rc; 1207 } 1208 1209 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, 1210 void OSAL_IOMEM *igu_addr, u32 ack_cons) 1211 { 1212 struct igu_prod_cons_update igu_ack = { 0 }; 1213 1214 igu_ack.sb_id_and_flags = 1215 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1216 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1217 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1218 (IGU_SEG_ACCESS_ATTN << 1219 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1220 1221 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); 1222 1223 /* Both segments (interrupts & acks) are written to same place address; 1224 * Need to guarantee all commands will be received (in-order) by HW. 1225 */ 1226 OSAL_MMIOWB(p_hwfn->p_dev); 1227 OSAL_BARRIER(p_hwfn->p_dev); 1228 } 1229 1230 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) 1231 { 1232 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; 1233 struct ecore_pi_info *pi_info = OSAL_NULL; 1234 struct ecore_sb_attn_info *sb_attn; 1235 struct ecore_sb_info *sb_info; 1236 int arr_size; 1237 u16 rc = 0; 1238 1239 if (!p_hwfn) 1240 return; 1241 1242 if (!p_hwfn->p_sp_sb) { 1243 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); 1244 return; 1245 } 1246 1247 sb_info = &p_hwfn->p_sp_sb->sb_info; 1248 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1249 if (!sb_info) { 1250 DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n"); 1251 return; 1252 } 1253 1254 if (!p_hwfn->p_sb_attn) { 1255 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); 1256 return; 1257 } 1258 sb_attn = p_hwfn->p_sb_attn; 1259 1260 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1261 p_hwfn, p_hwfn->my_id); 1262 1263 /* Disable ack for def status block. Required both for msix + 1264 * inta in non-mask mode, in inta does no harm. 1265 */ 1266 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1267 1268 /* Gather Interrupts/Attentions information */ 1269 if (!sb_info->sb_virt) { 1270 DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1271 } else { 1272 u32 tmp_index = sb_info->sb_ack; 1273 rc = ecore_sb_update_sb_idx(sb_info); 1274 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1275 "Interrupt indices: 0x%08x --> 0x%08x\n", 1276 tmp_index, sb_info->sb_ack); 1277 } 1278 1279 if (!sb_attn || !sb_attn->sb_attn) { 1280 DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n"); 1281 } else { 1282 u16 tmp_index = sb_attn->index; 1283 1284 rc |= ecore_attn_update_idx(p_hwfn, sb_attn); 1285 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1286 "Attention indices: 0x%08x --> 0x%08x\n", 1287 tmp_index, sb_attn->index); 1288 } 1289 1290 /* Check if we expect interrupts at this time. if not just ack them */ 1291 if (!(rc & ECORE_SB_EVENT_MASK)) { 1292 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1293 return; 1294 } 1295 1296 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1297 if (!p_hwfn->p_dpc_ptt) { 1298 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); 1299 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1300 return; 1301 } 1302 1303 if (rc & ECORE_SB_ATT_IDX) 1304 ecore_int_attentions(p_hwfn); 1305 1306 if (rc & ECORE_SB_IDX) { 1307 int pi; 1308 1309 /* Since we only looked at the SB index, it's possible more 1310 * than a single protocol-index on the SB incremented. 1311 * Iterate over all configured protocol indices and check 1312 * whether something happened for each. 1313 */ 1314 for (pi = 0; pi < arr_size; pi++) { 1315 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1316 if (pi_info->comp_cb != OSAL_NULL) 1317 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1318 } 1319 } 1320 1321 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { 1322 /* This should be done before the interrupts are enabled, 1323 * since otherwise a new attention will be generated. 1324 */ 1325 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1326 } 1327 1328 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1329 } 1330 1331 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) 1332 { 1333 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1334 1335 if (!p_sb) 1336 return; 1337 1338 if (p_sb->sb_attn) { 1339 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, 1340 p_sb->sb_phys, 1341 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1342 } 1343 1344 OSAL_FREE(p_hwfn->p_dev, p_sb); 1345 p_hwfn->p_sb_attn = OSAL_NULL; 1346 } 1347 1348 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, 1349 struct ecore_ptt *p_ptt) 1350 { 1351 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1352 1353 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1354 1355 sb_info->index = 0; 1356 sb_info->known_attn = 0; 1357 1358 /* Configure Attention Status Block in IGU */ 1359 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1360 DMA_LO(p_hwfn->p_sb_attn->sb_phys)); 1361 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1362 DMA_HI(p_hwfn->p_sb_attn->sb_phys)); 1363 } 1364 1365 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, 1366 struct ecore_ptt *p_ptt, 1367 void *sb_virt_addr, 1368 dma_addr_t sb_phy_addr) 1369 { 1370 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1371 int i, j, k; 1372 1373 sb_info->sb_attn = sb_virt_addr; 1374 sb_info->sb_phys = sb_phy_addr; 1375 1376 /* Set the pointer to the AEU descriptors */ 1377 sb_info->p_aeu_desc = aeu_descs; 1378 1379 /* Calculate Parity Masks */ 1380 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1381 for (i = 0; i < NUM_ATTN_REGS; i++) { 1382 /* j is array index, k is bit index */ 1383 for (j = 0, k = 0; k < 32; j++) { 1384 struct aeu_invert_reg_bit *p_aeu; 1385 1386 p_aeu = &aeu_descs[i].bits[j]; 1387 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) 1388 sb_info->parity_mask[i] |= 1 << k; 1389 1390 k += ATTENTION_LENGTH(p_aeu->flags); 1391 } 1392 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1393 "Attn Mask [Reg %d]: 0x%08x\n", 1394 i, sb_info->parity_mask[i]); 1395 } 1396 1397 /* Set the address of cleanup for the mcp attention */ 1398 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1399 MISC_REG_AEU_GENERAL_ATTN_0; 1400 1401 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 1402 } 1403 1404 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, 1405 struct ecore_ptt *p_ptt) 1406 { 1407 struct ecore_dev *p_dev = p_hwfn->p_dev; 1408 struct ecore_sb_attn_info *p_sb; 1409 dma_addr_t p_phys = 0; 1410 void *p_virt; 1411 1412 /* SB struct */ 1413 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); 1414 if (!p_sb) { 1415 DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n"); 1416 return ECORE_NOMEM; 1417 } 1418 1419 /* SB ring */ 1420 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 1421 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1422 if (!p_virt) { 1423 DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); 1424 OSAL_FREE(p_dev, p_sb); 1425 return ECORE_NOMEM; 1426 } 1427 1428 /* Attention setup */ 1429 p_hwfn->p_sb_attn = p_sb; 1430 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1431 1432 return ECORE_SUCCESS; 1433 } 1434 1435 /* coalescing timeout = timeset << (timer_res + 1) */ 1436 #define ECORE_CAU_DEF_RX_USECS 24 1437 #define ECORE_CAU_DEF_TX_USECS 48 1438 1439 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, 1440 struct cau_sb_entry *p_sb_entry, 1441 u8 pf_id, u16 vf_number, u8 vf_valid) 1442 { 1443 struct ecore_dev *p_dev = p_hwfn->p_dev; 1444 u32 cau_state; 1445 u8 timer_res; 1446 1447 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); 1448 1449 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1450 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1451 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1452 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1453 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1454 1455 cau_state = CAU_HC_DISABLE_STATE; 1456 1457 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1458 cau_state = CAU_HC_ENABLE_STATE; 1459 if (!p_dev->rx_coalesce_usecs) 1460 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; 1461 if (!p_dev->tx_coalesce_usecs) 1462 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; 1463 } 1464 1465 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1466 if (p_dev->rx_coalesce_usecs <= 0x7F) 1467 timer_res = 0; 1468 else if (p_dev->rx_coalesce_usecs <= 0xFF) 1469 timer_res = 1; 1470 else 1471 timer_res = 2; 1472 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1473 1474 if (p_dev->tx_coalesce_usecs <= 0x7F) 1475 timer_res = 0; 1476 else if (p_dev->tx_coalesce_usecs <= 0xFF) 1477 timer_res = 1; 1478 else 1479 timer_res = 2; 1480 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1481 1482 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1483 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1484 } 1485 1486 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1487 struct ecore_ptt *p_ptt, 1488 u16 igu_sb_id, u32 pi_index, 1489 enum ecore_coalescing_fsm coalescing_fsm, 1490 u8 timeset) 1491 { 1492 struct cau_pi_entry pi_entry; 1493 u32 sb_offset, pi_offset; 1494 1495 if (IS_VF(p_hwfn->p_dev)) 1496 return;/* @@@TBD MichalK- VF CAU... */ 1497 1498 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1499 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1500 1501 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1502 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) 1503 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1504 else 1505 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1506 1507 pi_offset = sb_offset + pi_index; 1508 if (p_hwfn->hw_init_done) { 1509 ecore_wr(p_hwfn, p_ptt, 1510 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1511 *((u32 *)&(pi_entry))); 1512 } else { 1513 STORE_RT_REG(p_hwfn, 1514 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1515 *((u32 *)&(pi_entry))); 1516 } 1517 } 1518 1519 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1520 struct ecore_ptt *p_ptt, 1521 struct ecore_sb_info *p_sb, u32 pi_index, 1522 enum ecore_coalescing_fsm coalescing_fsm, 1523 u8 timeset) 1524 { 1525 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id, 1526 pi_index, coalescing_fsm, timeset); 1527 } 1528 1529 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, 1530 struct ecore_ptt *p_ptt, 1531 dma_addr_t sb_phys, u16 igu_sb_id, 1532 u16 vf_number, u8 vf_valid) 1533 { 1534 struct cau_sb_entry sb_entry; 1535 1536 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1537 vf_number, vf_valid); 1538 1539 if (p_hwfn->hw_init_done) { 1540 /* Wide-bus, initialize via DMAE */ 1541 u64 phys_addr = (u64)sb_phys; 1542 1543 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr, 1544 CAU_REG_SB_ADDR_MEMORY + 1545 igu_sb_id * sizeof(u64), 2, 1546 OSAL_NULL /* default parameters */); 1547 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry, 1548 CAU_REG_SB_VAR_MEMORY + 1549 igu_sb_id * sizeof(u64), 2, 1550 OSAL_NULL /* default parameters */); 1551 } else { 1552 /* Initialize Status Block Address */ 1553 STORE_RT_REG_AGG(p_hwfn, 1554 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2, 1555 sb_phys); 1556 1557 STORE_RT_REG_AGG(p_hwfn, 1558 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2, 1559 sb_entry); 1560 } 1561 1562 /* Configure pi coalescing if set */ 1563 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1564 /* eth will open queues for all tcs, so configure all of them 1565 * properly, rather than just the active ones 1566 */ 1567 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1568 1569 u8 timeset, timer_res; 1570 u8 i; 1571 1572 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1573 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) 1574 timer_res = 0; 1575 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) 1576 timer_res = 1; 1577 else 1578 timer_res = 2; 1579 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); 1580 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1581 ECORE_COAL_RX_STATE_MACHINE, 1582 timeset); 1583 1584 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) 1585 timer_res = 0; 1586 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) 1587 timer_res = 1; 1588 else 1589 timer_res = 2; 1590 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); 1591 for (i = 0; i < num_tc; i++) { 1592 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, 1593 igu_sb_id, TX_PI(i), 1594 ECORE_COAL_TX_STATE_MACHINE, 1595 timeset); 1596 } 1597 } 1598 } 1599 1600 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, 1601 struct ecore_ptt *p_ptt, 1602 struct ecore_sb_info *sb_info) 1603 { 1604 /* zero status block and ack counter */ 1605 sb_info->sb_ack = 0; 1606 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1607 1608 if (IS_PF(p_hwfn->p_dev)) 1609 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1610 sb_info->igu_sb_id, 0, 0); 1611 } 1612 1613 struct ecore_igu_block * 1614 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf) 1615 { 1616 struct ecore_igu_block *p_block; 1617 u16 igu_id; 1618 1619 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1620 igu_id++) { 1621 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1622 1623 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1624 !(p_block->status & ECORE_IGU_STATUS_FREE)) 1625 continue; 1626 1627 if (!!(p_block->status & ECORE_IGU_STATUS_PF) == 1628 b_is_pf) 1629 return p_block; 1630 } 1631 1632 return OSAL_NULL; 1633 } 1634 1635 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, 1636 u16 vector_id) 1637 { 1638 struct ecore_igu_block *p_block; 1639 u16 igu_id; 1640 1641 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1642 igu_id++) { 1643 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1644 1645 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1646 !p_block->is_pf || 1647 p_block->vector_number != vector_id) 1648 continue; 1649 1650 return igu_id; 1651 } 1652 1653 return ECORE_SB_INVALID_IDX; 1654 } 1655 1656 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 1657 { 1658 u16 igu_sb_id; 1659 1660 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1661 if (sb_id == ECORE_SP_SB_ID) 1662 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1663 else if (IS_PF(p_hwfn->p_dev)) 1664 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1665 else 1666 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); 1667 1668 if (igu_sb_id == ECORE_SB_INVALID_IDX) 1669 DP_NOTICE(p_hwfn, true, 1670 "Slowpath SB vector %04x doesn't exist\n", 1671 sb_id); 1672 else if (sb_id == ECORE_SP_SB_ID) 1673 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1674 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1675 else 1676 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1677 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1678 1679 return igu_sb_id; 1680 } 1681 1682 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, 1683 struct ecore_ptt *p_ptt, 1684 struct ecore_sb_info *sb_info, 1685 void *sb_virt_addr, 1686 dma_addr_t sb_phy_addr, 1687 u16 sb_id) 1688 { 1689 sb_info->sb_virt = sb_virt_addr; 1690 sb_info->sb_phys = sb_phy_addr; 1691 1692 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); 1693 1694 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX) 1695 return ECORE_INVAL; 1696 1697 /* Let the igu info reference the client's SB info */ 1698 if (sb_id != ECORE_SP_SB_ID) { 1699 if (IS_PF(p_hwfn->p_dev)) { 1700 struct ecore_igu_info *p_info; 1701 struct ecore_igu_block *p_block; 1702 1703 p_info = p_hwfn->hw_info.p_igu_info; 1704 p_block = &p_info->entry[sb_info->igu_sb_id]; 1705 1706 p_block->sb_info = sb_info; 1707 p_block->status &= ~ECORE_IGU_STATUS_FREE; 1708 p_info->usage.free_cnt--; 1709 } else { 1710 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1711 } 1712 } 1713 1714 #ifdef ECORE_CONFIG_DIRECT_HWFN 1715 sb_info->p_hwfn = p_hwfn; 1716 #endif 1717 sb_info->p_dev = p_hwfn->p_dev; 1718 1719 /* The igu address will hold the absolute address that needs to be 1720 * written to for a specific status block 1721 */ 1722 if (IS_PF(p_hwfn->p_dev)) { 1723 sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview + 1724 GTT_BAR0_MAP_REG_IGU_CMD + 1725 (sb_info->igu_sb_id << 3); 1726 1727 } else { 1728 sb_info->igu_addr = 1729 (u8 OSAL_IOMEM*)p_hwfn->regview + 1730 PXP_VF_BAR0_START_IGU + 1731 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); 1732 } 1733 1734 sb_info->flags |= ECORE_SB_INFO_INIT; 1735 1736 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1737 1738 return ECORE_SUCCESS; 1739 } 1740 1741 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1742 struct ecore_sb_info *sb_info, 1743 u16 sb_id) 1744 { 1745 struct ecore_igu_info *p_info; 1746 struct ecore_igu_block *p_block; 1747 1748 if (sb_info == OSAL_NULL) 1749 return ECORE_SUCCESS; 1750 1751 /* zero status block and ack counter */ 1752 sb_info->sb_ack = 0; 1753 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1754 1755 if (IS_VF(p_hwfn->p_dev)) { 1756 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); 1757 return ECORE_SUCCESS; 1758 } 1759 1760 p_info = p_hwfn->hw_info.p_igu_info; 1761 p_block = &p_info->entry[sb_info->igu_sb_id]; 1762 1763 /* Vector 0 is reserved to Default SB */ 1764 if (p_block->vector_number == 0) { 1765 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1766 return ECORE_INVAL; 1767 } 1768 1769 /* Lose reference to client's SB info, and fix counters */ 1770 p_block->sb_info = OSAL_NULL; 1771 p_block->status |= ECORE_IGU_STATUS_FREE; 1772 p_info->usage.free_cnt++; 1773 1774 return ECORE_SUCCESS; 1775 } 1776 1777 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1778 { 1779 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1780 1781 if (!p_sb) 1782 return; 1783 1784 if (p_sb->sb_info.sb_virt) { 1785 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1786 p_sb->sb_info.sb_virt, 1787 p_sb->sb_info.sb_phys, 1788 SB_ALIGNED_SIZE(p_hwfn)); 1789 } 1790 1791 OSAL_FREE(p_hwfn->p_dev, p_sb); 1792 p_hwfn->p_sp_sb = OSAL_NULL; 1793 } 1794 1795 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1796 struct ecore_ptt *p_ptt) 1797 { 1798 struct ecore_sb_sp_info *p_sb; 1799 dma_addr_t p_phys = 0; 1800 void *p_virt; 1801 1802 /* SB struct */ 1803 p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); 1804 if (!p_sb) { 1805 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); 1806 return ECORE_NOMEM; 1807 } 1808 1809 /* SB ring */ 1810 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1811 &p_phys, 1812 SB_ALIGNED_SIZE(p_hwfn)); 1813 if (!p_virt) { 1814 DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); 1815 OSAL_FREE(p_hwfn->p_dev, p_sb); 1816 return ECORE_NOMEM; 1817 } 1818 1819 /* Status Block setup */ 1820 p_hwfn->p_sp_sb = p_sb; 1821 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1822 p_virt, p_phys, ECORE_SP_SB_ID); 1823 1824 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1825 1826 return ECORE_SUCCESS; 1827 } 1828 1829 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1830 ecore_int_comp_cb_t comp_cb, 1831 void *cookie, 1832 u8 *sb_idx, 1833 __le16 **p_fw_cons) 1834 { 1835 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1836 enum _ecore_status_t rc = ECORE_NOMEM; 1837 u8 pi; 1838 1839 /* Look for a free index */ 1840 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1841 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1842 continue; 1843 1844 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1845 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1846 *sb_idx = pi; 1847 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1848 rc = ECORE_SUCCESS; 1849 break; 1850 } 1851 1852 return rc; 1853 } 1854 1855 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, 1856 u8 pi) 1857 { 1858 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1859 1860 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1861 return ECORE_NOMEM; 1862 1863 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1864 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; 1865 1866 return ECORE_SUCCESS; 1867 } 1868 1869 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1870 { 1871 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1872 } 1873 1874 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1875 struct ecore_ptt *p_ptt, 1876 enum ecore_int_mode int_mode) 1877 { 1878 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1879 1880 #ifndef ASIC_ONLY 1881 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1882 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1883 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1884 } 1885 #endif 1886 1887 p_hwfn->p_dev->int_mode = int_mode; 1888 switch (p_hwfn->p_dev->int_mode) { 1889 case ECORE_INT_MODE_INTA: 1890 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1891 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1892 break; 1893 1894 case ECORE_INT_MODE_MSI: 1895 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1896 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1897 break; 1898 1899 case ECORE_INT_MODE_MSIX: 1900 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1901 break; 1902 case ECORE_INT_MODE_POLL: 1903 break; 1904 } 1905 1906 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1907 } 1908 1909 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1910 struct ecore_ptt *p_ptt) 1911 { 1912 #ifndef ASIC_ONLY 1913 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1914 DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n"); 1915 return; 1916 } 1917 #endif 1918 1919 /* Configure AEU signal change to produce attentions */ 1920 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1921 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1922 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1923 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1924 1925 /* Flush the writes to IGU */ 1926 OSAL_MMIOWB(p_hwfn->p_dev); 1927 1928 /* Unmask AEU signals toward IGU */ 1929 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1930 } 1931 1932 enum _ecore_status_t 1933 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1934 enum ecore_int_mode int_mode) 1935 { 1936 enum _ecore_status_t rc = ECORE_SUCCESS; 1937 1938 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1939 1940 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1941 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1942 if (rc != ECORE_SUCCESS) { 1943 DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n"); 1944 return ECORE_NORESOURCES; 1945 } 1946 p_hwfn->b_int_requested = true; 1947 } 1948 1949 /* Enable interrupt Generation */ 1950 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1951 1952 p_hwfn->b_int_enabled = 1; 1953 1954 return rc; 1955 } 1956 1957 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1958 struct ecore_ptt *p_ptt) 1959 { 1960 p_hwfn->b_int_enabled = 0; 1961 1962 if (IS_VF(p_hwfn->p_dev)) 1963 return; 1964 1965 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1966 } 1967 1968 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1969 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1970 struct ecore_ptt *p_ptt, 1971 u16 igu_sb_id, 1972 bool cleanup_set, 1973 u16 opaque_fid) 1974 { 1975 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1976 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1977 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1978 u8 type = 0; /* FIXME MichalS type??? */ 1979 1980 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1981 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1982 1983 /* USE Control Command Register to perform cleanup. There is an 1984 * option to do this using IGU bar, but then it can't be used for VFs. 1985 */ 1986 1987 /* Set the data field */ 1988 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1989 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1990 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1991 1992 /* Set the control register */ 1993 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1994 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1995 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1996 1997 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1998 1999 OSAL_BARRIER(p_hwfn->p_dev); 2000 2001 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 2002 2003 /* Flush the write to IGU */ 2004 OSAL_MMIOWB(p_hwfn->p_dev); 2005 2006 /* calculate where to read the status bit from */ 2007 sb_bit = 1 << (igu_sb_id % 32); 2008 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 2009 2010 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2011 2012 /* Now wait for the command to complete */ 2013 while (--sleep_cnt) { 2014 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2015 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 2016 break; 2017 OSAL_MSLEEP(5); 2018 } 2019 2020 if (!sleep_cnt) 2021 DP_NOTICE(p_hwfn, true, 2022 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2023 val, igu_sb_id); 2024 } 2025 2026 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2027 struct ecore_ptt *p_ptt, 2028 u16 igu_sb_id, u16 opaque, bool b_set) 2029 { 2030 struct ecore_igu_block *p_block; 2031 int pi, i; 2032 2033 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2034 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2035 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2036 igu_sb_id, p_block->function_id, p_block->is_pf, 2037 p_block->vector_number); 2038 2039 /* Set */ 2040 if (b_set) 2041 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2042 2043 /* Clear */ 2044 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2045 2046 /* Wait for the IGU SB to cleanup */ 2047 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2048 u32 val; 2049 2050 val = ecore_rd(p_hwfn, p_ptt, 2051 IGU_REG_WRITE_DONE_PENDING + 2052 ((igu_sb_id / 32) * 4)); 2053 if (val & (1 << (igu_sb_id % 32))) 2054 OSAL_UDELAY(10); 2055 else 2056 break; 2057 } 2058 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2059 DP_NOTICE(p_hwfn, true, 2060 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2061 igu_sb_id); 2062 2063 /* Clear the CAU for the SB */ 2064 for (pi = 0; pi < 12; pi++) 2065 ecore_wr(p_hwfn, p_ptt, 2066 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2067 } 2068 2069 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2070 struct ecore_ptt *p_ptt, 2071 bool b_set, 2072 bool b_slowpath) 2073 { 2074 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2075 struct ecore_igu_block *p_block; 2076 u16 igu_sb_id = 0; 2077 u32 val = 0; 2078 2079 /* @@@TBD MichalK temporary... should be moved to init-tool... */ 2080 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2081 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2082 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2083 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2084 /* end temporary */ 2085 2086 for (igu_sb_id = 0; 2087 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2088 igu_sb_id++) { 2089 p_block = &p_info->entry[igu_sb_id]; 2090 2091 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2092 !p_block->is_pf || 2093 (p_block->status & ECORE_IGU_STATUS_DSB)) 2094 continue; 2095 2096 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2097 p_hwfn->hw_info.opaque_fid, 2098 b_set); 2099 } 2100 2101 if (b_slowpath) 2102 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2103 p_info->igu_dsb_id, 2104 p_hwfn->hw_info.opaque_fid, 2105 b_set); 2106 } 2107 2108 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, 2109 struct ecore_ptt *p_ptt) 2110 { 2111 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2112 struct ecore_igu_block *p_block; 2113 int pf_sbs, vf_sbs; 2114 u16 igu_sb_id; 2115 u32 val, rval; 2116 2117 if (!RESC_NUM(p_hwfn, ECORE_SB)) { 2118 /* We're using an old MFW - have to prevent any switching 2119 * of SBs between PF and VFs as later driver wouldn't be 2120 * able to tell which belongs to which. 2121 */ 2122 p_info->b_allow_pf_vf_change = false; 2123 } else { 2124 /* Use the numbers the MFW have provided - 2125 * don't forget MFW accounts for the default SB as well. 2126 */ 2127 p_info->b_allow_pf_vf_change = true; 2128 2129 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) { 2130 DP_INFO(p_hwfn, 2131 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2132 RESC_NUM(p_hwfn, ECORE_SB) - 1, 2133 p_info->usage.cnt); 2134 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1; 2135 } 2136 2137 /* TODO - how do we learn about VF SBs from MFW? */ 2138 if (IS_PF_SRIOV(p_hwfn)) { 2139 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs; 2140 2141 if (vfs != p_info->usage.iov_cnt) 2142 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2143 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2144 p_info->usage.iov_cnt, vfs); 2145 2146 /* At this point we know how many SBs we have totally 2147 * in IGU + number of PF SBs. So we can validate that 2148 * we'd have sufficient for VF. 2149 */ 2150 if (vfs > p_info->usage.free_cnt + 2151 p_info->usage.free_cnt_iov - 2152 p_info->usage.cnt) { 2153 DP_NOTICE(p_hwfn, true, 2154 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2155 p_info->usage.free_cnt + 2156 p_info->usage.free_cnt_iov, 2157 p_info->usage.cnt, vfs); 2158 return ECORE_INVAL; 2159 } 2160 } 2161 } 2162 2163 /* Cap the number of VFs SBs by the number of VFs */ 2164 if (IS_PF_SRIOV(p_hwfn)) 2165 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs; 2166 2167 /* Mark all SBs as free, now in the right PF/VFs division */ 2168 p_info->usage.free_cnt = p_info->usage.cnt; 2169 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2170 p_info->usage.orig = p_info->usage.cnt; 2171 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2172 2173 /* We now proceed to re-configure the IGU cam to reflect the initial 2174 * configuration. We can start with the Default SB. 2175 */ 2176 pf_sbs = p_info->usage.cnt; 2177 vf_sbs = p_info->usage.iov_cnt; 2178 2179 for (igu_sb_id = p_info->igu_dsb_id; 2180 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2181 igu_sb_id++) { 2182 p_block = &p_info->entry[igu_sb_id]; 2183 val = 0; 2184 2185 if (!(p_block->status & ECORE_IGU_STATUS_VALID)) 2186 continue; 2187 2188 if (p_block->status & ECORE_IGU_STATUS_DSB) { 2189 p_block->function_id = p_hwfn->rel_pf_id; 2190 p_block->is_pf = 1; 2191 p_block->vector_number = 0; 2192 p_block->status = ECORE_IGU_STATUS_VALID | 2193 ECORE_IGU_STATUS_PF | 2194 ECORE_IGU_STATUS_DSB; 2195 } else if (pf_sbs) { 2196 pf_sbs--; 2197 p_block->function_id = p_hwfn->rel_pf_id; 2198 p_block->is_pf = 1; 2199 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2200 p_block->status = ECORE_IGU_STATUS_VALID | 2201 ECORE_IGU_STATUS_PF | 2202 ECORE_IGU_STATUS_FREE; 2203 } else if (vf_sbs) { 2204 p_block->function_id = 2205 p_hwfn->p_dev->p_iov_info->first_vf_in_pf + 2206 p_info->usage.iov_cnt - vf_sbs; 2207 p_block->is_pf = 0; 2208 p_block->vector_number = 0; 2209 p_block->status = ECORE_IGU_STATUS_VALID | 2210 ECORE_IGU_STATUS_FREE; 2211 vf_sbs--; 2212 } else { 2213 p_block->function_id = 0; 2214 p_block->is_pf = 0; 2215 p_block->vector_number = 0; 2216 } 2217 2218 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2219 p_block->function_id); 2220 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2221 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2222 p_block->vector_number); 2223 2224 /* VF entries would be enabled when VF is initializaed */ 2225 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2226 2227 rval = ecore_rd(p_hwfn, p_ptt, 2228 IGU_REG_MAPPING_MEMORY + 2229 sizeof(u32) * igu_sb_id); 2230 2231 if (rval != val) { 2232 ecore_wr(p_hwfn, p_ptt, 2233 IGU_REG_MAPPING_MEMORY + 2234 sizeof(u32) * igu_sb_id, 2235 val); 2236 2237 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2238 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2239 igu_sb_id, p_block->function_id, 2240 p_block->is_pf, p_block->vector_number, 2241 rval, val); 2242 } 2243 } 2244 2245 return 0; 2246 } 2247 2248 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, 2249 struct ecore_ptt *p_ptt) 2250 { 2251 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage; 2252 2253 /* Return all the usage indications to default prior to the reset; 2254 * The reset expects the !orig to reflect the initial status of the 2255 * SBs, and would re-calculate the originals based on those. 2256 */ 2257 p_cnt->cnt = p_cnt->orig; 2258 p_cnt->free_cnt = p_cnt->orig; 2259 p_cnt->iov_cnt = p_cnt->iov_orig; 2260 p_cnt->free_cnt_iov = p_cnt->iov_orig; 2261 p_cnt->orig = 0; 2262 p_cnt->iov_orig = 0; 2263 2264 /* TODO - we probably need to re-configure the CAU as well... */ 2265 return ecore_int_igu_reset_cam(p_hwfn, p_ptt); 2266 } 2267 2268 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, 2269 struct ecore_ptt *p_ptt, 2270 u16 igu_sb_id) 2271 { 2272 u32 val = ecore_rd(p_hwfn, p_ptt, 2273 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2274 struct ecore_igu_block *p_block; 2275 2276 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2277 2278 /* Fill the block information */ 2279 p_block->function_id = GET_FIELD(val, 2280 IGU_MAPPING_LINE_FUNCTION_NUMBER); 2281 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2282 p_block->vector_number = GET_FIELD(val, 2283 IGU_MAPPING_LINE_VECTOR_NUMBER); 2284 p_block->igu_sb_id = igu_sb_id; 2285 } 2286 2287 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, 2288 struct ecore_ptt *p_ptt) 2289 { 2290 struct ecore_igu_info *p_igu_info; 2291 struct ecore_igu_block *p_block; 2292 u32 min_vf = 0, max_vf = 0; 2293 u16 igu_sb_id; 2294 2295 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev, 2296 GFP_KERNEL, 2297 sizeof(*p_igu_info)); 2298 if (!p_hwfn->hw_info.p_igu_info) 2299 return ECORE_NOMEM; 2300 p_igu_info = p_hwfn->hw_info.p_igu_info; 2301 2302 /* Distinguish between existent and onn-existent default SB */ 2303 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX; 2304 2305 /* Find the range of VF ids whose SB belong to this PF */ 2306 if (p_hwfn->p_dev->p_iov_info) { 2307 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 2308 2309 min_vf = p_iov->first_vf_in_pf; 2310 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2311 } 2312 2313 for (igu_sb_id = 0; 2314 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2315 igu_sb_id++) { 2316 /* Read current entry; Notice it might not belong to this PF */ 2317 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2318 p_block = &p_igu_info->entry[igu_sb_id]; 2319 2320 if ((p_block->is_pf) && 2321 (p_block->function_id == p_hwfn->rel_pf_id)) { 2322 p_block->status = ECORE_IGU_STATUS_PF | 2323 ECORE_IGU_STATUS_VALID | 2324 ECORE_IGU_STATUS_FREE; 2325 2326 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2327 p_igu_info->usage.cnt++; 2328 } else if (!(p_block->is_pf) && 2329 (p_block->function_id >= min_vf) && 2330 (p_block->function_id < max_vf)) { 2331 /* Available for VFs of this PF */ 2332 p_block->status = ECORE_IGU_STATUS_VALID | 2333 ECORE_IGU_STATUS_FREE; 2334 2335 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2336 p_igu_info->usage.iov_cnt++; 2337 } 2338 2339 /* Mark the First entry belonging to the PF or its VFs 2340 * as the default SB [we'll reset IGU prior to first usage]. 2341 */ 2342 if ((p_block->status & ECORE_IGU_STATUS_VALID) && 2343 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) { 2344 p_igu_info->igu_dsb_id = igu_sb_id; 2345 p_block->status |= ECORE_IGU_STATUS_DSB; 2346 } 2347 2348 /* While this isn't suitable for all clients, limit number 2349 * of prints by having each PF print only its entries with the 2350 * exception of PF0 which would print everything. 2351 */ 2352 if ((p_block->status & ECORE_IGU_STATUS_VALID) || 2353 (p_hwfn->abs_pf_id == 0)) 2354 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2355 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2356 igu_sb_id, p_block->function_id, 2357 p_block->is_pf, p_block->vector_number); 2358 } 2359 2360 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { 2361 DP_NOTICE(p_hwfn, true, 2362 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2363 p_igu_info->igu_dsb_id); 2364 return ECORE_INVAL; 2365 } 2366 2367 /* All non default SB are considered free at this point */ 2368 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2369 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2370 2371 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2372 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2373 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, 2374 p_igu_info->usage.iov_cnt); 2375 2376 return ECORE_SUCCESS; 2377 } 2378 2379 enum _ecore_status_t 2380 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2381 u16 sb_id, bool b_to_vf) 2382 { 2383 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2384 struct ecore_igu_block *p_block = OSAL_NULL; 2385 u16 igu_sb_id = 0, vf_num = 0; 2386 u32 val = 0; 2387 2388 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) 2389 return ECORE_INVAL; 2390 2391 if (sb_id == ECORE_SP_SB_ID) 2392 return ECORE_INVAL; 2393 2394 if (!p_info->b_allow_pf_vf_change) { 2395 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); 2396 return ECORE_INVAL; 2397 } 2398 2399 /* If we're moving a SB from PF to VF, the client had to specify 2400 * which vector it wants to move. 2401 */ 2402 if (b_to_vf) { 2403 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 2404 if (igu_sb_id == ECORE_SB_INVALID_IDX) 2405 return ECORE_INVAL; 2406 } 2407 2408 /* If we're moving a SB from VF to PF, need to validate there isn't 2409 * already a line configured for that vector. 2410 */ 2411 if (!b_to_vf) { 2412 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != 2413 ECORE_SB_INVALID_IDX) 2414 return ECORE_INVAL; 2415 } 2416 2417 /* We need to validate that the SB can actually be relocated. 2418 * This would also handle the previous case where we've explicitly 2419 * stated which IGU SB needs to move. 2420 */ 2421 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2422 igu_sb_id++) { 2423 p_block = &p_info->entry[igu_sb_id]; 2424 2425 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2426 !(p_block->status & ECORE_IGU_STATUS_FREE) || 2427 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { 2428 if (b_to_vf) 2429 return ECORE_INVAL; 2430 else 2431 continue; 2432 } 2433 2434 break; 2435 } 2436 2437 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { 2438 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2439 "Failed to find a free SB to move\n"); 2440 return ECORE_INVAL; 2441 } 2442 2443 if (p_block == OSAL_NULL) { 2444 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2445 "SB address (p_block) is NULL\n"); 2446 return ECORE_INVAL; 2447 } 2448 2449 /* At this point, p_block points to the SB we want to relocate */ 2450 if (b_to_vf) { 2451 p_block->status &= ~ECORE_IGU_STATUS_PF; 2452 2453 /* It doesn't matter which VF number we choose, since we're 2454 * going to disable the line; But let's keep it in range. 2455 */ 2456 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2457 2458 p_block->function_id = (u8)vf_num; 2459 p_block->is_pf = 0; 2460 p_block->vector_number = 0; 2461 2462 p_info->usage.cnt--; 2463 p_info->usage.free_cnt--; 2464 p_info->usage.iov_cnt++; 2465 p_info->usage.free_cnt_iov++; 2466 2467 /* TODO - if SBs aren't really the limiting factor, 2468 * then it might not be accurate [in the since that 2469 * we might not need decrement the feature]. 2470 */ 2471 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2472 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2473 } else { 2474 p_block->status |= ECORE_IGU_STATUS_PF; 2475 p_block->function_id = p_hwfn->rel_pf_id; 2476 p_block->is_pf = 1; 2477 p_block->vector_number = sb_id + 1; 2478 2479 p_info->usage.cnt++; 2480 p_info->usage.free_cnt++; 2481 p_info->usage.iov_cnt--; 2482 p_info->usage.free_cnt_iov--; 2483 2484 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2485 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2486 } 2487 2488 /* Update the IGU and CAU with the new configuration */ 2489 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2490 p_block->function_id); 2491 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2492 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2493 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2494 p_block->vector_number); 2495 2496 ecore_wr(p_hwfn, p_ptt, 2497 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2498 val); 2499 2500 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2501 igu_sb_id, vf_num, 2502 p_block->is_pf ? 0 : 1); 2503 2504 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2505 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2506 igu_sb_id, p_block->function_id, 2507 p_block->is_pf, p_block->vector_number); 2508 2509 return ECORE_SUCCESS; 2510 } 2511 2512 /** 2513 * @brief Initialize igu runtime registers 2514 * 2515 * @param p_hwfn 2516 */ 2517 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2518 { 2519 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2520 2521 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2522 } 2523 2524 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2525 IGU_CMD_INT_ACK_BASE) 2526 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2527 IGU_CMD_INT_ACK_BASE) 2528 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2529 { 2530 u32 intr_status_hi = 0, intr_status_lo = 0; 2531 u64 intr_status = 0; 2532 2533 intr_status_lo = REG_RD(p_hwfn, 2534 GTT_BAR0_MAP_REG_IGU_CMD + 2535 LSB_IGU_CMD_ADDR * 8); 2536 intr_status_hi = REG_RD(p_hwfn, 2537 GTT_BAR0_MAP_REG_IGU_CMD + 2538 MSB_IGU_CMD_ADDR * 8); 2539 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2540 2541 return intr_status; 2542 } 2543 2544 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2545 { 2546 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2547 p_hwfn->b_sp_dpc_enabled = true; 2548 } 2549 2550 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2551 { 2552 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2553 if (!p_hwfn->sp_dpc) 2554 return ECORE_NOMEM; 2555 2556 return ECORE_SUCCESS; 2557 } 2558 2559 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2560 { 2561 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2562 p_hwfn->sp_dpc = OSAL_NULL; 2563 } 2564 2565 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2566 struct ecore_ptt *p_ptt) 2567 { 2568 enum _ecore_status_t rc = ECORE_SUCCESS; 2569 2570 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2571 if (rc != ECORE_SUCCESS) { 2572 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); 2573 return rc; 2574 } 2575 2576 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2577 if (rc != ECORE_SUCCESS) { 2578 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2579 return rc; 2580 } 2581 2582 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2583 if (rc != ECORE_SUCCESS) 2584 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2585 2586 return rc; 2587 } 2588 2589 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2590 { 2591 ecore_int_sp_sb_free(p_hwfn); 2592 ecore_int_sb_attn_free(p_hwfn); 2593 ecore_int_sp_dpc_free(p_hwfn); 2594 } 2595 2596 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2597 { 2598 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2599 return; 2600 2601 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2602 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2603 ecore_int_sp_dpc_setup(p_hwfn); 2604 } 2605 2606 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2607 struct ecore_sb_cnt_info *p_sb_cnt_info) 2608 { 2609 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2610 2611 if (!p_igu_info || !p_sb_cnt_info) 2612 return; 2613 2614 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2615 sizeof(*p_sb_cnt_info)); 2616 } 2617 2618 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2619 { 2620 int i; 2621 2622 for_each_hwfn(p_dev, i) 2623 p_dev->hwfns[i].b_int_requested = false; 2624 } 2625 2626 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2627 { 2628 p_dev->attn_clr_en = clr_enable; 2629 } 2630 2631 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2632 struct ecore_ptt *p_ptt, 2633 u8 timer_res, u16 sb_id, bool tx) 2634 { 2635 struct cau_sb_entry sb_entry; 2636 enum _ecore_status_t rc; 2637 2638 if (!p_hwfn->hw_init_done) { 2639 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2640 return ECORE_INVAL; 2641 } 2642 2643 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2644 sb_id * sizeof(u64), 2645 (u64)(osal_uintptr_t)&sb_entry, 2, 2646 OSAL_NULL /* default parameters */); 2647 if (rc != ECORE_SUCCESS) { 2648 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2649 return rc; 2650 } 2651 2652 if (tx) 2653 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2654 else 2655 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2656 2657 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2658 (u64)(osal_uintptr_t)&sb_entry, 2659 CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2, 2660 OSAL_NULL /* default parameters */); 2661 if (rc != ECORE_SUCCESS) { 2662 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2663 return rc; 2664 } 2665 2666 return rc; 2667 } 2668 2669 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2670 struct ecore_ptt *p_ptt, 2671 struct ecore_sb_info *p_sb, 2672 struct ecore_sb_info_dbg *p_info) 2673 { 2674 u16 sbid = p_sb->igu_sb_id; 2675 int i; 2676 2677 if (IS_VF(p_hwfn->p_dev)) 2678 return ECORE_INVAL; 2679 2680 if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2681 return ECORE_INVAL; 2682 2683 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2684 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2685 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2686 IGU_REG_CONSUMER_MEM + sbid * 4); 2687 2688 for (i = 0; i < PIS_PER_SB_E4; i++) 2689 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2690 CAU_REG_PI_MEMORY + 2691 sbid * 4 * PIS_PER_SB_E4 + i * 4); 2692 2693 return ECORE_SUCCESS; 2694 } 2695