1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 /* 28 * File : ecore_int.c 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "bcm_osal.h" 34 #include "ecore.h" 35 #include "ecore_spq.h" 36 #include "reg_addr.h" 37 #include "ecore_gtt_reg_addr.h" 38 #include "ecore_init_ops.h" 39 #include "ecore_rt_defs.h" 40 #include "ecore_int.h" 41 #include "reg_addr.h" 42 #include "ecore_hw.h" 43 #include "ecore_sriov.h" 44 #include "ecore_vf.h" 45 #include "ecore_hw_defs.h" 46 #include "ecore_hsi_common.h" 47 #include "ecore_mcp.h" 48 #include "ecore_dbg_fw_funcs.h" 49 50 #ifdef DIAG 51 /* This is nasty, but diag is using the drv_dbg_fw_funcs.c [non-ecore flavor], 52 * and so the functions are lacking ecore prefix. 53 * If there would be other clients needing this [or if the content that isn't 54 * really optional there would increase], we'll need to re-think this. 55 */ 56 enum dbg_status dbg_read_attn(struct ecore_hwfn *dev, 57 struct ecore_ptt *ptt, 58 enum block_id block, 59 enum dbg_attn_type attn_type, 60 bool clear_status, 61 struct dbg_attn_block_result *results); 62 63 enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev, 64 struct dbg_attn_block_result *results); 65 66 const char* dbg_get_status_str(enum dbg_status status); 67 68 #define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \ 69 dbg_read_attn(hwfn, ptt, id, type, clear, results) 70 #define ecore_dbg_parse_attn(hwfn, results) \ 71 dbg_parse_attn(hwfn, results) 72 #define ecore_dbg_get_status_str(status) \ 73 dbg_get_status_str(status) 74 #endif 75 76 struct ecore_pi_info { 77 ecore_int_comp_cb_t comp_cb; 78 void *cookie; /* Will be sent to the completion callback function */ 79 }; 80 81 struct ecore_sb_sp_info { 82 struct ecore_sb_info sb_info; 83 /* per protocol index data */ 84 struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4]; 85 }; 86 87 enum ecore_attention_type { 88 ECORE_ATTN_TYPE_ATTN, 89 ECORE_ATTN_TYPE_PARITY, 90 }; 91 92 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 93 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 94 95 struct aeu_invert_reg_bit { 96 char bit_name[30]; 97 98 #define ATTENTION_PARITY (1 << 0) 99 100 #define ATTENTION_LENGTH_MASK (0x00000ff0) 101 #define ATTENTION_LENGTH_SHIFT (4) 102 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 103 ATTENTION_LENGTH_SHIFT) 104 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 105 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 106 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 107 ATTENTION_PARITY) 108 109 /* Multiple bits start with this offset */ 110 #define ATTENTION_OFFSET_MASK (0x000ff000) 111 #define ATTENTION_OFFSET_SHIFT (12) 112 113 #define ATTENTION_BB_MASK (0x00700000) 114 #define ATTENTION_BB_SHIFT (20) 115 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 116 #define ATTENTION_BB_DIFFERENT (1 << 23) 117 118 #define ATTENTION_CLEAR_ENABLE (1 << 28) 119 unsigned int flags; 120 121 /* Callback to call if attention will be triggered */ 122 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); 123 124 enum block_id block_index; 125 }; 126 127 struct aeu_invert_reg { 128 struct aeu_invert_reg_bit bits[32]; 129 }; 130 131 #define MAX_ATTN_GRPS (8) 132 #define NUM_ATTN_REGS (9) 133 134 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) 135 { 136 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 137 138 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 139 tmp); 140 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 141 0xffffffff); 142 143 return ECORE_SUCCESS; 144 } 145 146 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) 147 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) 148 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) 149 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) 150 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) 151 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) 152 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) 153 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) 154 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) 155 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) 156 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) 157 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 158 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 159 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 160 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) 161 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 162 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) 163 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 164 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) 165 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 166 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) 167 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 168 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) 169 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 170 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) 171 { 172 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, PSWHST_REG_VF_DISABLED_ERROR_VALID); 173 174 /* Disabled VF access */ 175 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { 176 u32 addr, data; 177 178 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 179 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); 180 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 181 PSWHST_REG_VF_DISABLED_ERROR_DATA); 182 DP_INFO(p_hwfn->p_dev, "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x] Write [0x%02x] Addr [0x%08x]\n", 183 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >> 184 ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), 185 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >> 186 ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), 187 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> 188 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), 189 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> 190 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), 191 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> 192 ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), 193 addr); 194 } 195 196 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 197 PSWHST_REG_INCORRECT_ACCESS_VALID); 198 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { 199 u32 addr, data, length; 200 201 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 202 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 203 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 204 PSWHST_REG_INCORRECT_ACCESS_DATA); 205 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 206 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 207 208 DP_INFO(p_hwfn->p_dev, "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 209 addr, length, 210 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> 211 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), 212 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> 213 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), 214 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> 215 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), 216 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> 217 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), 218 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> 219 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), 220 (u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> 221 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), 222 data); 223 } 224 225 /* TODO - We know 'some' of these are legal due to virtualization, 226 * but is it true for all of them? 227 */ 228 return ECORE_SUCCESS; 229 } 230 231 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) 232 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) 233 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) 234 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) 235 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) 236 #define ECORE_GRC_ATTENTION_PF_MASK (0xf) 237 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) 238 #define ECORE_GRC_ATTENTION_VF_SHIFT (4) 239 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) 240 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) 241 #define ECORE_GRC_ATTENTION_PRIV_VF (0) 242 static const char* grc_timeout_attn_master_to_str(u8 master) 243 { 244 switch(master) { 245 case 1: return "PXP"; 246 case 2: return "MCP"; 247 case 3: return "MSDM"; 248 case 4: return "PSDM"; 249 case 5: return "YSDM"; 250 case 6: return "USDM"; 251 case 7: return "TSDM"; 252 case 8: return "XSDM"; 253 case 9: return "DBU"; 254 case 10: return "DMAE"; 255 default: 256 return "Unkown"; 257 } 258 } 259 260 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) 261 { 262 u32 tmp, tmp2; 263 264 /* We've already cleared the timeout interrupt register, so we learn 265 * of interrupts via the validity register 266 */ 267 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 268 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 269 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) 270 goto out; 271 272 /* Read the GRC timeout information */ 273 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 274 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 275 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 276 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 277 278 DP_NOTICE(p_hwfn->p_dev, false, 279 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 280 tmp2, tmp, 281 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" 282 : "Read from", 283 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, 284 grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >> 285 ECORE_GRC_ATTENTION_MASTER_SHIFT), 286 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), 287 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> 288 ECORE_GRC_ATTENTION_PRIV_SHIFT) == 289 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)", 290 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 291 ECORE_GRC_ATTENTION_VF_SHIFT); 292 293 out: 294 /* Regardles of anything else, clean the validity bit */ 295 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 296 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 297 return ECORE_SUCCESS; 298 } 299 300 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 301 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 302 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 303 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 304 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 305 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 306 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 307 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 308 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 309 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 310 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 311 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 312 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 313 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 314 { 315 u32 tmp; 316 317 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 318 PGLUE_B_REG_TX_ERR_WR_DETAILS2); 319 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 320 u32 addr_lo, addr_hi, details; 321 322 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 323 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 324 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 325 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 326 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 327 PGLUE_B_REG_TX_ERR_WR_DETAILS); 328 329 DP_INFO(p_hwfn, "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 330 addr_hi, addr_lo, details, 331 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 332 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 333 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 334 tmp, 335 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0), 336 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0), 337 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); 338 } 339 340 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 341 PGLUE_B_REG_TX_ERR_RD_DETAILS2); 342 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 343 u32 addr_lo, addr_hi, details; 344 345 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 346 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 347 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 348 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 349 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 350 PGLUE_B_REG_TX_ERR_RD_DETAILS); 351 352 DP_INFO(p_hwfn, "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 353 addr_hi, addr_lo, details, 354 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 355 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 356 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 357 tmp, 358 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0), 359 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0), 360 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0)); 361 } 362 363 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 364 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 365 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 366 DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp); 367 368 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 369 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 370 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 371 u32 addr_hi, addr_lo; 372 373 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 374 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 375 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 376 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 377 378 DP_INFO(p_hwfn, "ICPL eror - %08x [Address %08x:%08x]\n", 379 tmp, addr_hi, addr_lo); 380 } 381 382 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 383 PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 384 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 385 u32 addr_hi, addr_lo, details; 386 387 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 388 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 389 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 390 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 391 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 392 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 393 394 DP_INFO(p_hwfn, "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 395 details, tmp, addr_hi, addr_lo); 396 } 397 398 /* Clear the indications */ 399 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 400 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 401 402 return ECORE_SUCCESS; 403 } 404 405 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 406 { 407 DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 408 409 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 410 411 return ECORE_INVAL; 412 } 413 414 static enum _ecore_status_t 415 ecore_general_attention_35(struct ecore_hwfn *p_hwfn) 416 { 417 DP_INFO(p_hwfn, "General attention 35!\n"); 418 419 return ECORE_SUCCESS; 420 } 421 422 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 423 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 424 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 425 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 426 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 427 428 #define ECORE_DB_REC_COUNT 10 429 #define ECORE_DB_REC_INTERVAL 100 430 431 /* assumes sticky overflow indication was set for this PF */ 432 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn, 433 struct ecore_ptt *p_ptt) 434 { 435 u8 count = ECORE_DB_REC_COUNT; 436 u32 usage = 1; 437 438 /* wait for usage to zero or count to run out. This is necessary since 439 * EDPM doorbell transactions can take multiple 64b cycles, and as such 440 * can "split" over the pci. Possibly, the doorbell drop can happen with 441 * half an EDPM in the queue and other half dropped. Another EDPM 442 * doorbell to the same address (from doorbell recovery mechanism or 443 * from the doorbelling entity) could have first half dropped and second 444 * half interperted as continuation of the first. To prevent such 445 * malformed doorbells from reaching the device, flush the queue before 446 * releaseing the overflow sticky indication. 447 */ 448 while (count-- && usage) { 449 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 450 OSAL_UDELAY(ECORE_DB_REC_INTERVAL); 451 } 452 453 /* should have been depleted by now */ 454 if (usage) { 455 DP_NOTICE(p_hwfn->p_dev, false, 456 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 457 ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage); 458 return ECORE_TIMEOUT; 459 } 460 461 /* flush any pedning (e)dpm as they may never arrive */ 462 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 463 464 /* release overflow sticky indication (stop silently dropping everything) */ 465 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 466 467 /* repeat all last doorbells (doorbell drop recovery) */ 468 ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); 469 470 return ECORE_SUCCESS; 471 } 472 473 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) 474 { 475 u32 int_sts, first_drop_reason, details, address, overflow, 476 all_drops_reason; 477 struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt; 478 enum _ecore_status_t rc; 479 480 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 481 DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n", 482 int_sts); 483 484 /* check if db_drop or overflow happened */ 485 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 486 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 487 488 /* obtain data about db drop/overflow */ 489 first_drop_reason = ecore_rd(p_hwfn, p_ptt, 490 DORQ_REG_DB_DROP_REASON) & 491 ECORE_DORQ_ATTENTION_REASON_MASK; 492 details = ecore_rd(p_hwfn, p_ptt, 493 DORQ_REG_DB_DROP_DETAILS); 494 address = ecore_rd(p_hwfn, p_ptt, 495 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 496 overflow = ecore_rd(p_hwfn, p_ptt, 497 DORQ_REG_PF_OVFL_STICKY); 498 all_drops_reason = ecore_rd(p_hwfn, p_ptt, 499 DORQ_REG_DB_DROP_DETAILS_REASON); 500 501 /* log info */ 502 DP_NOTICE(p_hwfn->p_dev, false, 503 "Doorbell drop occurred\n" 504 "Address\t\t0x%08x\t(second BAR address)\n" 505 "FID\t\t0x%04x\t\t(Opaque FID)\n" 506 "Size\t\t0x%04x\t\t(in bytes)\n" 507 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 508 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n" 509 "Overflow\t0x%x\t\t(a per PF indication)\n", 510 address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE), 511 GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4, 512 first_drop_reason, all_drops_reason, overflow); 513 514 /* if this PF caused overflow, initiate recovery */ 515 if (overflow) { 516 rc = ecore_db_rec_attn(p_hwfn, p_ptt); 517 if (rc != ECORE_SUCCESS) 518 return rc; 519 } 520 521 /* clear the doorbell drop details and prepare for next drop */ 522 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 523 524 /* mark interrupt as handeld (note: even if drop was due to a diffrent 525 * reason than overflow we mark as handled) 526 */ 527 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR, 528 DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 529 530 /* if there are no indications otherthan drop indications, success */ 531 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 532 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 533 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 534 return ECORE_SUCCESS; 535 } 536 537 /* some other indication was present - non recoverable */ 538 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 539 540 return ECORE_INVAL; 541 } 542 543 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) 544 { 545 #ifndef ASIC_ONLY 546 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { 547 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 548 TM_REG_INT_STS_1); 549 550 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | 551 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 552 return ECORE_INVAL; 553 554 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | 555 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 556 DP_INFO(p_hwfn, "TM attention on emulation - most likely results of clock-ratios\n"); 557 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); 558 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | 559 TM_REG_INT_MASK_1_PEND_TASK_SCAN; 560 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); 561 562 return ECORE_SUCCESS; 563 } 564 #endif 565 566 return ECORE_INVAL; 567 } 568 569 /* Instead of major changes to the data-structure, we have a some 'special' 570 * identifiers for sources that changed meaning between adapters. 571 */ 572 enum aeu_invert_reg_special_type { 573 AEU_INVERT_REG_SPECIAL_CNIG_0, 574 AEU_INVERT_REG_SPECIAL_CNIG_1, 575 AEU_INVERT_REG_SPECIAL_CNIG_2, 576 AEU_INVERT_REG_SPECIAL_CNIG_3, 577 AEU_INVERT_REG_SPECIAL_MAX, 578 }; 579 580 static struct aeu_invert_reg_bit 581 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 582 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 583 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 584 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 585 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 586 }; 587 588 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 589 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = 590 { 591 { 592 { /* After Invert 1 */ 593 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 594 } 595 }, 596 597 { 598 { /* After Invert 2 */ 599 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 600 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 601 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, 602 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 603 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 604 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 605 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 606 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 607 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, BLOCK_PGLCS}, 608 } 609 }, 610 611 { 612 { /* After Invert 3 */ 613 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 614 } 615 }, 616 617 { 618 { /* After Invert 4 */ 619 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_fw_assertion, MAX_BLOCK_ID}, 620 {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), OSAL_NULL, MAX_BLOCK_ID}, 621 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, ecore_general_attention_35, MAX_BLOCK_ID}, 622 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 623 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) , OSAL_NULL, BLOCK_NWS}, 624 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 625 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), OSAL_NULL, BLOCK_NWS}, 626 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 627 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), OSAL_NULL, BLOCK_NWM}, 628 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 629 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), OSAL_NULL, BLOCK_NWM}, 630 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 631 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 632 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 633 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 634 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 635 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 636 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 637 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 638 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 639 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 640 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 641 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 642 } 643 }, 644 645 { 646 { /* After Invert 5 */ 647 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 648 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 649 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 650 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 651 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 652 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 653 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 654 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 655 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 656 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 657 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 658 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 659 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 660 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 661 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 662 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 663 } 664 }, 665 666 { 667 { /* After Invert 6 */ 668 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 669 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 670 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 671 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 672 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 673 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 674 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 675 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 676 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 677 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 678 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 679 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 680 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 681 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 682 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 683 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 684 } 685 }, 686 687 { 688 { /* After Invert 7 */ 689 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 690 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 691 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 692 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 693 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 694 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 695 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 696 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 697 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 698 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 699 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 700 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 701 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 702 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 703 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 704 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 705 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 706 } 707 }, 708 709 { 710 { /* After Invert 8 */ 711 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 712 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 713 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 714 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 715 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 716 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 717 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 718 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 719 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 720 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 721 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 722 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 723 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 724 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 725 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 726 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 727 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 728 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 729 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 730 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 731 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 732 } 733 }, 734 735 { 736 { /* After Invert 9 */ 737 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 738 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 739 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 740 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 741 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, MAX_BLOCK_ID }, 742 } 743 }, 744 745 }; 746 747 static struct aeu_invert_reg_bit * 748 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, 749 struct aeu_invert_reg_bit *p_bit) 750 { 751 if (!ECORE_IS_BB(p_hwfn->p_dev)) 752 return p_bit; 753 754 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 755 return p_bit; 756 757 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 758 ATTENTION_BB_SHIFT]; 759 } 760 761 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, 762 struct aeu_invert_reg_bit *p_bit) 763 { 764 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & 765 ATTENTION_PARITY); 766 } 767 768 #define ATTN_STATE_BITS (0xfff) 769 #define ATTN_BITS_MASKABLE (0x3ff) 770 struct ecore_sb_attn_info { 771 /* Virtual & Physical address of the SB */ 772 struct atten_status_block *sb_attn; 773 dma_addr_t sb_phys; 774 775 /* Last seen running index */ 776 u16 index; 777 778 /* A mask of the AEU bits resulting in a parity error */ 779 u32 parity_mask[NUM_ATTN_REGS]; 780 781 /* A pointer to the attention description structure */ 782 struct aeu_invert_reg *p_aeu_desc; 783 784 /* Previously asserted attentions, which are still unasserted */ 785 u16 known_attn; 786 787 /* Cleanup address for the link's general hw attention */ 788 u32 mfw_attn_addr; 789 }; 790 791 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 792 struct ecore_sb_attn_info *p_sb_desc) 793 { 794 u16 rc = 0, index; 795 796 OSAL_MMIOWB(p_hwfn->p_dev); 797 798 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 799 if (p_sb_desc->index != index) { 800 p_sb_desc->index = index; 801 rc = ECORE_SB_ATT_IDX; 802 } 803 804 OSAL_MMIOWB(p_hwfn->p_dev); 805 806 return rc; 807 } 808 809 /** 810 * @brief ecore_int_assertion - handles asserted attention bits 811 * 812 * @param p_hwfn 813 * @param asserted_bits newly asserted bits 814 * @return enum _ecore_status_t 815 */ 816 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 817 u16 asserted_bits) 818 { 819 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 820 u32 igu_mask; 821 822 /* Mask the source of the attention in the IGU */ 823 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 824 IGU_REG_ATTENTION_ENABLE); 825 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 826 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 827 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 828 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 829 830 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 831 "inner known ATTN state: 0x%04x --> 0x%04x\n", 832 sb_attn_sw->known_attn, 833 sb_attn_sw->known_attn | asserted_bits); 834 sb_attn_sw->known_attn |= asserted_bits; 835 836 /* Handle MCP events */ 837 if (asserted_bits & 0x100) { 838 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 839 /* Clean the MCP attention */ 840 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 841 sb_attn_sw->mfw_attn_addr, 0); 842 } 843 844 /* FIXME - this will change once we'll have GOOD gtt definitions */ 845 DIRECT_REG_WR(p_hwfn, 846 (u8 OSAL_IOMEM*)p_hwfn->regview + 847 GTT_BAR0_MAP_REG_IGU_CMD + 848 ((IGU_CMD_ATTN_BIT_SET_UPPER - 849 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); 850 851 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", 852 asserted_bits); 853 854 return ECORE_SUCCESS; 855 } 856 857 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, 858 enum block_id id, enum dbg_attn_type type, 859 bool b_clear) 860 { 861 struct dbg_attn_block_result attn_results; 862 enum dbg_status status; 863 864 OSAL_MEMSET(&attn_results, 0, sizeof(attn_results)); 865 866 status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 867 b_clear, &attn_results); 868 #ifdef ATTN_DESC 869 if (status != DBG_STATUS_OK) 870 DP_NOTICE(p_hwfn, true, 871 "Failed to parse attention information [status: %s]\n", 872 ecore_dbg_get_status_str(status)); 873 else 874 ecore_dbg_parse_attn(p_hwfn, &attn_results); 875 #else 876 if (status != DBG_STATUS_OK) 877 DP_NOTICE(p_hwfn, true, 878 "Failed to parse attention information [status: %d]\n", 879 status); 880 else 881 ecore_dbg_print_attn(p_hwfn, &attn_results); 882 #endif 883 } 884 885 /** 886 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single 887 * cause of the attention 888 * 889 * @param p_hwfn 890 * @param p_aeu - descriptor of an AEU bit which caused the attention 891 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 892 * this bit to this group. 893 * @param bit_index - index of this bit in the aeu_en_reg 894 * 895 * @return enum _ecore_status_t 896 */ 897 static enum _ecore_status_t 898 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, 899 struct aeu_invert_reg_bit *p_aeu, 900 u32 aeu_en_reg, 901 const char *p_bit_name, 902 u32 bitmask) 903 { 904 enum _ecore_status_t rc = ECORE_INVAL; 905 bool b_fatal = false; 906 907 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 908 p_bit_name, bitmask); 909 910 /* Call callback before clearing the interrupt status */ 911 if (p_aeu->cb) { 912 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 913 p_bit_name); 914 rc = p_aeu->cb(p_hwfn); 915 } 916 917 if (rc != ECORE_SUCCESS) 918 b_fatal = true; 919 920 /* Print HW block interrupt registers */ 921 if (p_aeu->block_index != MAX_BLOCK_ID) 922 ecore_int_attn_print(p_hwfn, p_aeu->block_index, 923 ATTN_TYPE_INTERRUPT, !b_fatal); 924 925 /* Reach assertion if attention is fatal */ 926 if (b_fatal) { 927 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", 928 p_bit_name); 929 930 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 931 } 932 933 /* Prevent this Attention from being asserted in the future */ 934 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || 935 p_hwfn->p_dev->attn_clr_en) { 936 u32 val; 937 u32 mask = ~bitmask; 938 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 939 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); 940 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 941 p_bit_name); 942 } 943 944 return rc; 945 } 946 947 /** 948 * @brief ecore_int_deassertion_parity - handle a single parity AEU source 949 * 950 * @param p_hwfn 951 * @param p_aeu - descriptor of an AEU bit which caused the parity 952 * @param aeu_en_reg - address of the AEU enable register 953 * @param bit_index 954 */ 955 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, 956 struct aeu_invert_reg_bit *p_aeu, 957 u32 aeu_en_reg, u8 bit_index) 958 { 959 u32 block_id = p_aeu->block_index, mask, val; 960 961 DP_NOTICE(p_hwfn->p_dev, false, 962 "%s parity attention is set [address 0x%08x, bit %d]\n", 963 p_aeu->bit_name, aeu_en_reg, bit_index); 964 965 if (block_id == MAX_BLOCK_ID) 966 return; 967 968 ecore_int_attn_print(p_hwfn, block_id, 969 ATTN_TYPE_PARITY, false); 970 971 /* In A0, there's a single parity bit for several blocks */ 972 if (block_id == BLOCK_BTB) { 973 ecore_int_attn_print(p_hwfn, BLOCK_OPTE, 974 ATTN_TYPE_PARITY, false); 975 ecore_int_attn_print(p_hwfn, BLOCK_MCP, 976 ATTN_TYPE_PARITY, false); 977 } 978 979 /* Prevent this parity error from being re-asserted */ 980 mask = ~(0x1 << bit_index); 981 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 982 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 983 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 984 p_aeu->bit_name); 985 } 986 987 /** 988 * @brief - handles deassertion of previously asserted attentions. 989 * 990 * @param p_hwfn 991 * @param deasserted_bits - newly deasserted bits 992 * @return enum _ecore_status_t 993 * 994 */ 995 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, 996 u16 deasserted_bits) 997 { 998 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 999 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1000 u8 i, j, k, bit_idx; 1001 enum _ecore_status_t rc = ECORE_SUCCESS; 1002 1003 /* Read the attention registers in the AEU */ 1004 for (i = 0; i < NUM_ATTN_REGS; i++) { 1005 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1006 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1007 i * 0x4); 1008 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1009 "Deasserted bits [%d]: %08x\n", 1010 i, aeu_inv_arr[i]); 1011 } 1012 1013 /* Handle parity attentions first */ 1014 for (i = 0; i < NUM_ATTN_REGS; i++) 1015 { 1016 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1017 u32 parities; 1018 1019 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1020 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1021 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1022 1023 /* Skip register in which no parity bit is currently set */ 1024 if (!parities) 1025 continue; 1026 1027 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1028 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1029 1030 if (ecore_int_is_parity_flag(p_hwfn, p_bit) && 1031 !!(parities & (1 << bit_idx))) 1032 ecore_int_deassertion_parity(p_hwfn, p_bit, 1033 aeu_en, bit_idx); 1034 1035 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1036 } 1037 } 1038 1039 /* Find non-parity cause for attention and act */ 1040 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1041 struct aeu_invert_reg_bit *p_aeu; 1042 1043 /* Handle only groups whose attention is currently deasserted */ 1044 if (!(deasserted_bits & (1 << k))) 1045 continue; 1046 1047 for (i = 0; i < NUM_ATTN_REGS; i++) { 1048 u32 bits; 1049 1050 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1051 i * sizeof(u32) + 1052 k * sizeof(u32) * NUM_ATTN_REGS; 1053 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1054 bits = aeu_inv_arr[i] & en; 1055 1056 /* Skip if no bit from this group is currently set */ 1057 if (!bits) 1058 continue; 1059 1060 /* Find all set bits from current register which belong 1061 * to current group, making them responsible for the 1062 * previous assertion. 1063 */ 1064 for (j = 0, bit_idx = 0; bit_idx < 32; j++) 1065 { 1066 long unsigned int bitmask; 1067 u8 bit, bit_len; 1068 1069 /* Need to account bits with changed meaning */ 1070 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1071 p_aeu = ecore_int_aeu_translate(p_hwfn, p_aeu); 1072 1073 bit = bit_idx; 1074 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1075 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) { 1076 /* Skip Parity */ 1077 bit++; 1078 bit_len--; 1079 } 1080 1081 /* Find the bits relating to HW-block, then 1082 * shift so they'll become LSB. 1083 */ 1084 bitmask = bits & (((1 << bit_len) - 1) << bit); 1085 bitmask >>= bit; 1086 1087 if (bitmask) { 1088 u32 flags = p_aeu->flags; 1089 char bit_name[30]; 1090 u8 num; 1091 1092 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask, 1093 bit_len); 1094 1095 /* Some bits represent more than a 1096 * a single interrupt. Correctly print 1097 * their name. 1098 */ 1099 if (ATTENTION_LENGTH(flags) > 2 || 1100 ((flags & ATTENTION_PAR_INT) && 1101 ATTENTION_LENGTH(flags) > 1)) 1102 OSAL_SNPRINTF(bit_name, 30, 1103 p_aeu->bit_name, 1104 num); 1105 else 1106 OSAL_STRNCPY(bit_name, 1107 p_aeu->bit_name, 1108 30); 1109 1110 /* We now need to pass bitmask in its 1111 * correct position. 1112 */ 1113 bitmask <<= bit; 1114 1115 /* Handle source of the attention */ 1116 ecore_int_deassertion_aeu_bit(p_hwfn, 1117 p_aeu, 1118 aeu_en, 1119 bit_name, 1120 bitmask); 1121 } 1122 1123 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1124 } 1125 } 1126 } 1127 1128 /* Clear IGU indication for the deasserted bits */ 1129 /* FIXME - this will change once we'll have GOOD gtt definitions */ 1130 DIRECT_REG_WR(p_hwfn, 1131 (u8 OSAL_IOMEM*)p_hwfn->regview + 1132 GTT_BAR0_MAP_REG_IGU_CMD + 1133 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1134 IGU_CMD_INT_ACK_BASE) << 3), 1135 ~((u32)deasserted_bits)); 1136 1137 /* Unmask deasserted attentions in IGU */ 1138 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1139 IGU_REG_ATTENTION_ENABLE); 1140 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1141 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1142 1143 /* Clear deassertion from inner state */ 1144 sb_attn_sw->known_attn &= ~deasserted_bits; 1145 1146 return rc; 1147 } 1148 1149 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) 1150 { 1151 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1152 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1153 u16 index = 0, asserted_bits, deasserted_bits; 1154 u32 attn_bits = 0, attn_acks = 0; 1155 enum _ecore_status_t rc = ECORE_SUCCESS; 1156 1157 /* Read current attention bits/acks - safeguard against attentions 1158 * by guaranting work on a synchronized timeframe 1159 */ 1160 do { 1161 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); 1162 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); 1163 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); 1164 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); 1165 p_sb_attn->sb_index = index; 1166 1167 /* Attention / Deassertion are meaningful (and in correct state) 1168 * only when they differ and consistent with known state - deassertion 1169 * when previous attention & current ack, and assertion when current 1170 * attention with no previous attention 1171 */ 1172 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1173 ~p_sb_attn_sw->known_attn; 1174 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1175 p_sb_attn_sw->known_attn; 1176 1177 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) 1178 DP_INFO(p_hwfn, 1179 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1180 index, attn_bits, attn_acks, asserted_bits, 1181 deasserted_bits, p_sb_attn_sw->known_attn); 1182 else if (asserted_bits == 0x100) 1183 DP_INFO(p_hwfn, 1184 "MFW indication via attention\n"); 1185 else 1186 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1187 "MFW indication [deassertion]\n"); 1188 1189 if (asserted_bits) { 1190 rc = ecore_int_assertion(p_hwfn, asserted_bits); 1191 if (rc) 1192 return rc; 1193 } 1194 1195 if (deasserted_bits) 1196 rc = ecore_int_deassertion(p_hwfn, deasserted_bits); 1197 1198 return rc; 1199 } 1200 1201 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, 1202 void OSAL_IOMEM *igu_addr, u32 ack_cons) 1203 { 1204 struct igu_prod_cons_update igu_ack = { 0 }; 1205 1206 igu_ack.sb_id_and_flags = 1207 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1208 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1209 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1210 (IGU_SEG_ACCESS_ATTN << 1211 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1212 1213 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); 1214 1215 /* Both segments (interrupts & acks) are written to same place address; 1216 * Need to guarantee all commands will be received (in-order) by HW. 1217 */ 1218 OSAL_MMIOWB(p_hwfn->p_dev); 1219 OSAL_BARRIER(p_hwfn->p_dev); 1220 } 1221 1222 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) 1223 { 1224 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; 1225 struct ecore_pi_info *pi_info = OSAL_NULL; 1226 struct ecore_sb_attn_info *sb_attn; 1227 struct ecore_sb_info *sb_info; 1228 int arr_size; 1229 u16 rc = 0; 1230 1231 if (!p_hwfn) 1232 return; 1233 1234 if (!p_hwfn->p_sp_sb) { 1235 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); 1236 return; 1237 } 1238 1239 sb_info = &p_hwfn->p_sp_sb->sb_info; 1240 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1241 if (!sb_info) { 1242 DP_ERR(p_hwfn->p_dev, "Status block is NULL - cannot ack interrupts\n"); 1243 return; 1244 } 1245 1246 if (!p_hwfn->p_sb_attn) { 1247 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); 1248 return; 1249 } 1250 sb_attn = p_hwfn->p_sb_attn; 1251 1252 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1253 p_hwfn, p_hwfn->my_id); 1254 1255 /* Disable ack for def status block. Required both for msix + 1256 * inta in non-mask mode, in inta does no harm. 1257 */ 1258 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1259 1260 /* Gather Interrupts/Attentions information */ 1261 if (!sb_info->sb_virt) { 1262 DP_ERR(p_hwfn->p_dev, "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1263 } else { 1264 u32 tmp_index = sb_info->sb_ack; 1265 rc = ecore_sb_update_sb_idx(sb_info); 1266 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1267 "Interrupt indices: 0x%08x --> 0x%08x\n", 1268 tmp_index, sb_info->sb_ack); 1269 } 1270 1271 if (!sb_attn || !sb_attn->sb_attn) { 1272 DP_ERR(p_hwfn->p_dev, "Attentions Status block is NULL - cannot check for new attentions!\n"); 1273 } else { 1274 u16 tmp_index = sb_attn->index; 1275 1276 rc |= ecore_attn_update_idx(p_hwfn, sb_attn); 1277 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1278 "Attention indices: 0x%08x --> 0x%08x\n", 1279 tmp_index, sb_attn->index); 1280 } 1281 1282 /* Check if we expect interrupts at this time. if not just ack them */ 1283 if (!(rc & ECORE_SB_EVENT_MASK)) { 1284 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1285 return; 1286 } 1287 1288 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1289 if (!p_hwfn->p_dpc_ptt) { 1290 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); 1291 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1292 return; 1293 } 1294 1295 if (rc & ECORE_SB_ATT_IDX) 1296 ecore_int_attentions(p_hwfn); 1297 1298 if (rc & ECORE_SB_IDX) { 1299 int pi; 1300 1301 /* Since we only looked at the SB index, it's possible more 1302 * than a single protocol-index on the SB incremented. 1303 * Iterate over all configured protocol indices and check 1304 * whether something happened for each. 1305 */ 1306 for (pi = 0; pi < arr_size; pi++) { 1307 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1308 if (pi_info->comp_cb != OSAL_NULL) 1309 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1310 } 1311 } 1312 1313 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { 1314 /* This should be done before the interrupts are enabled, 1315 * since otherwise a new attention will be generated. 1316 */ 1317 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1318 } 1319 1320 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1321 } 1322 1323 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) 1324 { 1325 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1326 1327 if (!p_sb) 1328 return; 1329 1330 if (p_sb->sb_attn) { 1331 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, 1332 p_sb->sb_phys, 1333 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1334 } 1335 1336 OSAL_FREE(p_hwfn->p_dev, p_sb); 1337 p_hwfn->p_sb_attn = OSAL_NULL; 1338 } 1339 1340 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, 1341 struct ecore_ptt *p_ptt) 1342 { 1343 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1344 1345 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1346 1347 sb_info->index = 0; 1348 sb_info->known_attn = 0; 1349 1350 /* Configure Attention Status Block in IGU */ 1351 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1352 DMA_LO(p_hwfn->p_sb_attn->sb_phys)); 1353 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1354 DMA_HI(p_hwfn->p_sb_attn->sb_phys)); 1355 } 1356 1357 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, 1358 struct ecore_ptt *p_ptt, 1359 void *sb_virt_addr, 1360 dma_addr_t sb_phy_addr) 1361 { 1362 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1363 int i, j, k; 1364 1365 sb_info->sb_attn = sb_virt_addr; 1366 sb_info->sb_phys = sb_phy_addr; 1367 1368 /* Set the pointer to the AEU descriptors */ 1369 sb_info->p_aeu_desc = aeu_descs; 1370 1371 /* Calculate Parity Masks */ 1372 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1373 for (i = 0; i < NUM_ATTN_REGS; i++) { 1374 /* j is array index, k is bit index */ 1375 for (j = 0, k = 0; k < 32; j++) { 1376 struct aeu_invert_reg_bit *p_aeu; 1377 1378 p_aeu = &aeu_descs[i].bits[j]; 1379 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) 1380 sb_info->parity_mask[i] |= 1 << k; 1381 1382 k += ATTENTION_LENGTH(p_aeu->flags); 1383 } 1384 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1385 "Attn Mask [Reg %d]: 0x%08x\n", 1386 i, sb_info->parity_mask[i]); 1387 } 1388 1389 /* Set the address of cleanup for the mcp attention */ 1390 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1391 MISC_REG_AEU_GENERAL_ATTN_0; 1392 1393 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 1394 } 1395 1396 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, 1397 struct ecore_ptt *p_ptt) 1398 { 1399 struct ecore_dev *p_dev = p_hwfn->p_dev; 1400 struct ecore_sb_attn_info *p_sb; 1401 dma_addr_t p_phys = 0; 1402 void *p_virt; 1403 1404 /* SB struct */ 1405 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); 1406 if (!p_sb) { 1407 DP_NOTICE(p_dev, true, "Failed to allocate `struct ecore_sb_attn_info'\n"); 1408 return ECORE_NOMEM; 1409 } 1410 1411 /* SB ring */ 1412 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 1413 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1414 if (!p_virt) { 1415 DP_NOTICE(p_dev, true, "Failed to allocate status block (attentions)\n"); 1416 OSAL_FREE(p_dev, p_sb); 1417 return ECORE_NOMEM; 1418 } 1419 1420 /* Attention setup */ 1421 p_hwfn->p_sb_attn = p_sb; 1422 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1423 1424 return ECORE_SUCCESS; 1425 } 1426 1427 /* coalescing timeout = timeset << (timer_res + 1) */ 1428 #define ECORE_CAU_DEF_RX_USECS 24 1429 #define ECORE_CAU_DEF_TX_USECS 48 1430 1431 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, 1432 struct cau_sb_entry *p_sb_entry, 1433 u8 pf_id, u16 vf_number, u8 vf_valid) 1434 { 1435 struct ecore_dev *p_dev = p_hwfn->p_dev; 1436 u32 cau_state; 1437 u8 timer_res; 1438 1439 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); 1440 1441 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1442 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1443 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1444 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1445 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1446 1447 cau_state = CAU_HC_DISABLE_STATE; 1448 1449 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1450 cau_state = CAU_HC_ENABLE_STATE; 1451 if (!p_dev->rx_coalesce_usecs) 1452 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; 1453 if (!p_dev->tx_coalesce_usecs) 1454 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; 1455 } 1456 1457 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1458 if (p_dev->rx_coalesce_usecs <= 0x7F) 1459 timer_res = 0; 1460 else if (p_dev->rx_coalesce_usecs <= 0xFF) 1461 timer_res = 1; 1462 else 1463 timer_res = 2; 1464 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1465 1466 if (p_dev->tx_coalesce_usecs <= 0x7F) 1467 timer_res = 0; 1468 else if (p_dev->tx_coalesce_usecs <= 0xFF) 1469 timer_res = 1; 1470 else 1471 timer_res = 2; 1472 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1473 1474 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1475 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1476 } 1477 1478 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1479 struct ecore_ptt *p_ptt, 1480 u16 igu_sb_id, u32 pi_index, 1481 enum ecore_coalescing_fsm coalescing_fsm, 1482 u8 timeset) 1483 { 1484 struct cau_pi_entry pi_entry; 1485 u32 sb_offset, pi_offset; 1486 1487 if (IS_VF(p_hwfn->p_dev)) 1488 return;/* @@@TBD MichalK- VF CAU... */ 1489 1490 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1491 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1492 1493 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1494 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) 1495 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1496 else 1497 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1498 1499 pi_offset = sb_offset + pi_index; 1500 if (p_hwfn->hw_init_done) { 1501 ecore_wr(p_hwfn, p_ptt, 1502 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1503 *((u32 *)&(pi_entry))); 1504 } else { 1505 STORE_RT_REG(p_hwfn, 1506 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1507 *((u32 *)&(pi_entry))); 1508 } 1509 } 1510 1511 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1512 struct ecore_ptt *p_ptt, 1513 struct ecore_sb_info *p_sb, u32 pi_index, 1514 enum ecore_coalescing_fsm coalescing_fsm, 1515 u8 timeset) 1516 { 1517 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id, 1518 pi_index, coalescing_fsm, timeset); 1519 } 1520 1521 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, 1522 struct ecore_ptt *p_ptt, 1523 dma_addr_t sb_phys, u16 igu_sb_id, 1524 u16 vf_number, u8 vf_valid) 1525 { 1526 struct cau_sb_entry sb_entry; 1527 1528 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1529 vf_number, vf_valid); 1530 1531 if (p_hwfn->hw_init_done) { 1532 /* Wide-bus, initialize via DMAE */ 1533 u64 phys_addr = (u64)sb_phys; 1534 1535 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&phys_addr, 1536 CAU_REG_SB_ADDR_MEMORY + 1537 igu_sb_id * sizeof(u64), 2, 0); 1538 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&sb_entry, 1539 CAU_REG_SB_VAR_MEMORY + 1540 igu_sb_id * sizeof(u64), 2, 0); 1541 } else { 1542 /* Initialize Status Block Address */ 1543 STORE_RT_REG_AGG(p_hwfn, 1544 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET+igu_sb_id*2, 1545 sb_phys); 1546 1547 STORE_RT_REG_AGG(p_hwfn, 1548 CAU_REG_SB_VAR_MEMORY_RT_OFFSET+igu_sb_id*2, 1549 sb_entry); 1550 } 1551 1552 /* Configure pi coalescing if set */ 1553 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1554 /* eth will open queues for all tcs, so configure all of them 1555 * properly, rather than just the active ones 1556 */ 1557 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1558 1559 u8 timeset, timer_res; 1560 u8 i; 1561 1562 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1563 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) 1564 timer_res = 0; 1565 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) 1566 timer_res = 1; 1567 else 1568 timer_res = 2; 1569 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); 1570 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1571 ECORE_COAL_RX_STATE_MACHINE, 1572 timeset); 1573 1574 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) 1575 timer_res = 0; 1576 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) 1577 timer_res = 1; 1578 else 1579 timer_res = 2; 1580 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); 1581 for (i = 0; i < num_tc; i++) { 1582 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, 1583 igu_sb_id, TX_PI(i), 1584 ECORE_COAL_TX_STATE_MACHINE, 1585 timeset); 1586 } 1587 } 1588 } 1589 1590 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, 1591 struct ecore_ptt *p_ptt, 1592 struct ecore_sb_info *sb_info) 1593 { 1594 /* zero status block and ack counter */ 1595 sb_info->sb_ack = 0; 1596 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1597 1598 if (IS_PF(p_hwfn->p_dev)) 1599 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1600 sb_info->igu_sb_id, 0, 0); 1601 } 1602 1603 struct ecore_igu_block * 1604 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf) 1605 { 1606 struct ecore_igu_block *p_block; 1607 u16 igu_id; 1608 1609 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1610 igu_id++) { 1611 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1612 1613 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1614 !(p_block->status & ECORE_IGU_STATUS_FREE)) 1615 continue; 1616 1617 if (!!(p_block->status & ECORE_IGU_STATUS_PF) == 1618 b_is_pf) 1619 return p_block; 1620 } 1621 1622 return OSAL_NULL; 1623 } 1624 1625 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, 1626 u16 vector_id) 1627 { 1628 struct ecore_igu_block *p_block; 1629 u16 igu_id; 1630 1631 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1632 igu_id++) { 1633 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1634 1635 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1636 !p_block->is_pf || 1637 p_block->vector_number != vector_id) 1638 continue; 1639 1640 return igu_id; 1641 } 1642 1643 return ECORE_SB_INVALID_IDX; 1644 } 1645 1646 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 1647 { 1648 u16 igu_sb_id; 1649 1650 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1651 if (sb_id == ECORE_SP_SB_ID) 1652 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1653 else if (IS_PF(p_hwfn->p_dev)) 1654 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1655 else 1656 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); 1657 1658 if (igu_sb_id == ECORE_SB_INVALID_IDX) 1659 DP_NOTICE(p_hwfn, true, 1660 "Slowpath SB vector %04x doesn't exist\n", 1661 sb_id); 1662 else if (sb_id == ECORE_SP_SB_ID) 1663 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1664 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1665 else 1666 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1667 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1668 1669 return igu_sb_id; 1670 } 1671 1672 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, 1673 struct ecore_ptt *p_ptt, 1674 struct ecore_sb_info *sb_info, 1675 void *sb_virt_addr, 1676 dma_addr_t sb_phy_addr, 1677 u16 sb_id) 1678 { 1679 sb_info->sb_virt = sb_virt_addr; 1680 sb_info->sb_phys = sb_phy_addr; 1681 1682 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); 1683 1684 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX) 1685 return ECORE_INVAL; 1686 1687 /* Let the igu info reference the client's SB info */ 1688 if (sb_id != ECORE_SP_SB_ID) { 1689 if (IS_PF(p_hwfn->p_dev)) { 1690 struct ecore_igu_info *p_info; 1691 struct ecore_igu_block *p_block; 1692 1693 p_info = p_hwfn->hw_info.p_igu_info; 1694 p_block = &p_info->entry[sb_info->igu_sb_id]; 1695 1696 p_block->sb_info = sb_info; 1697 p_block->status &= ~ECORE_IGU_STATUS_FREE; 1698 p_info->usage.free_cnt--; 1699 } else { 1700 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1701 } 1702 } 1703 1704 #ifdef ECORE_CONFIG_DIRECT_HWFN 1705 sb_info->p_hwfn = p_hwfn; 1706 #endif 1707 sb_info->p_dev = p_hwfn->p_dev; 1708 1709 /* The igu address will hold the absolute address that needs to be 1710 * written to for a specific status block 1711 */ 1712 if (IS_PF(p_hwfn->p_dev)) { 1713 sb_info->igu_addr = (u8 OSAL_IOMEM*)p_hwfn->regview + 1714 GTT_BAR0_MAP_REG_IGU_CMD + 1715 (sb_info->igu_sb_id << 3); 1716 1717 } else { 1718 sb_info->igu_addr = 1719 (u8 OSAL_IOMEM*)p_hwfn->regview + 1720 PXP_VF_BAR0_START_IGU + 1721 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); 1722 } 1723 1724 sb_info->flags |= ECORE_SB_INFO_INIT; 1725 1726 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1727 1728 return ECORE_SUCCESS; 1729 } 1730 1731 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1732 struct ecore_sb_info *sb_info, 1733 u16 sb_id) 1734 { 1735 struct ecore_igu_info *p_info; 1736 struct ecore_igu_block *p_block; 1737 1738 if (sb_info == OSAL_NULL) 1739 return ECORE_SUCCESS; 1740 1741 /* zero status block and ack counter */ 1742 sb_info->sb_ack = 0; 1743 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1744 1745 if (IS_VF(p_hwfn->p_dev)) { 1746 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); 1747 return ECORE_SUCCESS; 1748 } 1749 1750 p_info = p_hwfn->hw_info.p_igu_info; 1751 p_block = &p_info->entry[sb_info->igu_sb_id]; 1752 1753 /* Vector 0 is reserved to Default SB */ 1754 if (p_block->vector_number == 0) { 1755 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1756 return ECORE_INVAL; 1757 } 1758 1759 /* Lose reference to client's SB info, and fix counters */ 1760 p_block->sb_info = OSAL_NULL; 1761 p_block->status |= ECORE_IGU_STATUS_FREE; 1762 p_info->usage.free_cnt++; 1763 1764 return ECORE_SUCCESS; 1765 } 1766 1767 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1768 { 1769 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1770 1771 if (!p_sb) 1772 return; 1773 1774 if (p_sb->sb_info.sb_virt) { 1775 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1776 p_sb->sb_info.sb_virt, 1777 p_sb->sb_info.sb_phys, 1778 SB_ALIGNED_SIZE(p_hwfn)); 1779 } 1780 1781 OSAL_FREE(p_hwfn->p_dev, p_sb); 1782 p_hwfn->p_sp_sb = OSAL_NULL; 1783 } 1784 1785 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1786 struct ecore_ptt *p_ptt) 1787 { 1788 struct ecore_sb_sp_info *p_sb; 1789 dma_addr_t p_phys = 0; 1790 void *p_virt; 1791 1792 /* SB struct */ 1793 p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); 1794 if (!p_sb) { 1795 DP_NOTICE(p_hwfn, true, "Failed to allocate `struct ecore_sb_info'\n"); 1796 return ECORE_NOMEM; 1797 } 1798 1799 /* SB ring */ 1800 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1801 &p_phys, 1802 SB_ALIGNED_SIZE(p_hwfn)); 1803 if (!p_virt) { 1804 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n"); 1805 OSAL_FREE(p_hwfn->p_dev, p_sb); 1806 return ECORE_NOMEM; 1807 } 1808 1809 1810 /* Status Block setup */ 1811 p_hwfn->p_sp_sb = p_sb; 1812 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1813 p_virt, p_phys, ECORE_SP_SB_ID); 1814 1815 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1816 1817 return ECORE_SUCCESS; 1818 } 1819 1820 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1821 ecore_int_comp_cb_t comp_cb, 1822 void *cookie, 1823 u8 *sb_idx, 1824 __le16 **p_fw_cons) 1825 { 1826 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1827 enum _ecore_status_t rc = ECORE_NOMEM; 1828 u8 pi; 1829 1830 /* Look for a free index */ 1831 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1832 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1833 continue; 1834 1835 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1836 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1837 *sb_idx = pi; 1838 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1839 rc = ECORE_SUCCESS; 1840 break; 1841 } 1842 1843 return rc; 1844 } 1845 1846 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, 1847 u8 pi) 1848 { 1849 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1850 1851 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1852 return ECORE_NOMEM; 1853 1854 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1855 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; 1856 1857 return ECORE_SUCCESS; 1858 } 1859 1860 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1861 { 1862 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1863 } 1864 1865 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1866 struct ecore_ptt *p_ptt, 1867 enum ecore_int_mode int_mode) 1868 { 1869 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1870 1871 #ifndef ASIC_ONLY 1872 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1873 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1874 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1875 } 1876 #endif 1877 1878 p_hwfn->p_dev->int_mode = int_mode; 1879 switch (p_hwfn->p_dev->int_mode) { 1880 case ECORE_INT_MODE_INTA: 1881 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1882 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1883 break; 1884 1885 case ECORE_INT_MODE_MSI: 1886 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1887 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1888 break; 1889 1890 case ECORE_INT_MODE_MSIX: 1891 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1892 break; 1893 case ECORE_INT_MODE_POLL: 1894 break; 1895 } 1896 1897 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1898 } 1899 1900 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1901 struct ecore_ptt *p_ptt) 1902 { 1903 #ifndef ASIC_ONLY 1904 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1905 DP_INFO(p_hwfn, "FPGA - Don't enable Attentions in IGU and MISC\n"); 1906 return; 1907 } 1908 #endif 1909 1910 /* Configure AEU signal change to produce attentions */ 1911 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1912 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1913 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1914 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1915 1916 /* Flush the writes to IGU */ 1917 OSAL_MMIOWB(p_hwfn->p_dev); 1918 1919 /* Unmask AEU signals toward IGU */ 1920 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1921 } 1922 1923 enum _ecore_status_t 1924 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1925 enum ecore_int_mode int_mode) 1926 { 1927 enum _ecore_status_t rc = ECORE_SUCCESS; 1928 u32 tmp; 1929 1930 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop 1931 * attentions. Since we're waiting for BRCM answer regarding this 1932 * attention, in the meanwhile we simply mask it. 1933 */ 1934 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0); 1935 tmp &= ~0x800; 1936 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp); 1937 1938 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1939 1940 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1941 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1942 if (rc != ECORE_SUCCESS) { 1943 DP_NOTICE(p_hwfn, true, "Slowpath IRQ request failed\n"); 1944 return ECORE_NORESOURCES; 1945 } 1946 p_hwfn->b_int_requested = true; 1947 } 1948 1949 /* Enable interrupt Generation */ 1950 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1951 1952 p_hwfn->b_int_enabled = 1; 1953 1954 return rc; 1955 } 1956 1957 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1958 struct ecore_ptt *p_ptt) 1959 { 1960 p_hwfn->b_int_enabled = 0; 1961 1962 if (IS_VF(p_hwfn->p_dev)) 1963 return; 1964 1965 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1966 } 1967 1968 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1969 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1970 struct ecore_ptt *p_ptt, 1971 u16 igu_sb_id, 1972 bool cleanup_set, 1973 u16 opaque_fid) 1974 { 1975 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1976 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1977 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1978 u8 type = 0; /* FIXME MichalS type??? */ 1979 1980 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1981 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1982 1983 /* USE Control Command Register to perform cleanup. There is an 1984 * option to do this using IGU bar, but then it can't be used for VFs. 1985 */ 1986 1987 /* Set the data field */ 1988 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1989 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1990 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1991 1992 /* Set the control register */ 1993 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1994 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1995 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1996 1997 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1998 1999 OSAL_BARRIER(p_hwfn->p_dev); 2000 2001 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 2002 2003 /* Flush the write to IGU */ 2004 OSAL_MMIOWB(p_hwfn->p_dev); 2005 2006 /* calculate where to read the status bit from */ 2007 sb_bit = 1 << (igu_sb_id % 32); 2008 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 2009 2010 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2011 2012 /* Now wait for the command to complete */ 2013 while (--sleep_cnt) { 2014 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2015 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 2016 break; 2017 OSAL_MSLEEP(5); 2018 } 2019 2020 if (!sleep_cnt) 2021 DP_NOTICE(p_hwfn, true, 2022 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2023 val, igu_sb_id); 2024 } 2025 2026 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2027 struct ecore_ptt *p_ptt, 2028 u16 igu_sb_id, u16 opaque, bool b_set) 2029 { 2030 struct ecore_igu_block *p_block; 2031 int pi, i; 2032 2033 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2034 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2035 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2036 igu_sb_id, p_block->function_id, p_block->is_pf, 2037 p_block->vector_number); 2038 2039 /* Set */ 2040 if (b_set) 2041 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2042 2043 /* Clear */ 2044 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2045 2046 /* Wait for the IGU SB to cleanup */ 2047 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2048 u32 val; 2049 2050 val = ecore_rd(p_hwfn, p_ptt, 2051 IGU_REG_WRITE_DONE_PENDING + 2052 ((igu_sb_id / 32) * 4)); 2053 if (val & (1 << (igu_sb_id % 32))) 2054 OSAL_UDELAY(10); 2055 else 2056 break; 2057 } 2058 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2059 DP_NOTICE(p_hwfn, true, 2060 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2061 igu_sb_id); 2062 2063 /* Clear the CAU for the SB */ 2064 for (pi = 0; pi < 12; pi++) 2065 ecore_wr(p_hwfn, p_ptt, 2066 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2067 } 2068 2069 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2070 struct ecore_ptt *p_ptt, 2071 bool b_set, 2072 bool b_slowpath) 2073 { 2074 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2075 struct ecore_igu_block *p_block; 2076 u16 igu_sb_id = 0; 2077 u32 val = 0; 2078 2079 /* @@@TBD MichalK temporary... should be moved to init-tool... */ 2080 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2081 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2082 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2083 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2084 /* end temporary */ 2085 2086 for (igu_sb_id = 0; 2087 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2088 igu_sb_id++) { 2089 p_block = &p_info->entry[igu_sb_id]; 2090 2091 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2092 !p_block->is_pf || 2093 (p_block->status & ECORE_IGU_STATUS_DSB)) 2094 continue; 2095 2096 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2097 p_hwfn->hw_info.opaque_fid, 2098 b_set); 2099 } 2100 2101 if (b_slowpath) 2102 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2103 p_info->igu_dsb_id, 2104 p_hwfn->hw_info.opaque_fid, 2105 b_set); 2106 } 2107 2108 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, 2109 struct ecore_ptt *p_ptt) 2110 { 2111 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2112 struct ecore_igu_block *p_block; 2113 int pf_sbs, vf_sbs; 2114 u16 igu_sb_id; 2115 u32 val, rval; 2116 2117 if (!RESC_NUM(p_hwfn, ECORE_SB)) { 2118 /* We're using an old MFW - have to prevent any switching 2119 * of SBs between PF and VFs as later driver wouldn't be 2120 * able to tell which belongs to which. 2121 */ 2122 p_info->b_allow_pf_vf_change = false; 2123 } else { 2124 /* Use the numbers the MFW have provided - 2125 * don't forget MFW accounts for the default SB as well. 2126 */ 2127 p_info->b_allow_pf_vf_change = true; 2128 2129 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) { 2130 DP_INFO(p_hwfn, 2131 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2132 RESC_NUM(p_hwfn, ECORE_SB) - 1, 2133 p_info->usage.cnt); 2134 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1; 2135 } 2136 2137 /* TODO - how do we learn about VF SBs from MFW? */ 2138 if (IS_PF_SRIOV(p_hwfn)) { 2139 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs; 2140 2141 if (vfs != p_info->usage.iov_cnt) 2142 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2143 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2144 p_info->usage.iov_cnt, vfs); 2145 2146 /* At this point we know how many SBs we have totally 2147 * in IGU + number of PF SBs. So we can validate that 2148 * we'd have sufficient for VF. 2149 */ 2150 if (vfs > p_info->usage.free_cnt + 2151 p_info->usage.free_cnt_iov - 2152 p_info->usage.cnt) { 2153 DP_NOTICE(p_hwfn, true, 2154 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2155 p_info->usage.free_cnt + 2156 p_info->usage.free_cnt_iov, 2157 p_info->usage.cnt, vfs); 2158 return ECORE_INVAL; 2159 } 2160 2161 /* Currently cap the number of VFs SBs by the 2162 * number of VFs. 2163 */ 2164 p_info->usage.iov_cnt = vfs; 2165 } 2166 } 2167 2168 /* Mark all SBs as free, now in the right PF/VFs division */ 2169 p_info->usage.free_cnt = p_info->usage.cnt; 2170 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2171 p_info->usage.orig = p_info->usage.cnt; 2172 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2173 2174 /* We now proceed to re-configure the IGU cam to reflect the initial 2175 * configuration. We can start with the Default SB. 2176 */ 2177 pf_sbs = p_info->usage.cnt; 2178 vf_sbs = p_info->usage.iov_cnt; 2179 2180 for (igu_sb_id = p_info->igu_dsb_id; 2181 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2182 igu_sb_id++) { 2183 p_block = &p_info->entry[igu_sb_id]; 2184 val = 0; 2185 2186 if (!(p_block->status & ECORE_IGU_STATUS_VALID)) 2187 continue; 2188 2189 if (p_block->status & ECORE_IGU_STATUS_DSB) { 2190 p_block->function_id = p_hwfn->rel_pf_id; 2191 p_block->is_pf = 1; 2192 p_block->vector_number = 0; 2193 p_block->status = ECORE_IGU_STATUS_VALID | 2194 ECORE_IGU_STATUS_PF | 2195 ECORE_IGU_STATUS_DSB; 2196 } else if (pf_sbs) { 2197 pf_sbs--; 2198 p_block->function_id = p_hwfn->rel_pf_id; 2199 p_block->is_pf = 1; 2200 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2201 p_block->status = ECORE_IGU_STATUS_VALID | 2202 ECORE_IGU_STATUS_PF | 2203 ECORE_IGU_STATUS_FREE; 2204 } else if (vf_sbs) { 2205 p_block->function_id = 2206 p_hwfn->p_dev->p_iov_info->first_vf_in_pf + 2207 p_info->usage.iov_cnt - vf_sbs; 2208 p_block->is_pf = 0; 2209 p_block->vector_number = 0; 2210 p_block->status = ECORE_IGU_STATUS_VALID | 2211 ECORE_IGU_STATUS_FREE; 2212 vf_sbs--; 2213 } else { 2214 p_block->function_id = 0; 2215 p_block->is_pf = 0; 2216 p_block->vector_number = 0; 2217 } 2218 2219 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2220 p_block->function_id); 2221 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2222 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2223 p_block->vector_number); 2224 2225 /* VF entries would be enabled when VF is initializaed */ 2226 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2227 2228 rval = ecore_rd(p_hwfn, p_ptt, 2229 IGU_REG_MAPPING_MEMORY + 2230 sizeof(u32) * igu_sb_id); 2231 2232 if (rval != val) { 2233 ecore_wr(p_hwfn, p_ptt, 2234 IGU_REG_MAPPING_MEMORY + 2235 sizeof(u32) * igu_sb_id, 2236 val); 2237 2238 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2239 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2240 igu_sb_id, p_block->function_id, 2241 p_block->is_pf, p_block->vector_number, 2242 rval, val); 2243 } 2244 } 2245 2246 return 0; 2247 } 2248 2249 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, 2250 struct ecore_ptt *p_ptt) 2251 { 2252 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage; 2253 2254 /* Return all the usage indications to default prior to the reset; 2255 * The reset expects the !orig to reflect the initial status of the 2256 * SBs, and would re-calculate the originals based on those. 2257 */ 2258 p_cnt->cnt = p_cnt->orig; 2259 p_cnt->free_cnt = p_cnt->orig; 2260 p_cnt->iov_cnt = p_cnt->iov_orig; 2261 p_cnt->free_cnt_iov = p_cnt->iov_orig; 2262 p_cnt->orig = 0; 2263 p_cnt->iov_orig = 0; 2264 2265 /* TODO - we probably need to re-configure the CAU as well... */ 2266 return ecore_int_igu_reset_cam(p_hwfn, p_ptt); 2267 } 2268 2269 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, 2270 struct ecore_ptt *p_ptt, 2271 u16 igu_sb_id) 2272 { 2273 u32 val = ecore_rd(p_hwfn, p_ptt, 2274 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2275 struct ecore_igu_block *p_block; 2276 2277 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2278 2279 /* Fill the block information */ 2280 p_block->function_id = GET_FIELD(val, 2281 IGU_MAPPING_LINE_FUNCTION_NUMBER); 2282 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2283 p_block->vector_number = GET_FIELD(val, 2284 IGU_MAPPING_LINE_VECTOR_NUMBER); 2285 p_block->igu_sb_id = igu_sb_id; 2286 } 2287 2288 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, 2289 struct ecore_ptt *p_ptt) 2290 { 2291 struct ecore_igu_info *p_igu_info; 2292 struct ecore_igu_block *p_block; 2293 u32 min_vf = 0, max_vf = 0; 2294 u16 igu_sb_id; 2295 2296 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev, 2297 GFP_KERNEL, 2298 sizeof(*p_igu_info)); 2299 if (!p_hwfn->hw_info.p_igu_info) 2300 return ECORE_NOMEM; 2301 p_igu_info = p_hwfn->hw_info.p_igu_info; 2302 2303 /* Distinguish between existent and onn-existent default SB */ 2304 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX; 2305 2306 /* Find the range of VF ids whose SB belong to this PF */ 2307 if (p_hwfn->p_dev->p_iov_info) { 2308 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 2309 2310 min_vf = p_iov->first_vf_in_pf; 2311 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2312 } 2313 2314 for (igu_sb_id = 0; 2315 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2316 igu_sb_id++) { 2317 /* Read current entry; Notice it might not belong to this PF */ 2318 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2319 p_block = &p_igu_info->entry[igu_sb_id]; 2320 2321 if ((p_block->is_pf) && 2322 (p_block->function_id == p_hwfn->rel_pf_id)) { 2323 p_block->status = ECORE_IGU_STATUS_PF | 2324 ECORE_IGU_STATUS_VALID | 2325 ECORE_IGU_STATUS_FREE; 2326 2327 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2328 p_igu_info->usage.cnt++; 2329 } else if (!(p_block->is_pf) && 2330 (p_block->function_id >= min_vf) && 2331 (p_block->function_id < max_vf)) { 2332 /* Available for VFs of this PF */ 2333 p_block->status = ECORE_IGU_STATUS_VALID | 2334 ECORE_IGU_STATUS_FREE; 2335 2336 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2337 p_igu_info->usage.iov_cnt++; 2338 } 2339 2340 /* Mark the First entry belonging to the PF or its VFs 2341 * as the default SB [we'll reset IGU prior to first usage]. 2342 */ 2343 if ((p_block->status & ECORE_IGU_STATUS_VALID) && 2344 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) { 2345 p_igu_info->igu_dsb_id = igu_sb_id; 2346 p_block->status |= ECORE_IGU_STATUS_DSB; 2347 } 2348 2349 /* While this isn't suitable for all clients, limit number 2350 * of prints by having each PF print only its entries with the 2351 * exception of PF0 which would print everything. 2352 */ 2353 if ((p_block->status & ECORE_IGU_STATUS_VALID) || 2354 (p_hwfn->abs_pf_id == 0)) 2355 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2356 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2357 igu_sb_id, p_block->function_id, 2358 p_block->is_pf, p_block->vector_number); 2359 } 2360 2361 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { 2362 DP_NOTICE(p_hwfn, true, 2363 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2364 p_igu_info->igu_dsb_id); 2365 return ECORE_INVAL; 2366 } 2367 2368 /* All non default SB are considered free at this point */ 2369 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2370 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2371 2372 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2373 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2374 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, 2375 p_igu_info->usage.iov_cnt); 2376 2377 return ECORE_SUCCESS; 2378 } 2379 2380 enum _ecore_status_t 2381 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2382 u16 sb_id, bool b_to_vf) 2383 { 2384 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2385 struct ecore_igu_block *p_block = OSAL_NULL; 2386 u16 igu_sb_id = 0, vf_num = 0; 2387 u32 val = 0; 2388 2389 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) 2390 return ECORE_INVAL; 2391 2392 if (sb_id == ECORE_SP_SB_ID) 2393 return ECORE_INVAL; 2394 2395 if (!p_info->b_allow_pf_vf_change) { 2396 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); 2397 return ECORE_INVAL; 2398 } 2399 2400 /* If we're moving a SB from PF to VF, the client had to specify 2401 * which vector it wants to move. 2402 */ 2403 if (b_to_vf) { 2404 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 2405 if (igu_sb_id == ECORE_SB_INVALID_IDX) 2406 return ECORE_INVAL; 2407 } 2408 2409 /* If we're moving a SB from VF to PF, need to validate there isn't 2410 * already a line configured for that vector. 2411 */ 2412 if (!b_to_vf) { 2413 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != 2414 ECORE_SB_INVALID_IDX) 2415 return ECORE_INVAL; 2416 } 2417 2418 /* We need to validate that the SB can actually be relocated. 2419 * This would also handle the previous case where we've explicitly 2420 * stated which IGU SB needs to move. 2421 */ 2422 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2423 igu_sb_id++) { 2424 p_block = &p_info->entry[igu_sb_id]; 2425 2426 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2427 !(p_block->status & ECORE_IGU_STATUS_FREE) || 2428 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { 2429 if (b_to_vf) 2430 return ECORE_INVAL; 2431 else 2432 continue; 2433 } 2434 2435 break; 2436 } 2437 2438 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { 2439 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2440 "Failed to find a free SB to move\n"); 2441 return ECORE_INVAL; 2442 } 2443 2444 /* At this point, p_block points to the SB we want to relocate */ 2445 if (b_to_vf) { 2446 p_block->status &= ~ECORE_IGU_STATUS_PF; 2447 2448 /* It doesn't matter which VF number we choose, since we're 2449 * going to disable the line; But let's keep it in range. 2450 */ 2451 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2452 2453 p_block->function_id = (u8)vf_num; 2454 p_block->is_pf = 0; 2455 p_block->vector_number = 0; 2456 2457 p_info->usage.cnt--; 2458 p_info->usage.free_cnt--; 2459 p_info->usage.iov_cnt++; 2460 p_info->usage.free_cnt_iov++; 2461 2462 /* TODO - if SBs aren't really the limiting factor, 2463 * then it might not be accurate [in the since that 2464 * we might not need decrement the feature]. 2465 */ 2466 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2467 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2468 } else { 2469 p_block->status |= ECORE_IGU_STATUS_PF; 2470 p_block->function_id = p_hwfn->rel_pf_id; 2471 p_block->is_pf = 1; 2472 p_block->vector_number = sb_id + 1; 2473 2474 p_info->usage.cnt++; 2475 p_info->usage.free_cnt++; 2476 p_info->usage.iov_cnt--; 2477 p_info->usage.free_cnt_iov--; 2478 2479 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2480 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2481 } 2482 2483 /* Update the IGU and CAU with the new configuration */ 2484 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2485 p_block->function_id); 2486 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2487 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2488 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2489 p_block->vector_number); 2490 2491 ecore_wr(p_hwfn, p_ptt, 2492 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2493 val); 2494 2495 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2496 igu_sb_id, vf_num, 2497 p_block->is_pf ? 0 : 1); 2498 2499 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2500 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2501 igu_sb_id, p_block->function_id, 2502 p_block->is_pf, p_block->vector_number); 2503 2504 return ECORE_SUCCESS; 2505 } 2506 2507 /** 2508 * @brief Initialize igu runtime registers 2509 * 2510 * @param p_hwfn 2511 */ 2512 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2513 { 2514 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2515 2516 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2517 } 2518 2519 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2520 IGU_CMD_INT_ACK_BASE) 2521 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2522 IGU_CMD_INT_ACK_BASE) 2523 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2524 { 2525 u32 intr_status_hi = 0, intr_status_lo = 0; 2526 u64 intr_status = 0; 2527 2528 intr_status_lo = REG_RD(p_hwfn, 2529 GTT_BAR0_MAP_REG_IGU_CMD + 2530 LSB_IGU_CMD_ADDR * 8); 2531 intr_status_hi = REG_RD(p_hwfn, 2532 GTT_BAR0_MAP_REG_IGU_CMD + 2533 MSB_IGU_CMD_ADDR * 8); 2534 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2535 2536 return intr_status; 2537 } 2538 2539 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2540 { 2541 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2542 p_hwfn->b_sp_dpc_enabled = true; 2543 } 2544 2545 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2546 { 2547 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2548 if (!p_hwfn->sp_dpc) 2549 return ECORE_NOMEM; 2550 2551 return ECORE_SUCCESS; 2552 } 2553 2554 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2555 { 2556 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2557 p_hwfn->sp_dpc = OSAL_NULL; 2558 } 2559 2560 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2561 struct ecore_ptt *p_ptt) 2562 { 2563 enum _ecore_status_t rc = ECORE_SUCCESS; 2564 2565 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2566 if (rc != ECORE_SUCCESS) { 2567 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); 2568 return rc; 2569 } 2570 2571 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2572 if (rc != ECORE_SUCCESS) { 2573 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2574 return rc; 2575 } 2576 2577 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2578 if (rc != ECORE_SUCCESS) 2579 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2580 2581 return rc; 2582 } 2583 2584 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2585 { 2586 ecore_int_sp_sb_free(p_hwfn); 2587 ecore_int_sb_attn_free(p_hwfn); 2588 ecore_int_sp_dpc_free(p_hwfn); 2589 } 2590 2591 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2592 { 2593 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2594 return; 2595 2596 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2597 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2598 ecore_int_sp_dpc_setup(p_hwfn); 2599 } 2600 2601 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2602 struct ecore_sb_cnt_info *p_sb_cnt_info) 2603 { 2604 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2605 2606 if (!p_igu_info || !p_sb_cnt_info) 2607 return; 2608 2609 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2610 sizeof(*p_sb_cnt_info)); 2611 } 2612 2613 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2614 { 2615 int i; 2616 2617 for_each_hwfn(p_dev, i) 2618 p_dev->hwfns[i].b_int_requested = false; 2619 } 2620 2621 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2622 { 2623 p_dev->attn_clr_en = clr_enable; 2624 } 2625 2626 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2627 struct ecore_ptt *p_ptt, 2628 u8 timer_res, u16 sb_id, bool tx) 2629 { 2630 struct cau_sb_entry sb_entry; 2631 enum _ecore_status_t rc; 2632 2633 if (!p_hwfn->hw_init_done) { 2634 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2635 return ECORE_INVAL; 2636 } 2637 2638 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2639 sb_id * sizeof(u64), 2640 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2641 if (rc != ECORE_SUCCESS) { 2642 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2643 return rc; 2644 } 2645 2646 if (tx) 2647 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2648 else 2649 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2650 2651 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2652 (u64)(osal_uintptr_t)&sb_entry, 2653 CAU_REG_SB_VAR_MEMORY + 2654 sb_id * sizeof(u64), 2, 0); 2655 if (rc != ECORE_SUCCESS) { 2656 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2657 return rc; 2658 } 2659 2660 return rc; 2661 } 2662 2663 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2664 struct ecore_ptt *p_ptt, 2665 struct ecore_sb_info *p_sb, 2666 struct ecore_sb_info_dbg *p_info) 2667 { 2668 u16 sbid = p_sb->igu_sb_id; 2669 int i; 2670 2671 if (IS_VF(p_hwfn->p_dev)) 2672 return ECORE_INVAL; 2673 2674 if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2675 return ECORE_INVAL; 2676 2677 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2678 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2679 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2680 IGU_REG_CONSUMER_MEM + sbid * 4); 2681 2682 for (i = 0; i < PIS_PER_SB_E4; i++) 2683 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2684 CAU_REG_PI_MEMORY + 2685 sbid * 4 * PIS_PER_SB_E4 + i * 4); 2686 2687 return ECORE_SUCCESS; 2688 } 2689