1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/types.h> 34 #include <asm/byteorder.h> 35 #include <linux/io.h> 36 #include <linux/bitops.h> 37 #include <linux/delay.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/errno.h> 40 #include <linux/interrupt.h> 41 #include <linux/kernel.h> 42 #include <linux/pci.h> 43 #include <linux/slab.h> 44 #include <linux/string.h> 45 #include "qed.h" 46 #include "qed_hsi.h" 47 #include "qed_hw.h" 48 #include "qed_init_ops.h" 49 #include "qed_int.h" 50 #include "qed_mcp.h" 51 #include "qed_reg_addr.h" 52 #include "qed_sp.h" 53 #include "qed_sriov.h" 54 #include "qed_vf.h" 55 56 struct qed_pi_info { 57 qed_int_comp_cb_t comp_cb; 58 void *cookie; 59 }; 60 61 struct qed_sb_sp_info { 62 struct qed_sb_info sb_info; 63 64 /* per protocol index data */ 65 struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; 66 }; 67 68 enum qed_attention_type { 69 QED_ATTN_TYPE_ATTN, 70 QED_ATTN_TYPE_PARITY, 71 }; 72 73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 74 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 75 76 struct aeu_invert_reg_bit { 77 char bit_name[30]; 78 79 #define ATTENTION_PARITY (1 << 0) 80 81 #define ATTENTION_LENGTH_MASK (0x00000ff0) 82 #define ATTENTION_LENGTH_SHIFT (4) 83 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 84 ATTENTION_LENGTH_SHIFT) 85 #define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT) 86 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 87 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 88 ATTENTION_PARITY) 89 90 /* Multiple bits start with this offset */ 91 #define ATTENTION_OFFSET_MASK (0x000ff000) 92 #define ATTENTION_OFFSET_SHIFT (12) 93 94 #define ATTENTION_BB_MASK (0x00700000) 95 #define ATTENTION_BB_SHIFT (20) 96 #define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) 97 #define ATTENTION_BB_DIFFERENT BIT(23) 98 99 unsigned int flags; 100 101 /* Callback to call if attention will be triggered */ 102 int (*cb)(struct qed_hwfn *p_hwfn); 103 104 enum block_id block_index; 105 }; 106 107 struct aeu_invert_reg { 108 struct aeu_invert_reg_bit bits[32]; 109 }; 110 111 #define MAX_ATTN_GRPS (8) 112 #define NUM_ATTN_REGS (9) 113 114 /* Specific HW attention callbacks */ 115 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) 116 { 117 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 118 119 /* This might occur on certain instances; Log it once then mask it */ 120 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 121 tmp); 122 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 123 0xffffffff); 124 125 return 0; 126 } 127 128 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 129 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 130 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 131 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) 132 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 133 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) 134 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 135 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) 136 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 137 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) 138 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 139 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) 140 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 141 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) 142 { 143 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 144 PSWHST_REG_INCORRECT_ACCESS_VALID); 145 146 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { 147 u32 addr, data, length; 148 149 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 150 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 151 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 152 PSWHST_REG_INCORRECT_ACCESS_DATA); 153 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 154 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 155 156 DP_INFO(p_hwfn->cdev, 157 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 158 addr, length, 159 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), 160 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), 161 (u8) GET_FIELD(data, 162 ATTENTION_INCORRECT_ACCESS_VF_VALID), 163 (u8) GET_FIELD(data, 164 ATTENTION_INCORRECT_ACCESS_CLIENT), 165 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), 166 (u8) GET_FIELD(data, 167 ATTENTION_INCORRECT_ACCESS_BYTE_EN), 168 data); 169 } 170 171 return 0; 172 } 173 174 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) 175 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) 176 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) 177 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) 178 #define QED_GRC_ATTENTION_MASTER_MASK (0xf) 179 #define QED_GRC_ATTENTION_MASTER_SHIFT (24) 180 #define QED_GRC_ATTENTION_PF_MASK (0xf) 181 #define QED_GRC_ATTENTION_PF_SHIFT (0) 182 #define QED_GRC_ATTENTION_VF_MASK (0xff) 183 #define QED_GRC_ATTENTION_VF_SHIFT (4) 184 #define QED_GRC_ATTENTION_PRIV_MASK (0x3) 185 #define QED_GRC_ATTENTION_PRIV_SHIFT (14) 186 #define QED_GRC_ATTENTION_PRIV_VF (0) 187 static const char *attn_master_to_str(u8 master) 188 { 189 switch (master) { 190 case 1: return "PXP"; 191 case 2: return "MCP"; 192 case 3: return "MSDM"; 193 case 4: return "PSDM"; 194 case 5: return "YSDM"; 195 case 6: return "USDM"; 196 case 7: return "TSDM"; 197 case 8: return "XSDM"; 198 case 9: return "DBU"; 199 case 10: return "DMAE"; 200 default: 201 return "Unknown"; 202 } 203 } 204 205 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) 206 { 207 u32 tmp, tmp2; 208 209 /* We've already cleared the timeout interrupt register, so we learn 210 * of interrupts via the validity register 211 */ 212 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 213 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 214 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) 215 goto out; 216 217 /* Read the GRC timeout information */ 218 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 219 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 220 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 221 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 222 223 DP_INFO(p_hwfn->cdev, 224 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 225 tmp2, tmp, 226 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 227 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, 228 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 229 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 230 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 231 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)", 232 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 233 234 out: 235 /* Regardles of anything else, clean the validity bit */ 236 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 237 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 238 return 0; 239 } 240 241 #define PGLUE_ATTENTION_VALID (1 << 29) 242 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 243 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 244 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 245 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 246 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 247 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 248 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 249 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 250 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 251 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 252 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 253 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 254 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 255 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 256 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 257 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 258 259 int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, 260 struct qed_ptt *p_ptt) 261 { 262 u32 tmp; 263 264 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 265 if (tmp & PGLUE_ATTENTION_VALID) { 266 u32 addr_lo, addr_hi, details; 267 268 addr_lo = qed_rd(p_hwfn, p_ptt, 269 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 270 addr_hi = qed_rd(p_hwfn, p_ptt, 271 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 272 details = qed_rd(p_hwfn, p_ptt, 273 PGLUE_B_REG_TX_ERR_WR_DETAILS); 274 275 DP_NOTICE(p_hwfn, 276 "Illegal write by chip to [%08x:%08x] blocked.\n" 277 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 278 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 279 addr_hi, addr_lo, details, 280 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 281 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 282 GET_FIELD(details, 283 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 284 tmp, 285 GET_FIELD(tmp, 286 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 287 GET_FIELD(tmp, 288 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 289 GET_FIELD(tmp, 290 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 291 } 292 293 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 294 if (tmp & PGLUE_ATTENTION_RD_VALID) { 295 u32 addr_lo, addr_hi, details; 296 297 addr_lo = qed_rd(p_hwfn, p_ptt, 298 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 299 addr_hi = qed_rd(p_hwfn, p_ptt, 300 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 301 details = qed_rd(p_hwfn, p_ptt, 302 PGLUE_B_REG_TX_ERR_RD_DETAILS); 303 304 DP_NOTICE(p_hwfn, 305 "Illegal read by chip from [%08x:%08x] blocked.\n" 306 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 307 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 308 addr_hi, addr_lo, details, 309 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 310 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 311 GET_FIELD(details, 312 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 313 tmp, 314 GET_FIELD(tmp, 315 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, 316 GET_FIELD(tmp, 317 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 318 GET_FIELD(tmp, 319 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 320 } 321 322 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 323 if (tmp & PGLUE_ATTENTION_ICPL_VALID) 324 DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); 325 326 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 327 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { 328 u32 addr_hi, addr_lo; 329 330 addr_lo = qed_rd(p_hwfn, p_ptt, 331 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 332 addr_hi = qed_rd(p_hwfn, p_ptt, 333 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 334 335 DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", 336 tmp, addr_hi, addr_lo); 337 } 338 339 tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 340 if (tmp & PGLUE_ATTENTION_ILT_VALID) { 341 u32 addr_hi, addr_lo, details; 342 343 addr_lo = qed_rd(p_hwfn, p_ptt, 344 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 345 addr_hi = qed_rd(p_hwfn, p_ptt, 346 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 347 details = qed_rd(p_hwfn, p_ptt, 348 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 349 350 DP_NOTICE(p_hwfn, 351 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 352 details, tmp, addr_hi, addr_lo); 353 } 354 355 /* Clear the indications */ 356 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); 357 358 return 0; 359 } 360 361 static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) 362 { 363 return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 364 } 365 366 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) 367 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 368 #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 369 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) 370 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) 371 372 #define QED_DB_REC_COUNT 1000 373 #define QED_DB_REC_INTERVAL 100 374 375 static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, 376 struct qed_ptt *p_ptt) 377 { 378 u32 count = QED_DB_REC_COUNT; 379 u32 usage = 1; 380 381 /* wait for usage to zero or count to run out. This is necessary since 382 * EDPM doorbell transactions can take multiple 64b cycles, and as such 383 * can "split" over the pci. Possibly, the doorbell drop can happen with 384 * half an EDPM in the queue and other half dropped. Another EDPM 385 * doorbell to the same address (from doorbell recovery mechanism or 386 * from the doorbelling entity) could have first half dropped and second 387 * half interpreted as continuation of the first. To prevent such 388 * malformed doorbells from reaching the device, flush the queue before 389 * releasing the overflow sticky indication. 390 */ 391 while (count-- && usage) { 392 usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 393 udelay(QED_DB_REC_INTERVAL); 394 } 395 396 /* should have been depleted by now */ 397 if (usage) { 398 DP_NOTICE(p_hwfn->cdev, 399 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 400 QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); 401 return -EBUSY; 402 } 403 404 return 0; 405 } 406 407 int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 408 { 409 u32 overflow; 410 int rc; 411 412 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 413 DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); 414 if (!overflow) { 415 qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); 416 return 0; 417 } 418 419 if (qed_edpm_enabled(p_hwfn)) { 420 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 421 if (rc) 422 return rc; 423 } 424 425 /* Flush any pending (e)dpm as they may never arrive */ 426 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 427 428 /* Release overflow sticky indication (stop silently dropping everything) */ 429 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 430 431 /* Repeat all last doorbells (doorbell drop recovery) */ 432 qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); 433 434 return 0; 435 } 436 437 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 438 { 439 u32 int_sts, first_drop_reason, details, address, all_drops_reason; 440 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 441 int rc; 442 443 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 444 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); 445 446 /* int_sts may be zero since all PFs were interrupted for doorbell 447 * overflow but another one already handled it. Can abort here. If 448 * This PF also requires overflow recovery we will be interrupted again. 449 * The masked almost full indication may also be set. Ignoring. 450 */ 451 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) 452 return 0; 453 454 /* check if db_drop or overflow happened */ 455 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 456 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 457 /* Obtain data about db drop/overflow */ 458 first_drop_reason = qed_rd(p_hwfn, p_ptt, 459 DORQ_REG_DB_DROP_REASON) & 460 QED_DORQ_ATTENTION_REASON_MASK; 461 details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); 462 address = qed_rd(p_hwfn, p_ptt, 463 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 464 all_drops_reason = qed_rd(p_hwfn, p_ptt, 465 DORQ_REG_DB_DROP_DETAILS_REASON); 466 467 /* Log info */ 468 DP_NOTICE(p_hwfn->cdev, 469 "Doorbell drop occurred\n" 470 "Address\t\t0x%08x\t(second BAR address)\n" 471 "FID\t\t0x%04x\t\t(Opaque FID)\n" 472 "Size\t\t0x%04x\t\t(in bytes)\n" 473 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 474 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", 475 address, 476 GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), 477 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 478 first_drop_reason, all_drops_reason); 479 480 rc = qed_db_rec_handler(p_hwfn, p_ptt); 481 qed_periodic_db_rec_start(p_hwfn); 482 if (rc) 483 return rc; 484 485 /* Clear the doorbell drop details and prepare for next drop */ 486 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 487 488 /* Mark interrupt as handled (note: even if drop was due to a different 489 * reason than overflow we mark as handled) 490 */ 491 qed_wr(p_hwfn, 492 p_ptt, 493 DORQ_REG_INT_STS_WR, 494 DORQ_REG_INT_STS_DB_DROP | 495 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 496 497 /* If there are no indications other than drop indications, success */ 498 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 499 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 500 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 501 return 0; 502 } 503 504 /* Some other indication was present - non recoverable */ 505 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 506 507 return -EINVAL; 508 } 509 510 /* Instead of major changes to the data-structure, we have a some 'special' 511 * identifiers for sources that changed meaning between adapters. 512 */ 513 enum aeu_invert_reg_special_type { 514 AEU_INVERT_REG_SPECIAL_CNIG_0, 515 AEU_INVERT_REG_SPECIAL_CNIG_1, 516 AEU_INVERT_REG_SPECIAL_CNIG_2, 517 AEU_INVERT_REG_SPECIAL_CNIG_3, 518 AEU_INVERT_REG_SPECIAL_MAX, 519 }; 520 521 static struct aeu_invert_reg_bit 522 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 523 {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 524 {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 525 {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 526 {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, 527 }; 528 529 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 530 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 531 { 532 { /* After Invert 1 */ 533 {"GPIO0 function%d", 534 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 535 } 536 }, 537 538 { 539 { /* After Invert 2 */ 540 {"PGLUE config_space", ATTENTION_SINGLE, 541 NULL, MAX_BLOCK_ID}, 542 {"PGLUE misc_flr", ATTENTION_SINGLE, 543 NULL, MAX_BLOCK_ID}, 544 {"PGLUE B RBC", ATTENTION_PAR_INT, 545 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, 546 {"PGLUE misc_mctp", ATTENTION_SINGLE, 547 NULL, MAX_BLOCK_ID}, 548 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 549 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 550 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 551 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | 552 (1 << ATTENTION_OFFSET_SHIFT), 553 NULL, MAX_BLOCK_ID}, 554 {"PCIE glue/PXP VPD %d", 555 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, 556 } 557 }, 558 559 { 560 { /* After Invert 3 */ 561 {"General Attention %d", 562 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 563 } 564 }, 565 566 { 567 { /* After Invert 4 */ 568 {"General Attention 32", ATTENTION_SINGLE, 569 NULL, MAX_BLOCK_ID}, 570 {"General Attention %d", 571 (2 << ATTENTION_LENGTH_SHIFT) | 572 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, 573 {"General Attention 35", ATTENTION_SINGLE, 574 NULL, MAX_BLOCK_ID}, 575 {"NWS Parity", 576 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 577 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 578 NULL, BLOCK_NWS}, 579 {"NWS Interrupt", 580 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 581 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 582 NULL, BLOCK_NWS}, 583 {"NWM Parity", 584 ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 585 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 586 NULL, BLOCK_NWM}, 587 {"NWM Interrupt", 588 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 589 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 590 NULL, BLOCK_NWM}, 591 {"MCP CPU", ATTENTION_SINGLE, 592 qed_mcp_attn_cb, MAX_BLOCK_ID}, 593 {"MCP Watchdog timer", ATTENTION_SINGLE, 594 NULL, MAX_BLOCK_ID}, 595 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 596 {"AVS stop status ready", ATTENTION_SINGLE, 597 NULL, MAX_BLOCK_ID}, 598 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 599 {"MSTAT per-path", ATTENTION_PAR_INT, 600 NULL, MAX_BLOCK_ID}, 601 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), 602 NULL, MAX_BLOCK_ID}, 603 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, 604 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, 605 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, 606 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, 607 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, 608 } 609 }, 610 611 { 612 { /* After Invert 5 */ 613 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, 614 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, 615 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, 616 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, 617 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, 618 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, 619 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, 620 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, 621 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, 622 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, 623 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, 624 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, 625 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, 626 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, 627 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, 628 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, 629 } 630 }, 631 632 { 633 { /* After Invert 6 */ 634 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, 635 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, 636 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, 637 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, 638 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, 639 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, 640 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, 641 {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, 642 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, 643 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, 644 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, 645 {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, 646 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, 647 {"DORQ", ATTENTION_PAR_INT, 648 qed_dorq_attn_cb, BLOCK_DORQ}, 649 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, 650 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, 651 } 652 }, 653 654 { 655 { /* After Invert 7 */ 656 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, 657 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, 658 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, 659 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, 660 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 661 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, 662 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, 663 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, 664 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, 665 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, 666 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, 667 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, 668 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, 669 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, 670 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, 671 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, 672 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, 673 } 674 }, 675 676 { 677 { /* After Invert 8 */ 678 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, 679 NULL, BLOCK_PSWRQ2}, 680 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, 681 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, 682 NULL, BLOCK_PSWWR2}, 683 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, 684 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, 685 NULL, BLOCK_PSWRD2}, 686 {"PSWHST", ATTENTION_PAR_INT, 687 qed_pswhst_attn_cb, BLOCK_PSWHST}, 688 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, 689 NULL, BLOCK_PSWHST2}, 690 {"GRC", ATTENTION_PAR_INT, 691 qed_grc_attn_cb, BLOCK_GRC}, 692 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, 693 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, 694 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 695 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 696 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 697 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 698 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 699 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 700 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, 701 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, 702 NULL, BLOCK_PGLCS}, 703 {"PERST_B assertion", ATTENTION_SINGLE, 704 NULL, MAX_BLOCK_ID}, 705 {"PERST_B deassertion", ATTENTION_SINGLE, 706 NULL, MAX_BLOCK_ID}, 707 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 708 NULL, MAX_BLOCK_ID}, 709 } 710 }, 711 712 { 713 { /* After Invert 9 */ 714 {"MCP Latched memory", ATTENTION_PAR, 715 NULL, MAX_BLOCK_ID}, 716 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 717 NULL, MAX_BLOCK_ID}, 718 {"MCP Latched ump_tx", ATTENTION_PAR, 719 NULL, MAX_BLOCK_ID}, 720 {"MCP Latched scratchpad", ATTENTION_PAR, 721 NULL, MAX_BLOCK_ID}, 722 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 723 NULL, MAX_BLOCK_ID}, 724 } 725 }, 726 }; 727 728 static struct aeu_invert_reg_bit * 729 qed_int_aeu_translate(struct qed_hwfn *p_hwfn, 730 struct aeu_invert_reg_bit *p_bit) 731 { 732 if (!QED_IS_BB(p_hwfn->cdev)) 733 return p_bit; 734 735 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 736 return p_bit; 737 738 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 739 ATTENTION_BB_SHIFT]; 740 } 741 742 static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, 743 struct aeu_invert_reg_bit *p_bit) 744 { 745 return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & 746 ATTENTION_PARITY); 747 } 748 749 #define ATTN_STATE_BITS (0xfff) 750 #define ATTN_BITS_MASKABLE (0x3ff) 751 struct qed_sb_attn_info { 752 /* Virtual & Physical address of the SB */ 753 struct atten_status_block *sb_attn; 754 dma_addr_t sb_phys; 755 756 /* Last seen running index */ 757 u16 index; 758 759 /* A mask of the AEU bits resulting in a parity error */ 760 u32 parity_mask[NUM_ATTN_REGS]; 761 762 /* A pointer to the attention description structure */ 763 struct aeu_invert_reg *p_aeu_desc; 764 765 /* Previously asserted attentions, which are still unasserted */ 766 u16 known_attn; 767 768 /* Cleanup address for the link's general hw attention */ 769 u32 mfw_attn_addr; 770 }; 771 772 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 773 struct qed_sb_attn_info *p_sb_desc) 774 { 775 u16 rc = 0, index; 776 777 /* Make certain HW write took affect */ 778 mmiowb(); 779 780 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 781 if (p_sb_desc->index != index) { 782 p_sb_desc->index = index; 783 rc = QED_SB_ATT_IDX; 784 } 785 786 /* Make certain we got a consistent view with HW */ 787 mmiowb(); 788 789 return rc; 790 } 791 792 /** 793 * @brief qed_int_assertion - handles asserted attention bits 794 * 795 * @param p_hwfn 796 * @param asserted_bits newly asserted bits 797 * @return int 798 */ 799 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) 800 { 801 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 802 u32 igu_mask; 803 804 /* Mask the source of the attention in the IGU */ 805 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 806 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 807 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 808 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 809 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 810 811 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 812 "inner known ATTN state: 0x%04x --> 0x%04x\n", 813 sb_attn_sw->known_attn, 814 sb_attn_sw->known_attn | asserted_bits); 815 sb_attn_sw->known_attn |= asserted_bits; 816 817 /* Handle MCP events */ 818 if (asserted_bits & 0x100) { 819 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 820 /* Clean the MCP attention */ 821 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 822 sb_attn_sw->mfw_attn_addr, 0); 823 } 824 825 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 826 GTT_BAR0_MAP_REG_IGU_CMD + 827 ((IGU_CMD_ATTN_BIT_SET_UPPER - 828 IGU_CMD_INT_ACK_BASE) << 3), 829 (u32)asserted_bits); 830 831 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", 832 asserted_bits); 833 834 return 0; 835 } 836 837 static void qed_int_attn_print(struct qed_hwfn *p_hwfn, 838 enum block_id id, 839 enum dbg_attn_type type, bool b_clear) 840 { 841 struct dbg_attn_block_result attn_results; 842 enum dbg_status status; 843 844 memset(&attn_results, 0, sizeof(attn_results)); 845 846 status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, 847 b_clear, &attn_results); 848 if (status != DBG_STATUS_OK) 849 DP_NOTICE(p_hwfn, 850 "Failed to parse attention information [status: %s]\n", 851 qed_dbg_get_status_str(status)); 852 else 853 qed_dbg_parse_attn(p_hwfn, &attn_results); 854 } 855 856 /** 857 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single 858 * cause of the attention 859 * 860 * @param p_hwfn 861 * @param p_aeu - descriptor of an AEU bit which caused the attention 862 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 863 * this bit to this group. 864 * @param bit_index - index of this bit in the aeu_en_reg 865 * 866 * @return int 867 */ 868 static int 869 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, 870 struct aeu_invert_reg_bit *p_aeu, 871 u32 aeu_en_reg, 872 const char *p_bit_name, u32 bitmask) 873 { 874 bool b_fatal = false; 875 int rc = -EINVAL; 876 u32 val; 877 878 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 879 p_bit_name, bitmask); 880 881 /* Call callback before clearing the interrupt status */ 882 if (p_aeu->cb) { 883 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 884 p_bit_name); 885 rc = p_aeu->cb(p_hwfn); 886 } 887 888 if (rc) 889 b_fatal = true; 890 891 /* Print HW block interrupt registers */ 892 if (p_aeu->block_index != MAX_BLOCK_ID) 893 qed_int_attn_print(p_hwfn, p_aeu->block_index, 894 ATTN_TYPE_INTERRUPT, !b_fatal); 895 896 897 /* If the attention is benign, no need to prevent it */ 898 if (!rc) 899 goto out; 900 901 /* Prevent this Attention from being asserted in the future */ 902 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 903 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); 904 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 905 p_bit_name); 906 907 out: 908 return rc; 909 } 910 911 /** 912 * @brief qed_int_deassertion_parity - handle a single parity AEU source 913 * 914 * @param p_hwfn 915 * @param p_aeu - descriptor of an AEU bit which caused the parity 916 * @param aeu_en_reg - address of the AEU enable register 917 * @param bit_index 918 */ 919 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, 920 struct aeu_invert_reg_bit *p_aeu, 921 u32 aeu_en_reg, u8 bit_index) 922 { 923 u32 block_id = p_aeu->block_index, mask, val; 924 925 DP_NOTICE(p_hwfn->cdev, 926 "%s parity attention is set [address 0x%08x, bit %d]\n", 927 p_aeu->bit_name, aeu_en_reg, bit_index); 928 929 if (block_id != MAX_BLOCK_ID) { 930 qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 931 932 /* In BB, there's a single parity bit for several blocks */ 933 if (block_id == BLOCK_BTB) { 934 qed_int_attn_print(p_hwfn, BLOCK_OPTE, 935 ATTN_TYPE_PARITY, false); 936 qed_int_attn_print(p_hwfn, BLOCK_MCP, 937 ATTN_TYPE_PARITY, false); 938 } 939 } 940 941 /* Prevent this parity error from being re-asserted */ 942 mask = ~BIT(bit_index); 943 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 944 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 945 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 946 p_aeu->bit_name); 947 } 948 949 /** 950 * @brief - handles deassertion of previously asserted attentions. 951 * 952 * @param p_hwfn 953 * @param deasserted_bits - newly deasserted bits 954 * @return int 955 * 956 */ 957 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, 958 u16 deasserted_bits) 959 { 960 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 961 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 962 u8 i, j, k, bit_idx; 963 int rc = 0; 964 965 /* Read the attention registers in the AEU */ 966 for (i = 0; i < NUM_ATTN_REGS; i++) { 967 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 968 MISC_REG_AEU_AFTER_INVERT_1_IGU + 969 i * 0x4); 970 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 971 "Deasserted bits [%d]: %08x\n", 972 i, aeu_inv_arr[i]); 973 } 974 975 /* Find parity attentions first */ 976 for (i = 0; i < NUM_ATTN_REGS; i++) { 977 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 978 u32 parities; 979 980 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 981 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 982 983 /* Skip register in which no parity bit is currently set */ 984 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 985 if (!parities) 986 continue; 987 988 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 989 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 990 991 if (qed_int_is_parity_flag(p_hwfn, p_bit) && 992 !!(parities & BIT(bit_idx))) 993 qed_int_deassertion_parity(p_hwfn, p_bit, 994 aeu_en, bit_idx); 995 996 bit_idx += ATTENTION_LENGTH(p_bit->flags); 997 } 998 } 999 1000 /* Find non-parity cause for attention and act */ 1001 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1002 struct aeu_invert_reg_bit *p_aeu; 1003 1004 /* Handle only groups whose attention is currently deasserted */ 1005 if (!(deasserted_bits & (1 << k))) 1006 continue; 1007 1008 for (i = 0; i < NUM_ATTN_REGS; i++) { 1009 u32 bits; 1010 1011 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1012 i * sizeof(u32) + 1013 k * sizeof(u32) * NUM_ATTN_REGS; 1014 1015 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1016 bits = aeu_inv_arr[i] & en; 1017 1018 /* Skip if no bit from this group is currently set */ 1019 if (!bits) 1020 continue; 1021 1022 /* Find all set bits from current register which belong 1023 * to current group, making them responsible for the 1024 * previous assertion. 1025 */ 1026 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1027 long unsigned int bitmask; 1028 u8 bit, bit_len; 1029 1030 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1031 p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); 1032 1033 bit = bit_idx; 1034 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1035 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { 1036 /* Skip Parity */ 1037 bit++; 1038 bit_len--; 1039 } 1040 1041 bitmask = bits & (((1 << bit_len) - 1) << bit); 1042 bitmask >>= bit; 1043 1044 if (bitmask) { 1045 u32 flags = p_aeu->flags; 1046 char bit_name[30]; 1047 u8 num; 1048 1049 num = (u8)find_first_bit(&bitmask, 1050 bit_len); 1051 1052 /* Some bits represent more than a 1053 * a single interrupt. Correctly print 1054 * their name. 1055 */ 1056 if (ATTENTION_LENGTH(flags) > 2 || 1057 ((flags & ATTENTION_PAR_INT) && 1058 ATTENTION_LENGTH(flags) > 1)) 1059 snprintf(bit_name, 30, 1060 p_aeu->bit_name, num); 1061 else 1062 strncpy(bit_name, 1063 p_aeu->bit_name, 30); 1064 1065 /* We now need to pass bitmask in its 1066 * correct position. 1067 */ 1068 bitmask <<= bit; 1069 1070 /* Handle source of the attention */ 1071 qed_int_deassertion_aeu_bit(p_hwfn, 1072 p_aeu, 1073 aeu_en, 1074 bit_name, 1075 bitmask); 1076 } 1077 1078 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1079 } 1080 } 1081 } 1082 1083 /* Clear IGU indication for the deasserted bits */ 1084 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 1085 GTT_BAR0_MAP_REG_IGU_CMD + 1086 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1087 IGU_CMD_INT_ACK_BASE) << 3), 1088 ~((u32)deasserted_bits)); 1089 1090 /* Unmask deasserted attentions in IGU */ 1091 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); 1092 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1093 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1094 1095 /* Clear deassertion from inner state */ 1096 sb_attn_sw->known_attn &= ~deasserted_bits; 1097 1098 return rc; 1099 } 1100 1101 static int qed_int_attentions(struct qed_hwfn *p_hwfn) 1102 { 1103 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1104 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1105 u32 attn_bits = 0, attn_acks = 0; 1106 u16 asserted_bits, deasserted_bits; 1107 __le16 index; 1108 int rc = 0; 1109 1110 /* Read current attention bits/acks - safeguard against attentions 1111 * by guaranting work on a synchronized timeframe 1112 */ 1113 do { 1114 index = p_sb_attn->sb_index; 1115 /* finish reading index before the loop condition */ 1116 dma_rmb(); 1117 attn_bits = le32_to_cpu(p_sb_attn->atten_bits); 1118 attn_acks = le32_to_cpu(p_sb_attn->atten_ack); 1119 } while (index != p_sb_attn->sb_index); 1120 p_sb_attn->sb_index = index; 1121 1122 /* Attention / Deassertion are meaningful (and in correct state) 1123 * only when they differ and consistent with known state - deassertion 1124 * when previous attention & current ack, and assertion when current 1125 * attention with no previous attention 1126 */ 1127 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1128 ~p_sb_attn_sw->known_attn; 1129 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1130 p_sb_attn_sw->known_attn; 1131 1132 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { 1133 DP_INFO(p_hwfn, 1134 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1135 index, attn_bits, attn_acks, asserted_bits, 1136 deasserted_bits, p_sb_attn_sw->known_attn); 1137 } else if (asserted_bits == 0x100) { 1138 DP_INFO(p_hwfn, "MFW indication via attention\n"); 1139 } else { 1140 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1141 "MFW indication [deassertion]\n"); 1142 } 1143 1144 if (asserted_bits) { 1145 rc = qed_int_assertion(p_hwfn, asserted_bits); 1146 if (rc) 1147 return rc; 1148 } 1149 1150 if (deasserted_bits) 1151 rc = qed_int_deassertion(p_hwfn, deasserted_bits); 1152 1153 return rc; 1154 } 1155 1156 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, 1157 void __iomem *igu_addr, u32 ack_cons) 1158 { 1159 struct igu_prod_cons_update igu_ack = { 0 }; 1160 1161 igu_ack.sb_id_and_flags = 1162 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1163 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1164 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1165 (IGU_SEG_ACCESS_ATTN << 1166 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1167 1168 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); 1169 1170 /* Both segments (interrupts & acks) are written to same place address; 1171 * Need to guarantee all commands will be received (in-order) by HW. 1172 */ 1173 mmiowb(); 1174 barrier(); 1175 } 1176 1177 void qed_int_sp_dpc(unsigned long hwfn_cookie) 1178 { 1179 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; 1180 struct qed_pi_info *pi_info = NULL; 1181 struct qed_sb_attn_info *sb_attn; 1182 struct qed_sb_info *sb_info; 1183 int arr_size; 1184 u16 rc = 0; 1185 1186 if (!p_hwfn->p_sp_sb) { 1187 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); 1188 return; 1189 } 1190 1191 sb_info = &p_hwfn->p_sp_sb->sb_info; 1192 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1193 if (!sb_info) { 1194 DP_ERR(p_hwfn->cdev, 1195 "Status block is NULL - cannot ack interrupts\n"); 1196 return; 1197 } 1198 1199 if (!p_hwfn->p_sb_attn) { 1200 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); 1201 return; 1202 } 1203 sb_attn = p_hwfn->p_sb_attn; 1204 1205 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1206 p_hwfn, p_hwfn->my_id); 1207 1208 /* Disable ack for def status block. Required both for msix + 1209 * inta in non-mask mode, in inta does no harm. 1210 */ 1211 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1212 1213 /* Gather Interrupts/Attentions information */ 1214 if (!sb_info->sb_virt) { 1215 DP_ERR(p_hwfn->cdev, 1216 "Interrupt Status block is NULL - cannot check for new interrupts!\n"); 1217 } else { 1218 u32 tmp_index = sb_info->sb_ack; 1219 1220 rc = qed_sb_update_sb_idx(sb_info); 1221 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1222 "Interrupt indices: 0x%08x --> 0x%08x\n", 1223 tmp_index, sb_info->sb_ack); 1224 } 1225 1226 if (!sb_attn || !sb_attn->sb_attn) { 1227 DP_ERR(p_hwfn->cdev, 1228 "Attentions Status block is NULL - cannot check for new attentions!\n"); 1229 } else { 1230 u16 tmp_index = sb_attn->index; 1231 1232 rc |= qed_attn_update_idx(p_hwfn, sb_attn); 1233 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, 1234 "Attention indices: 0x%08x --> 0x%08x\n", 1235 tmp_index, sb_attn->index); 1236 } 1237 1238 /* Check if we expect interrupts at this time. if not just ack them */ 1239 if (!(rc & QED_SB_EVENT_MASK)) { 1240 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1241 return; 1242 } 1243 1244 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1245 if (!p_hwfn->p_dpc_ptt) { 1246 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); 1247 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1248 return; 1249 } 1250 1251 if (rc & QED_SB_ATT_IDX) 1252 qed_int_attentions(p_hwfn); 1253 1254 if (rc & QED_SB_IDX) { 1255 int pi; 1256 1257 /* Look for a free index */ 1258 for (pi = 0; pi < arr_size; pi++) { 1259 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1260 if (pi_info->comp_cb) 1261 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1262 } 1263 } 1264 1265 if (sb_attn && (rc & QED_SB_ATT_IDX)) 1266 /* This should be done before the interrupts are enabled, 1267 * since otherwise a new attention will be generated. 1268 */ 1269 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1270 1271 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1272 } 1273 1274 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) 1275 { 1276 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1277 1278 if (!p_sb) 1279 return; 1280 1281 if (p_sb->sb_attn) 1282 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1283 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1284 p_sb->sb_attn, p_sb->sb_phys); 1285 kfree(p_sb); 1286 p_hwfn->p_sb_attn = NULL; 1287 } 1288 1289 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, 1290 struct qed_ptt *p_ptt) 1291 { 1292 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1293 1294 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1295 1296 sb_info->index = 0; 1297 sb_info->known_attn = 0; 1298 1299 /* Configure Attention Status Block in IGU */ 1300 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1301 lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1302 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1303 upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); 1304 } 1305 1306 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, 1307 struct qed_ptt *p_ptt, 1308 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1309 { 1310 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1311 int i, j, k; 1312 1313 sb_info->sb_attn = sb_virt_addr; 1314 sb_info->sb_phys = sb_phy_addr; 1315 1316 /* Set the pointer to the AEU descriptors */ 1317 sb_info->p_aeu_desc = aeu_descs; 1318 1319 /* Calculate Parity Masks */ 1320 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1321 for (i = 0; i < NUM_ATTN_REGS; i++) { 1322 /* j is array index, k is bit index */ 1323 for (j = 0, k = 0; k < 32; j++) { 1324 struct aeu_invert_reg_bit *p_aeu; 1325 1326 p_aeu = &aeu_descs[i].bits[j]; 1327 if (qed_int_is_parity_flag(p_hwfn, p_aeu)) 1328 sb_info->parity_mask[i] |= 1 << k; 1329 1330 k += ATTENTION_LENGTH(p_aeu->flags); 1331 } 1332 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1333 "Attn Mask [Reg %d]: 0x%08x\n", 1334 i, sb_info->parity_mask[i]); 1335 } 1336 1337 /* Set the address of cleanup for the mcp attention */ 1338 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1339 MISC_REG_AEU_GENERAL_ATTN_0; 1340 1341 qed_int_sb_attn_setup(p_hwfn, p_ptt); 1342 } 1343 1344 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, 1345 struct qed_ptt *p_ptt) 1346 { 1347 struct qed_dev *cdev = p_hwfn->cdev; 1348 struct qed_sb_attn_info *p_sb; 1349 dma_addr_t p_phys = 0; 1350 void *p_virt; 1351 1352 /* SB struct */ 1353 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1354 if (!p_sb) 1355 return -ENOMEM; 1356 1357 /* SB ring */ 1358 p_virt = dma_alloc_coherent(&cdev->pdev->dev, 1359 SB_ATTN_ALIGNED_SIZE(p_hwfn), 1360 &p_phys, GFP_KERNEL); 1361 1362 if (!p_virt) { 1363 kfree(p_sb); 1364 return -ENOMEM; 1365 } 1366 1367 /* Attention setup */ 1368 p_hwfn->p_sb_attn = p_sb; 1369 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1370 1371 return 0; 1372 } 1373 1374 /* coalescing timeout = timeset << (timer_res + 1) */ 1375 #define QED_CAU_DEF_RX_USECS 24 1376 #define QED_CAU_DEF_TX_USECS 48 1377 1378 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, 1379 struct cau_sb_entry *p_sb_entry, 1380 u8 pf_id, u16 vf_number, u8 vf_valid) 1381 { 1382 struct qed_dev *cdev = p_hwfn->cdev; 1383 u32 cau_state; 1384 u8 timer_res; 1385 1386 memset(p_sb_entry, 0, sizeof(*p_sb_entry)); 1387 1388 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1389 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1390 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1391 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1392 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1393 1394 cau_state = CAU_HC_DISABLE_STATE; 1395 1396 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1397 cau_state = CAU_HC_ENABLE_STATE; 1398 if (!cdev->rx_coalesce_usecs) 1399 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; 1400 if (!cdev->tx_coalesce_usecs) 1401 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; 1402 } 1403 1404 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1405 if (cdev->rx_coalesce_usecs <= 0x7F) 1406 timer_res = 0; 1407 else if (cdev->rx_coalesce_usecs <= 0xFF) 1408 timer_res = 1; 1409 else 1410 timer_res = 2; 1411 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1412 1413 if (cdev->tx_coalesce_usecs <= 0x7F) 1414 timer_res = 0; 1415 else if (cdev->tx_coalesce_usecs <= 0xFF) 1416 timer_res = 1; 1417 else 1418 timer_res = 2; 1419 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1420 1421 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1422 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1423 } 1424 1425 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, 1426 struct qed_ptt *p_ptt, 1427 u16 igu_sb_id, 1428 u32 pi_index, 1429 enum qed_coalescing_fsm coalescing_fsm, 1430 u8 timeset) 1431 { 1432 struct cau_pi_entry pi_entry; 1433 u32 sb_offset, pi_offset; 1434 1435 if (IS_VF(p_hwfn->cdev)) 1436 return; 1437 1438 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1439 memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1440 1441 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1442 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) 1443 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1444 else 1445 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1446 1447 pi_offset = sb_offset + pi_index; 1448 if (p_hwfn->hw_init_done) { 1449 qed_wr(p_hwfn, p_ptt, 1450 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1451 *((u32 *)&(pi_entry))); 1452 } else { 1453 STORE_RT_REG(p_hwfn, 1454 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1455 *((u32 *)&(pi_entry))); 1456 } 1457 } 1458 1459 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, 1460 struct qed_ptt *p_ptt, 1461 dma_addr_t sb_phys, 1462 u16 igu_sb_id, u16 vf_number, u8 vf_valid) 1463 { 1464 struct cau_sb_entry sb_entry; 1465 1466 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1467 vf_number, vf_valid); 1468 1469 if (p_hwfn->hw_init_done) { 1470 /* Wide-bus, initialize via DMAE */ 1471 u64 phys_addr = (u64)sb_phys; 1472 1473 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, 1474 CAU_REG_SB_ADDR_MEMORY + 1475 igu_sb_id * sizeof(u64), 2, 0); 1476 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, 1477 CAU_REG_SB_VAR_MEMORY + 1478 igu_sb_id * sizeof(u64), 2, 0); 1479 } else { 1480 /* Initialize Status Block Address */ 1481 STORE_RT_REG_AGG(p_hwfn, 1482 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1483 igu_sb_id * 2, 1484 sb_phys); 1485 1486 STORE_RT_REG_AGG(p_hwfn, 1487 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1488 igu_sb_id * 2, 1489 sb_entry); 1490 } 1491 1492 /* Configure pi coalescing if set */ 1493 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { 1494 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1495 u8 timeset, timer_res; 1496 u8 i; 1497 1498 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1499 if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) 1500 timer_res = 0; 1501 else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) 1502 timer_res = 1; 1503 else 1504 timer_res = 2; 1505 timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); 1506 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1507 QED_COAL_RX_STATE_MACHINE, timeset); 1508 1509 if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) 1510 timer_res = 0; 1511 else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) 1512 timer_res = 1; 1513 else 1514 timer_res = 2; 1515 timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); 1516 for (i = 0; i < num_tc; i++) { 1517 qed_int_cau_conf_pi(p_hwfn, p_ptt, 1518 igu_sb_id, TX_PI(i), 1519 QED_COAL_TX_STATE_MACHINE, 1520 timeset); 1521 } 1522 } 1523 } 1524 1525 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, 1526 struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) 1527 { 1528 /* zero status block and ack counter */ 1529 sb_info->sb_ack = 0; 1530 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1531 1532 if (IS_PF(p_hwfn->cdev)) 1533 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1534 sb_info->igu_sb_id, 0, 0); 1535 } 1536 1537 struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) 1538 { 1539 struct qed_igu_block *p_block; 1540 u16 igu_id; 1541 1542 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1543 igu_id++) { 1544 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1545 1546 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1547 !(p_block->status & QED_IGU_STATUS_FREE)) 1548 continue; 1549 1550 if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) 1551 return p_block; 1552 } 1553 1554 return NULL; 1555 } 1556 1557 static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) 1558 { 1559 struct qed_igu_block *p_block; 1560 u16 igu_id; 1561 1562 for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 1563 igu_id++) { 1564 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1565 1566 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1567 !p_block->is_pf || 1568 p_block->vector_number != vector_id) 1569 continue; 1570 1571 return igu_id; 1572 } 1573 1574 return QED_SB_INVALID_IDX; 1575 } 1576 1577 u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 1578 { 1579 u16 igu_sb_id; 1580 1581 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1582 if (sb_id == QED_SP_SB_ID) 1583 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1584 else if (IS_PF(p_hwfn->cdev)) 1585 igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1586 else 1587 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); 1588 1589 if (sb_id == QED_SP_SB_ID) 1590 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1591 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1592 else 1593 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1594 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1595 1596 return igu_sb_id; 1597 } 1598 1599 int qed_int_sb_init(struct qed_hwfn *p_hwfn, 1600 struct qed_ptt *p_ptt, 1601 struct qed_sb_info *sb_info, 1602 void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) 1603 { 1604 sb_info->sb_virt = sb_virt_addr; 1605 sb_info->sb_phys = sb_phy_addr; 1606 1607 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 1608 1609 if (sb_id != QED_SP_SB_ID) { 1610 if (IS_PF(p_hwfn->cdev)) { 1611 struct qed_igu_info *p_info; 1612 struct qed_igu_block *p_block; 1613 1614 p_info = p_hwfn->hw_info.p_igu_info; 1615 p_block = &p_info->entry[sb_info->igu_sb_id]; 1616 1617 p_block->sb_info = sb_info; 1618 p_block->status &= ~QED_IGU_STATUS_FREE; 1619 p_info->usage.free_cnt--; 1620 } else { 1621 qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1622 } 1623 } 1624 1625 sb_info->cdev = p_hwfn->cdev; 1626 1627 /* The igu address will hold the absolute address that needs to be 1628 * written to for a specific status block 1629 */ 1630 if (IS_PF(p_hwfn->cdev)) { 1631 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1632 GTT_BAR0_MAP_REG_IGU_CMD + 1633 (sb_info->igu_sb_id << 3); 1634 } else { 1635 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 1636 PXP_VF_BAR0_START_IGU + 1637 ((IGU_CMD_INT_ACK_BASE + 1638 sb_info->igu_sb_id) << 3); 1639 } 1640 1641 sb_info->flags |= QED_SB_INFO_INIT; 1642 1643 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); 1644 1645 return 0; 1646 } 1647 1648 int qed_int_sb_release(struct qed_hwfn *p_hwfn, 1649 struct qed_sb_info *sb_info, u16 sb_id) 1650 { 1651 struct qed_igu_block *p_block; 1652 struct qed_igu_info *p_info; 1653 1654 if (!sb_info) 1655 return 0; 1656 1657 /* zero status block and ack counter */ 1658 sb_info->sb_ack = 0; 1659 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1660 1661 if (IS_VF(p_hwfn->cdev)) { 1662 qed_vf_set_sb_info(p_hwfn, sb_id, NULL); 1663 return 0; 1664 } 1665 1666 p_info = p_hwfn->hw_info.p_igu_info; 1667 p_block = &p_info->entry[sb_info->igu_sb_id]; 1668 1669 /* Vector 0 is reserved to Default SB */ 1670 if (!p_block->vector_number) { 1671 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1672 return -EINVAL; 1673 } 1674 1675 /* Lose reference to client's SB info, and fix counters */ 1676 p_block->sb_info = NULL; 1677 p_block->status |= QED_IGU_STATUS_FREE; 1678 p_info->usage.free_cnt++; 1679 1680 return 0; 1681 } 1682 1683 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) 1684 { 1685 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1686 1687 if (!p_sb) 1688 return; 1689 1690 if (p_sb->sb_info.sb_virt) 1691 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1692 SB_ALIGNED_SIZE(p_hwfn), 1693 p_sb->sb_info.sb_virt, 1694 p_sb->sb_info.sb_phys); 1695 kfree(p_sb); 1696 p_hwfn->p_sp_sb = NULL; 1697 } 1698 1699 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1700 { 1701 struct qed_sb_sp_info *p_sb; 1702 dma_addr_t p_phys = 0; 1703 void *p_virt; 1704 1705 /* SB struct */ 1706 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 1707 if (!p_sb) 1708 return -ENOMEM; 1709 1710 /* SB ring */ 1711 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1712 SB_ALIGNED_SIZE(p_hwfn), 1713 &p_phys, GFP_KERNEL); 1714 if (!p_virt) { 1715 kfree(p_sb); 1716 return -ENOMEM; 1717 } 1718 1719 /* Status Block setup */ 1720 p_hwfn->p_sp_sb = p_sb; 1721 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, 1722 p_phys, QED_SP_SB_ID); 1723 1724 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1725 1726 return 0; 1727 } 1728 1729 int qed_int_register_cb(struct qed_hwfn *p_hwfn, 1730 qed_int_comp_cb_t comp_cb, 1731 void *cookie, u8 *sb_idx, __le16 **p_fw_cons) 1732 { 1733 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1734 int rc = -ENOMEM; 1735 u8 pi; 1736 1737 /* Look for a free index */ 1738 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1739 if (p_sp_sb->pi_info_arr[pi].comp_cb) 1740 continue; 1741 1742 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1743 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1744 *sb_idx = pi; 1745 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1746 rc = 0; 1747 break; 1748 } 1749 1750 return rc; 1751 } 1752 1753 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) 1754 { 1755 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1756 1757 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) 1758 return -ENOMEM; 1759 1760 p_sp_sb->pi_info_arr[pi].comp_cb = NULL; 1761 p_sp_sb->pi_info_arr[pi].cookie = NULL; 1762 1763 return 0; 1764 } 1765 1766 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) 1767 { 1768 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1769 } 1770 1771 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, 1772 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1773 { 1774 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1775 1776 p_hwfn->cdev->int_mode = int_mode; 1777 switch (p_hwfn->cdev->int_mode) { 1778 case QED_INT_MODE_INTA: 1779 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1780 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1781 break; 1782 1783 case QED_INT_MODE_MSI: 1784 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1785 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1786 break; 1787 1788 case QED_INT_MODE_MSIX: 1789 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1790 break; 1791 case QED_INT_MODE_POLL: 1792 break; 1793 } 1794 1795 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1796 } 1797 1798 static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, 1799 struct qed_ptt *p_ptt) 1800 { 1801 1802 /* Configure AEU signal change to produce attentions */ 1803 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1804 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1805 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1806 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1807 1808 /* Flush the writes to IGU */ 1809 mmiowb(); 1810 1811 /* Unmask AEU signals toward IGU */ 1812 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1813 } 1814 1815 int 1816 qed_int_igu_enable(struct qed_hwfn *p_hwfn, 1817 struct qed_ptt *p_ptt, enum qed_int_mode int_mode) 1818 { 1819 int rc = 0; 1820 1821 qed_int_igu_enable_attn(p_hwfn, p_ptt); 1822 1823 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1824 rc = qed_slowpath_irq_req(p_hwfn); 1825 if (rc) { 1826 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 1827 return -EINVAL; 1828 } 1829 p_hwfn->b_int_requested = true; 1830 } 1831 /* Enable interrupt Generation */ 1832 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1833 p_hwfn->b_int_enabled = 1; 1834 1835 return rc; 1836 } 1837 1838 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1839 { 1840 p_hwfn->b_int_enabled = 0; 1841 1842 if (IS_VF(p_hwfn->cdev)) 1843 return; 1844 1845 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1846 } 1847 1848 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1849 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 1850 struct qed_ptt *p_ptt, 1851 u16 igu_sb_id, 1852 bool cleanup_set, u16 opaque_fid) 1853 { 1854 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1855 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1856 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1857 1858 /* Set the data field */ 1859 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1860 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); 1861 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1862 1863 /* Set the control register */ 1864 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1865 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1866 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1867 1868 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1869 1870 barrier(); 1871 1872 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1873 1874 /* Flush the write to IGU */ 1875 mmiowb(); 1876 1877 /* calculate where to read the status bit from */ 1878 sb_bit = 1 << (igu_sb_id % 32); 1879 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 1880 1881 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; 1882 1883 /* Now wait for the command to complete */ 1884 do { 1885 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); 1886 1887 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 1888 break; 1889 1890 usleep_range(5000, 10000); 1891 } while (--sleep_cnt); 1892 1893 if (!sleep_cnt) 1894 DP_NOTICE(p_hwfn, 1895 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 1896 val, igu_sb_id); 1897 } 1898 1899 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 1900 struct qed_ptt *p_ptt, 1901 u16 igu_sb_id, u16 opaque, bool b_set) 1902 { 1903 struct qed_igu_block *p_block; 1904 int pi, i; 1905 1906 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 1907 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1908 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 1909 igu_sb_id, 1910 p_block->function_id, 1911 p_block->is_pf, p_block->vector_number); 1912 1913 /* Set */ 1914 if (b_set) 1915 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 1916 1917 /* Clear */ 1918 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 1919 1920 /* Wait for the IGU SB to cleanup */ 1921 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 1922 u32 val; 1923 1924 val = qed_rd(p_hwfn, p_ptt, 1925 IGU_REG_WRITE_DONE_PENDING + 1926 ((igu_sb_id / 32) * 4)); 1927 if (val & BIT((igu_sb_id % 32))) 1928 usleep_range(10, 20); 1929 else 1930 break; 1931 } 1932 if (i == IGU_CLEANUP_SLEEP_LENGTH) 1933 DP_NOTICE(p_hwfn, 1934 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 1935 igu_sb_id); 1936 1937 /* Clear the CAU for the SB */ 1938 for (pi = 0; pi < 12; pi++) 1939 qed_wr(p_hwfn, p_ptt, 1940 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 1941 } 1942 1943 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 1944 struct qed_ptt *p_ptt, 1945 bool b_set, bool b_slowpath) 1946 { 1947 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 1948 struct qed_igu_block *p_block; 1949 u16 igu_sb_id = 0; 1950 u32 val = 0; 1951 1952 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 1953 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 1954 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 1955 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 1956 1957 for (igu_sb_id = 0; 1958 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 1959 p_block = &p_info->entry[igu_sb_id]; 1960 1961 if (!(p_block->status & QED_IGU_STATUS_VALID) || 1962 !p_block->is_pf || 1963 (p_block->status & QED_IGU_STATUS_DSB)) 1964 continue; 1965 1966 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 1967 p_hwfn->hw_info.opaque_fid, 1968 b_set); 1969 } 1970 1971 if (b_slowpath) 1972 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 1973 p_info->igu_dsb_id, 1974 p_hwfn->hw_info.opaque_fid, 1975 b_set); 1976 } 1977 1978 int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1979 { 1980 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 1981 struct qed_igu_block *p_block; 1982 int pf_sbs, vf_sbs; 1983 u16 igu_sb_id; 1984 u32 val, rval; 1985 1986 if (!RESC_NUM(p_hwfn, QED_SB)) { 1987 p_info->b_allow_pf_vf_change = false; 1988 } else { 1989 /* Use the numbers the MFW have provided - 1990 * don't forget MFW accounts for the default SB as well. 1991 */ 1992 p_info->b_allow_pf_vf_change = true; 1993 1994 if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { 1995 DP_INFO(p_hwfn, 1996 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 1997 RESC_NUM(p_hwfn, QED_SB) - 1, 1998 p_info->usage.cnt); 1999 p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; 2000 } 2001 2002 if (IS_PF_SRIOV(p_hwfn)) { 2003 u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; 2004 2005 if (vfs != p_info->usage.iov_cnt) 2006 DP_VERBOSE(p_hwfn, 2007 NETIF_MSG_INTR, 2008 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2009 p_info->usage.iov_cnt, vfs); 2010 2011 /* At this point we know how many SBs we have totally 2012 * in IGU + number of PF SBs. So we can validate that 2013 * we'd have sufficient for VF. 2014 */ 2015 if (vfs > p_info->usage.free_cnt + 2016 p_info->usage.free_cnt_iov - p_info->usage.cnt) { 2017 DP_NOTICE(p_hwfn, 2018 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2019 p_info->usage.free_cnt + 2020 p_info->usage.free_cnt_iov, 2021 p_info->usage.cnt, vfs); 2022 return -EINVAL; 2023 } 2024 2025 /* Currently cap the number of VFs SBs by the 2026 * number of VFs. 2027 */ 2028 p_info->usage.iov_cnt = vfs; 2029 } 2030 } 2031 2032 /* Mark all SBs as free, now in the right PF/VFs division */ 2033 p_info->usage.free_cnt = p_info->usage.cnt; 2034 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2035 p_info->usage.orig = p_info->usage.cnt; 2036 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2037 2038 /* We now proceed to re-configure the IGU cam to reflect the initial 2039 * configuration. We can start with the Default SB. 2040 */ 2041 pf_sbs = p_info->usage.cnt; 2042 vf_sbs = p_info->usage.iov_cnt; 2043 2044 for (igu_sb_id = p_info->igu_dsb_id; 2045 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2046 p_block = &p_info->entry[igu_sb_id]; 2047 val = 0; 2048 2049 if (!(p_block->status & QED_IGU_STATUS_VALID)) 2050 continue; 2051 2052 if (p_block->status & QED_IGU_STATUS_DSB) { 2053 p_block->function_id = p_hwfn->rel_pf_id; 2054 p_block->is_pf = 1; 2055 p_block->vector_number = 0; 2056 p_block->status = QED_IGU_STATUS_VALID | 2057 QED_IGU_STATUS_PF | 2058 QED_IGU_STATUS_DSB; 2059 } else if (pf_sbs) { 2060 pf_sbs--; 2061 p_block->function_id = p_hwfn->rel_pf_id; 2062 p_block->is_pf = 1; 2063 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2064 p_block->status = QED_IGU_STATUS_VALID | 2065 QED_IGU_STATUS_PF | 2066 QED_IGU_STATUS_FREE; 2067 } else if (vf_sbs) { 2068 p_block->function_id = 2069 p_hwfn->cdev->p_iov_info->first_vf_in_pf + 2070 p_info->usage.iov_cnt - vf_sbs; 2071 p_block->is_pf = 0; 2072 p_block->vector_number = 0; 2073 p_block->status = QED_IGU_STATUS_VALID | 2074 QED_IGU_STATUS_FREE; 2075 vf_sbs--; 2076 } else { 2077 p_block->function_id = 0; 2078 p_block->is_pf = 0; 2079 p_block->vector_number = 0; 2080 } 2081 2082 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2083 p_block->function_id); 2084 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2085 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2086 p_block->vector_number); 2087 2088 /* VF entries would be enabled when VF is initializaed */ 2089 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2090 2091 rval = qed_rd(p_hwfn, p_ptt, 2092 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2093 2094 if (rval != val) { 2095 qed_wr(p_hwfn, p_ptt, 2096 IGU_REG_MAPPING_MEMORY + 2097 sizeof(u32) * igu_sb_id, val); 2098 2099 DP_VERBOSE(p_hwfn, 2100 NETIF_MSG_INTR, 2101 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2102 igu_sb_id, 2103 p_block->function_id, 2104 p_block->is_pf, 2105 p_block->vector_number, rval, val); 2106 } 2107 } 2108 2109 return 0; 2110 } 2111 2112 static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, 2113 struct qed_ptt *p_ptt, u16 igu_sb_id) 2114 { 2115 u32 val = qed_rd(p_hwfn, p_ptt, 2116 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2117 struct qed_igu_block *p_block; 2118 2119 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2120 2121 /* Fill the block information */ 2122 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 2123 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2124 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 2125 p_block->igu_sb_id = igu_sb_id; 2126 } 2127 2128 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2129 { 2130 struct qed_igu_info *p_igu_info; 2131 struct qed_igu_block *p_block; 2132 u32 min_vf = 0, max_vf = 0; 2133 u16 igu_sb_id; 2134 2135 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); 2136 if (!p_hwfn->hw_info.p_igu_info) 2137 return -ENOMEM; 2138 2139 p_igu_info = p_hwfn->hw_info.p_igu_info; 2140 2141 /* Distinguish between existent and non-existent default SB */ 2142 p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; 2143 2144 /* Find the range of VF ids whose SB belong to this PF */ 2145 if (p_hwfn->cdev->p_iov_info) { 2146 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 2147 2148 min_vf = p_iov->first_vf_in_pf; 2149 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2150 } 2151 2152 for (igu_sb_id = 0; 2153 igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { 2154 /* Read current entry; Notice it might not belong to this PF */ 2155 qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2156 p_block = &p_igu_info->entry[igu_sb_id]; 2157 2158 if ((p_block->is_pf) && 2159 (p_block->function_id == p_hwfn->rel_pf_id)) { 2160 p_block->status = QED_IGU_STATUS_PF | 2161 QED_IGU_STATUS_VALID | 2162 QED_IGU_STATUS_FREE; 2163 2164 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2165 p_igu_info->usage.cnt++; 2166 } else if (!(p_block->is_pf) && 2167 (p_block->function_id >= min_vf) && 2168 (p_block->function_id < max_vf)) { 2169 /* Available for VFs of this PF */ 2170 p_block->status = QED_IGU_STATUS_VALID | 2171 QED_IGU_STATUS_FREE; 2172 2173 if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) 2174 p_igu_info->usage.iov_cnt++; 2175 } 2176 2177 /* Mark the First entry belonging to the PF or its VFs 2178 * as the default SB [we'll reset IGU prior to first usage]. 2179 */ 2180 if ((p_block->status & QED_IGU_STATUS_VALID) && 2181 (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { 2182 p_igu_info->igu_dsb_id = igu_sb_id; 2183 p_block->status |= QED_IGU_STATUS_DSB; 2184 } 2185 2186 /* limit number of prints by having each PF print only its 2187 * entries with the exception of PF0 which would print 2188 * everything. 2189 */ 2190 if ((p_block->status & QED_IGU_STATUS_VALID) || 2191 (p_hwfn->abs_pf_id == 0)) { 2192 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2193 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2194 igu_sb_id, p_block->function_id, 2195 p_block->is_pf, p_block->vector_number); 2196 } 2197 } 2198 2199 if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { 2200 DP_NOTICE(p_hwfn, 2201 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2202 p_igu_info->igu_dsb_id); 2203 return -EINVAL; 2204 } 2205 2206 /* All non default SB are considered free at this point */ 2207 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2208 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2209 2210 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2211 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2212 p_igu_info->igu_dsb_id, 2213 p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); 2214 2215 return 0; 2216 } 2217 2218 /** 2219 * @brief Initialize igu runtime registers 2220 * 2221 * @param p_hwfn 2222 */ 2223 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) 2224 { 2225 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2226 2227 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2228 } 2229 2230 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) 2231 { 2232 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - 2233 IGU_CMD_INT_ACK_BASE; 2234 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - 2235 IGU_CMD_INT_ACK_BASE; 2236 u32 intr_status_hi = 0, intr_status_lo = 0; 2237 u64 intr_status = 0; 2238 2239 intr_status_lo = REG_RD(p_hwfn, 2240 GTT_BAR0_MAP_REG_IGU_CMD + 2241 lsb_igu_cmd_addr * 8); 2242 intr_status_hi = REG_RD(p_hwfn, 2243 GTT_BAR0_MAP_REG_IGU_CMD + 2244 msb_igu_cmd_addr * 8); 2245 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2246 2247 return intr_status; 2248 } 2249 2250 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) 2251 { 2252 tasklet_init(p_hwfn->sp_dpc, 2253 qed_int_sp_dpc, (unsigned long)p_hwfn); 2254 p_hwfn->b_sp_dpc_enabled = true; 2255 } 2256 2257 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) 2258 { 2259 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); 2260 if (!p_hwfn->sp_dpc) 2261 return -ENOMEM; 2262 2263 return 0; 2264 } 2265 2266 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) 2267 { 2268 kfree(p_hwfn->sp_dpc); 2269 p_hwfn->sp_dpc = NULL; 2270 } 2271 2272 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2273 { 2274 int rc = 0; 2275 2276 rc = qed_int_sp_dpc_alloc(p_hwfn); 2277 if (rc) 2278 return rc; 2279 2280 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); 2281 if (rc) 2282 return rc; 2283 2284 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); 2285 2286 return rc; 2287 } 2288 2289 void qed_int_free(struct qed_hwfn *p_hwfn) 2290 { 2291 qed_int_sp_sb_free(p_hwfn); 2292 qed_int_sb_attn_free(p_hwfn); 2293 qed_int_sp_dpc_free(p_hwfn); 2294 } 2295 2296 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2297 { 2298 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2299 qed_int_sb_attn_setup(p_hwfn, p_ptt); 2300 qed_int_sp_dpc_setup(p_hwfn); 2301 } 2302 2303 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, 2304 struct qed_sb_cnt_info *p_sb_cnt_info) 2305 { 2306 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; 2307 2308 if (!info || !p_sb_cnt_info) 2309 return; 2310 2311 memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); 2312 } 2313 2314 void qed_int_disable_post_isr_release(struct qed_dev *cdev) 2315 { 2316 int i; 2317 2318 for_each_hwfn(cdev, i) 2319 cdev->hwfns[i].b_int_requested = false; 2320 } 2321 2322 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2323 u8 timer_res, u16 sb_id, bool tx) 2324 { 2325 struct cau_sb_entry sb_entry; 2326 int rc; 2327 2328 if (!p_hwfn->hw_init_done) { 2329 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2330 return -EINVAL; 2331 } 2332 2333 rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2334 sb_id * sizeof(u64), 2335 (u64)(uintptr_t)&sb_entry, 2, 0); 2336 if (rc) { 2337 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2338 return rc; 2339 } 2340 2341 if (tx) 2342 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2343 else 2344 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2345 2346 rc = qed_dmae_host2grc(p_hwfn, p_ptt, 2347 (u64)(uintptr_t)&sb_entry, 2348 CAU_REG_SB_VAR_MEMORY + 2349 sb_id * sizeof(u64), 2, 0); 2350 if (rc) { 2351 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2352 return rc; 2353 } 2354 2355 return rc; 2356 } 2357