1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015 QLogic Corporation 3 * 4 * This software is available under the terms of the GNU General Public License 5 * (GPL) Version 2, available from the file COPYING in the main directory of 6 * this source tree. 7 */ 8 9 #include <linux/types.h> 10 #include <asm/byteorder.h> 11 #include <linux/io.h> 12 #include <linux/bitops.h> 13 #include <linux/delay.h> 14 #include <linux/dma-mapping.h> 15 #include <linux/errno.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel.h> 18 #include <linux/pci.h> 19 #include <linux/slab.h> 20 #include <linux/string.h> 21 #include "qed.h" 22 #include "qed_hsi.h" 23 #include "qed_hw.h" 24 #include "qed_init_ops.h" 25 #include "qed_int.h" 26 #include "qed_mcp.h" 27 #include "qed_reg_addr.h" 28 #include "qed_sp.h" 29 #include "qed_sriov.h" 30 #include "qed_vf.h" 31 32 struct qed_pi_info { 33 qed_int_comp_cb_t comp_cb; 34 void *cookie; 35 }; 36 37 struct qed_sb_sp_info { 38 struct qed_sb_info sb_info; 39 40 /* per protocol index data */ 41 struct qed_pi_info pi_info_arr[PIS_PER_SB]; 42 }; 43 44 enum qed_attention_type { 45 QED_ATTN_TYPE_ATTN, 46 QED_ATTN_TYPE_PARITY, 47 }; 48 49 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 50 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 51 52 struct aeu_invert_reg_bit { 53 char bit_name[30]; 54 55 #define ATTENTION_PARITY (1 << 0) 56 57 #define ATTENTION_LENGTH_MASK (0x00000ff0) 58 #define ATTENTION_LENGTH_SHIFT (4) 59 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 60 ATTENTION_LENGTH_SHIFT) 61 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 62 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 63 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 64 ATTENTION_PARITY) 65 66 /* Multiple bits start with this offset */ 67 #define ATTENTION_OFFSET_MASK (0x000ff000) 68 #define ATTENTION_OFFSET_SHIFT (12) 69 unsigned int flags; 70 71 /* Callback to call if attention will be triggered */ 72 int (*cb)(struct qed_hwfn *p_hwfn); 73 74 enum block_id block_index; 75 }; 76 77 struct aeu_invert_reg { 78 struct aeu_invert_reg_bit bits[32]; 79 }; 80 81 #define MAX_ATTN_GRPS (8) 82 #define NUM_ATTN_REGS (9) 83 84 /* HW Attention register */ 85 struct attn_hw_reg { 86 u16 reg_idx; /* Index of this register in its block */ 87 u16 num_of_bits; /* number of valid attention bits */ 88 u32 sts_addr; /* Address of the STS register */ 89 u32 sts_clr_addr; /* Address of the STS_CLR register */ 90 u32 sts_wr_addr; /* Address of the STS_WR register */ 91 u32 mask_addr; /* Address of the MASK register */ 92 }; 93 94 /* HW block attention registers */ 95 struct attn_hw_regs { 96 u16 num_of_int_regs; /* Number of interrupt regs */ 97 u16 num_of_prty_regs; /* Number of parity regs */ 98 struct attn_hw_reg **int_regs; /* interrupt regs */ 99 struct attn_hw_reg **prty_regs; /* parity regs */ 100 }; 101 102 /* HW block attention registers */ 103 struct attn_hw_block { 104 const char *name; /* Block name */ 105 struct attn_hw_regs chip_regs[1]; 106 }; 107 108 static struct attn_hw_reg grc_int0_bb_b0 = { 109 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184}; 110 111 static struct attn_hw_reg *grc_int_bb_b0_regs[1] = { 112 &grc_int0_bb_b0}; 113 114 static struct attn_hw_reg grc_prty1_bb_b0 = { 115 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204}; 116 117 static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = { 118 &grc_prty1_bb_b0}; 119 120 static struct attn_hw_reg miscs_int0_bb_b0 = { 121 0, 3, 0x9180, 0x918c, 0x9188, 0x9184}; 122 123 static struct attn_hw_reg miscs_int1_bb_b0 = { 
124 1, 11, 0x9190, 0x919c, 0x9198, 0x9194}; 125 126 static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = { 127 &miscs_int0_bb_b0, &miscs_int1_bb_b0}; 128 129 static struct attn_hw_reg miscs_prty0_bb_b0 = { 130 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4}; 131 132 static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = { 133 &miscs_prty0_bb_b0}; 134 135 static struct attn_hw_reg misc_int0_bb_b0 = { 136 0, 1, 0x8180, 0x818c, 0x8188, 0x8184}; 137 138 static struct attn_hw_reg *misc_int_bb_b0_regs[1] = { 139 &misc_int0_bb_b0}; 140 141 static struct attn_hw_reg pglue_b_int0_bb_b0 = { 142 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184}; 143 144 static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = { 145 &pglue_b_int0_bb_b0}; 146 147 static struct attn_hw_reg pglue_b_prty0_bb_b0 = { 148 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194}; 149 150 static struct attn_hw_reg pglue_b_prty1_bb_b0 = { 151 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204}; 152 153 static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = { 154 &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0}; 155 156 static struct attn_hw_reg cnig_int0_bb_b0 = { 157 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec}; 158 159 static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = { 160 &cnig_int0_bb_b0}; 161 162 static struct attn_hw_reg cnig_prty0_bb_b0 = { 163 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c}; 164 165 static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = { 166 &cnig_prty0_bb_b0}; 167 168 static struct attn_hw_reg cpmu_int0_bb_b0 = { 169 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4}; 170 171 static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = { 172 &cpmu_int0_bb_b0}; 173 174 static struct attn_hw_reg ncsi_int0_bb_b0 = { 175 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0}; 176 177 static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = { 178 &ncsi_int0_bb_b0}; 179 180 static struct attn_hw_reg ncsi_prty1_bb_b0 = { 181 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004}; 182 183 static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = { 184 &ncsi_prty1_bb_b0}; 185 186 static struct attn_hw_reg opte_prty1_bb_b0 = { 187 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004}; 188 189 static struct attn_hw_reg opte_prty0_bb_b0 = { 190 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c}; 191 192 static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = { 193 &opte_prty1_bb_b0, &opte_prty0_bb_b0}; 194 195 static struct attn_hw_reg bmb_int0_bb_b0 = { 196 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4}; 197 198 static struct attn_hw_reg bmb_int1_bb_b0 = { 199 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc}; 200 201 static struct attn_hw_reg bmb_int2_bb_b0 = { 202 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4}; 203 204 static struct attn_hw_reg bmb_int3_bb_b0 = { 205 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c}; 206 207 static struct attn_hw_reg bmb_int4_bb_b0 = { 208 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124}; 209 210 static struct attn_hw_reg bmb_int5_bb_b0 = { 211 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c}; 212 213 static struct attn_hw_reg bmb_int6_bb_b0 = { 214 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154}; 215 216 static struct attn_hw_reg bmb_int7_bb_b0 = { 217 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c}; 218 219 static struct attn_hw_reg bmb_int8_bb_b0 = { 220 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188}; 221 222 static struct attn_hw_reg bmb_int9_bb_b0 = { 223 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0}; 224 225 static struct attn_hw_reg bmb_int10_bb_b0 = { 226 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8}; 227 228 static struct attn_hw_reg bmb_int11_bb_b0 = { 229 11, 4, 0x5401cc, 0x5401d8, 
0x5401d4, 0x5401d0}; 230 231 static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = { 232 &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0, 233 &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0, 234 &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0}; 235 236 static struct attn_hw_reg bmb_prty0_bb_b0 = { 237 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0}; 238 239 static struct attn_hw_reg bmb_prty1_bb_b0 = { 240 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404}; 241 242 static struct attn_hw_reg bmb_prty2_bb_b0 = { 243 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414}; 244 245 static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = { 246 &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0}; 247 248 static struct attn_hw_reg pcie_prty1_bb_b0 = { 249 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004}; 250 251 static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = { 252 &pcie_prty1_bb_b0}; 253 254 static struct attn_hw_reg mcp2_prty0_bb_b0 = { 255 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044}; 256 257 static struct attn_hw_reg mcp2_prty1_bb_b0 = { 258 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208}; 259 260 static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = { 261 &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0}; 262 263 static struct attn_hw_reg pswhst_int0_bb_b0 = { 264 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184}; 265 266 static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = { 267 &pswhst_int0_bb_b0}; 268 269 static struct attn_hw_reg pswhst_prty0_bb_b0 = { 270 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194}; 271 272 static struct attn_hw_reg pswhst_prty1_bb_b0 = { 273 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204}; 274 275 static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = { 276 &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0}; 277 278 static struct attn_hw_reg pswhst2_int0_bb_b0 = { 279 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184}; 280 281 static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = { 282 &pswhst2_int0_bb_b0}; 283 284 static struct attn_hw_reg pswhst2_prty0_bb_b0 = { 285 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194}; 286 287 static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = { 288 &pswhst2_prty0_bb_b0}; 289 290 static struct attn_hw_reg pswrd_int0_bb_b0 = { 291 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184}; 292 293 static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = { 294 &pswrd_int0_bb_b0}; 295 296 static struct attn_hw_reg pswrd_prty0_bb_b0 = { 297 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194}; 298 299 static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = { 300 &pswrd_prty0_bb_b0}; 301 302 static struct attn_hw_reg pswrd2_int0_bb_b0 = { 303 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184}; 304 305 static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = { 306 &pswrd2_int0_bb_b0}; 307 308 static struct attn_hw_reg pswrd2_prty0_bb_b0 = { 309 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194}; 310 311 static struct attn_hw_reg pswrd2_prty1_bb_b0 = { 312 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204}; 313 314 static struct attn_hw_reg pswrd2_prty2_bb_b0 = { 315 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214}; 316 317 static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = { 318 &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0}; 319 320 static struct attn_hw_reg pswwr_int0_bb_b0 = { 321 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184}; 322 323 static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = { 324 &pswwr_int0_bb_b0}; 325 326 static struct attn_hw_reg pswwr_prty0_bb_b0 = { 327 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194}; 328 329 static struct attn_hw_reg 
*pswwr_prty_bb_b0_regs[1] = { 330 &pswwr_prty0_bb_b0}; 331 332 static struct attn_hw_reg pswwr2_int0_bb_b0 = { 333 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184}; 334 335 static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = { 336 &pswwr2_int0_bb_b0}; 337 338 static struct attn_hw_reg pswwr2_prty0_bb_b0 = { 339 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194}; 340 341 static struct attn_hw_reg pswwr2_prty1_bb_b0 = { 342 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204}; 343 344 static struct attn_hw_reg pswwr2_prty2_bb_b0 = { 345 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214}; 346 347 static struct attn_hw_reg pswwr2_prty3_bb_b0 = { 348 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224}; 349 350 static struct attn_hw_reg pswwr2_prty4_bb_b0 = { 351 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234}; 352 353 static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = { 354 &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0, 355 &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0}; 356 357 static struct attn_hw_reg pswrq_int0_bb_b0 = { 358 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184}; 359 360 static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = { 361 &pswrq_int0_bb_b0}; 362 363 static struct attn_hw_reg pswrq_prty0_bb_b0 = { 364 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194}; 365 366 static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = { 367 &pswrq_prty0_bb_b0}; 368 369 static struct attn_hw_reg pswrq2_int0_bb_b0 = { 370 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184}; 371 372 static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = { 373 &pswrq2_int0_bb_b0}; 374 375 static struct attn_hw_reg pswrq2_prty1_bb_b0 = { 376 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204}; 377 378 static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = { 379 &pswrq2_prty1_bb_b0}; 380 381 static struct attn_hw_reg pglcs_int0_bb_b0 = { 382 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04}; 383 384 static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = { 385 &pglcs_int0_bb_b0}; 386 387 static struct attn_hw_reg dmae_int0_bb_b0 = { 388 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184}; 389 390 static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = { 391 &dmae_int0_bb_b0}; 392 393 static struct attn_hw_reg dmae_prty1_bb_b0 = { 394 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204}; 395 396 static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = { 397 &dmae_prty1_bb_b0}; 398 399 static struct attn_hw_reg ptu_int0_bb_b0 = { 400 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184}; 401 402 static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = { 403 &ptu_int0_bb_b0}; 404 405 static struct attn_hw_reg ptu_prty1_bb_b0 = { 406 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204}; 407 408 static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = { 409 &ptu_prty1_bb_b0}; 410 411 static struct attn_hw_reg tcm_int0_bb_b0 = { 412 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184}; 413 414 static struct attn_hw_reg tcm_int1_bb_b0 = { 415 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194}; 416 417 static struct attn_hw_reg tcm_int2_bb_b0 = { 418 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4}; 419 420 static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = { 421 &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0}; 422 423 static struct attn_hw_reg tcm_prty1_bb_b0 = { 424 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204}; 425 426 static struct attn_hw_reg tcm_prty2_bb_b0 = { 427 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214}; 428 429 static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = { 430 &tcm_prty1_bb_b0, &tcm_prty2_bb_b0}; 431 432 static struct attn_hw_reg mcm_int0_bb_b0 = { 433 0, 14, 0x1200180, 0x120018c, 0x1200188, 
0x1200184}; 434 435 static struct attn_hw_reg mcm_int1_bb_b0 = { 436 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194}; 437 438 static struct attn_hw_reg mcm_int2_bb_b0 = { 439 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4}; 440 441 static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = { 442 &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0}; 443 444 static struct attn_hw_reg mcm_prty1_bb_b0 = { 445 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204}; 446 447 static struct attn_hw_reg mcm_prty2_bb_b0 = { 448 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214}; 449 450 static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = { 451 &mcm_prty1_bb_b0, &mcm_prty2_bb_b0}; 452 453 static struct attn_hw_reg ucm_int0_bb_b0 = { 454 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184}; 455 456 static struct attn_hw_reg ucm_int1_bb_b0 = { 457 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194}; 458 459 static struct attn_hw_reg ucm_int2_bb_b0 = { 460 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4}; 461 462 static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = { 463 &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0}; 464 465 static struct attn_hw_reg ucm_prty1_bb_b0 = { 466 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204}; 467 468 static struct attn_hw_reg ucm_prty2_bb_b0 = { 469 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214}; 470 471 static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = { 472 &ucm_prty1_bb_b0, &ucm_prty2_bb_b0}; 473 474 static struct attn_hw_reg xcm_int0_bb_b0 = { 475 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184}; 476 477 static struct attn_hw_reg xcm_int1_bb_b0 = { 478 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194}; 479 480 static struct attn_hw_reg xcm_int2_bb_b0 = { 481 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4}; 482 483 static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = { 484 &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0}; 485 486 static struct attn_hw_reg xcm_prty1_bb_b0 = { 487 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204}; 488 489 static struct attn_hw_reg xcm_prty2_bb_b0 = { 490 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214}; 491 492 static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = { 493 &xcm_prty1_bb_b0, &xcm_prty2_bb_b0}; 494 495 static struct attn_hw_reg ycm_int0_bb_b0 = { 496 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184}; 497 498 static struct attn_hw_reg ycm_int1_bb_b0 = { 499 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194}; 500 501 static struct attn_hw_reg ycm_int2_bb_b0 = { 502 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4}; 503 504 static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = { 505 &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0}; 506 507 static struct attn_hw_reg ycm_prty1_bb_b0 = { 508 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204}; 509 510 static struct attn_hw_reg ycm_prty2_bb_b0 = { 511 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214}; 512 513 static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = { 514 &ycm_prty1_bb_b0, &ycm_prty2_bb_b0}; 515 516 static struct attn_hw_reg pcm_int0_bb_b0 = { 517 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184}; 518 519 static struct attn_hw_reg pcm_int1_bb_b0 = { 520 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194}; 521 522 static struct attn_hw_reg pcm_int2_bb_b0 = { 523 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4}; 524 525 static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = { 526 &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0}; 527 528 static struct attn_hw_reg pcm_prty1_bb_b0 = { 529 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204}; 530 531 static struct attn_hw_reg 
*pcm_prty_bb_b0_regs[1] = { 532 &pcm_prty1_bb_b0}; 533 534 static struct attn_hw_reg qm_int0_bb_b0 = { 535 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184}; 536 537 static struct attn_hw_reg *qm_int_bb_b0_regs[1] = { 538 &qm_int0_bb_b0}; 539 540 static struct attn_hw_reg qm_prty0_bb_b0 = { 541 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194}; 542 543 static struct attn_hw_reg qm_prty1_bb_b0 = { 544 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204}; 545 546 static struct attn_hw_reg qm_prty2_bb_b0 = { 547 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214}; 548 549 static struct attn_hw_reg qm_prty3_bb_b0 = { 550 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224}; 551 552 static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = { 553 &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0}; 554 555 static struct attn_hw_reg tm_int0_bb_b0 = { 556 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184}; 557 558 static struct attn_hw_reg tm_int1_bb_b0 = { 559 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194}; 560 561 static struct attn_hw_reg *tm_int_bb_b0_regs[2] = { 562 &tm_int0_bb_b0, &tm_int1_bb_b0}; 563 564 static struct attn_hw_reg tm_prty1_bb_b0 = { 565 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204}; 566 567 static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = { 568 &tm_prty1_bb_b0}; 569 570 static struct attn_hw_reg dorq_int0_bb_b0 = { 571 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184}; 572 573 static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = { 574 &dorq_int0_bb_b0}; 575 576 static struct attn_hw_reg dorq_prty0_bb_b0 = { 577 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194}; 578 579 static struct attn_hw_reg dorq_prty1_bb_b0 = { 580 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204}; 581 582 static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = { 583 &dorq_prty0_bb_b0, &dorq_prty1_bb_b0}; 584 585 static struct attn_hw_reg brb_int0_bb_b0 = { 586 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4}; 587 588 static struct attn_hw_reg brb_int1_bb_b0 = { 589 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc}; 590 591 static struct attn_hw_reg brb_int2_bb_b0 = { 592 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4}; 593 594 static struct attn_hw_reg brb_int3_bb_b0 = { 595 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c}; 596 597 static struct attn_hw_reg brb_int4_bb_b0 = { 598 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124}; 599 600 static struct attn_hw_reg brb_int5_bb_b0 = { 601 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c}; 602 603 static struct attn_hw_reg brb_int6_bb_b0 = { 604 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154}; 605 606 static struct attn_hw_reg brb_int7_bb_b0 = { 607 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c}; 608 609 static struct attn_hw_reg brb_int8_bb_b0 = { 610 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188}; 611 612 static struct attn_hw_reg brb_int9_bb_b0 = { 613 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0}; 614 615 static struct attn_hw_reg brb_int10_bb_b0 = { 616 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8}; 617 618 static struct attn_hw_reg brb_int11_bb_b0 = { 619 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0}; 620 621 static struct attn_hw_reg *brb_int_bb_b0_regs[12] = { 622 &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0, 623 &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0, 624 &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0}; 625 626 static struct attn_hw_reg brb_prty0_bb_b0 = { 627 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0}; 628 629 static struct attn_hw_reg brb_prty1_bb_b0 = { 630 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404}; 631 632 
static struct attn_hw_reg brb_prty2_bb_b0 = { 633 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414}; 634 635 static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = { 636 &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0}; 637 638 static struct attn_hw_reg src_int0_bb_b0 = { 639 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4}; 640 641 static struct attn_hw_reg *src_int_bb_b0_regs[1] = { 642 &src_int0_bb_b0}; 643 644 static struct attn_hw_reg prs_int0_bb_b0 = { 645 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044}; 646 647 static struct attn_hw_reg *prs_int_bb_b0_regs[1] = { 648 &prs_int0_bb_b0}; 649 650 static struct attn_hw_reg prs_prty0_bb_b0 = { 651 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054}; 652 653 static struct attn_hw_reg prs_prty1_bb_b0 = { 654 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208}; 655 656 static struct attn_hw_reg prs_prty2_bb_b0 = { 657 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218}; 658 659 static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = { 660 &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0}; 661 662 static struct attn_hw_reg tsdm_int0_bb_b0 = { 663 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044}; 664 665 static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = { 666 &tsdm_int0_bb_b0}; 667 668 static struct attn_hw_reg tsdm_prty1_bb_b0 = { 669 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204}; 670 671 static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = { 672 &tsdm_prty1_bb_b0}; 673 674 static struct attn_hw_reg msdm_int0_bb_b0 = { 675 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044}; 676 677 static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = { 678 &msdm_int0_bb_b0}; 679 680 static struct attn_hw_reg msdm_prty1_bb_b0 = { 681 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204}; 682 683 static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = { 684 &msdm_prty1_bb_b0}; 685 686 static struct attn_hw_reg usdm_int0_bb_b0 = { 687 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044}; 688 689 static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = { 690 &usdm_int0_bb_b0}; 691 692 static struct attn_hw_reg usdm_prty1_bb_b0 = { 693 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204}; 694 695 static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = { 696 &usdm_prty1_bb_b0}; 697 698 static struct attn_hw_reg xsdm_int0_bb_b0 = { 699 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044}; 700 701 static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = { 702 &xsdm_int0_bb_b0}; 703 704 static struct attn_hw_reg xsdm_prty1_bb_b0 = { 705 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204}; 706 707 static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = { 708 &xsdm_prty1_bb_b0}; 709 710 static struct attn_hw_reg ysdm_int0_bb_b0 = { 711 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044}; 712 713 static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = { 714 &ysdm_int0_bb_b0}; 715 716 static struct attn_hw_reg ysdm_prty1_bb_b0 = { 717 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204}; 718 719 static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = { 720 &ysdm_prty1_bb_b0}; 721 722 static struct attn_hw_reg psdm_int0_bb_b0 = { 723 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044}; 724 725 static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = { 726 &psdm_int0_bb_b0}; 727 728 static struct attn_hw_reg psdm_prty1_bb_b0 = { 729 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204}; 730 731 static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = { 732 &psdm_prty1_bb_b0}; 733 734 static struct attn_hw_reg tsem_int0_bb_b0 = { 735 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044}; 736 737 static struct attn_hw_reg tsem_int1_bb_b0 = { 738 1, 13, 0x1700050, 0x170005c, 0x1700058, 
0x1700054}; 739 740 static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = { 741 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044}; 742 743 static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = { 744 &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0}; 745 746 static struct attn_hw_reg tsem_prty0_bb_b0 = { 747 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc}; 748 749 static struct attn_hw_reg tsem_prty1_bb_b0 = { 750 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204}; 751 752 static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = { 753 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204}; 754 755 static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = { 756 &tsem_prty0_bb_b0, &tsem_prty1_bb_b0, 757 &tsem_fast_memory_vfc_config_prty1_bb_b0}; 758 759 static struct attn_hw_reg msem_int0_bb_b0 = { 760 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044}; 761 762 static struct attn_hw_reg msem_int1_bb_b0 = { 763 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054}; 764 765 static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = { 766 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044}; 767 768 static struct attn_hw_reg *msem_int_bb_b0_regs[3] = { 769 &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0}; 770 771 static struct attn_hw_reg msem_prty0_bb_b0 = { 772 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc}; 773 774 static struct attn_hw_reg msem_prty1_bb_b0 = { 775 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204}; 776 777 static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = { 778 &msem_prty0_bb_b0, &msem_prty1_bb_b0}; 779 780 static struct attn_hw_reg usem_int0_bb_b0 = { 781 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044}; 782 783 static struct attn_hw_reg usem_int1_bb_b0 = { 784 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054}; 785 786 static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = { 787 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044}; 788 789 static struct attn_hw_reg *usem_int_bb_b0_regs[3] = { 790 &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0}; 791 792 static struct attn_hw_reg usem_prty0_bb_b0 = { 793 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc}; 794 795 static struct attn_hw_reg usem_prty1_bb_b0 = { 796 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204}; 797 798 static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = { 799 &usem_prty0_bb_b0, &usem_prty1_bb_b0}; 800 801 static struct attn_hw_reg xsem_int0_bb_b0 = { 802 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044}; 803 804 static struct attn_hw_reg xsem_int1_bb_b0 = { 805 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054}; 806 807 static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = { 808 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044}; 809 810 static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = { 811 &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0}; 812 813 static struct attn_hw_reg xsem_prty0_bb_b0 = { 814 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc}; 815 816 static struct attn_hw_reg xsem_prty1_bb_b0 = { 817 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204}; 818 819 static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = { 820 &xsem_prty0_bb_b0, &xsem_prty1_bb_b0}; 821 822 static struct attn_hw_reg ysem_int0_bb_b0 = { 823 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044}; 824 825 static struct attn_hw_reg ysem_int1_bb_b0 = { 826 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054}; 827 828 static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = { 829 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044}; 830 831 static struct attn_hw_reg 
*ysem_int_bb_b0_regs[3] = { 832 &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0}; 833 834 static struct attn_hw_reg ysem_prty0_bb_b0 = { 835 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc}; 836 837 static struct attn_hw_reg ysem_prty1_bb_b0 = { 838 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204}; 839 840 static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = { 841 &ysem_prty0_bb_b0, &ysem_prty1_bb_b0}; 842 843 static struct attn_hw_reg psem_int0_bb_b0 = { 844 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044}; 845 846 static struct attn_hw_reg psem_int1_bb_b0 = { 847 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054}; 848 849 static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = { 850 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044}; 851 852 static struct attn_hw_reg *psem_int_bb_b0_regs[3] = { 853 &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0}; 854 855 static struct attn_hw_reg psem_prty0_bb_b0 = { 856 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc}; 857 858 static struct attn_hw_reg psem_prty1_bb_b0 = { 859 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204}; 860 861 static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = { 862 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204}; 863 864 static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = { 865 &psem_prty0_bb_b0, &psem_prty1_bb_b0, 866 &psem_fast_memory_vfc_config_prty1_bb_b0}; 867 868 static struct attn_hw_reg rss_int0_bb_b0 = { 869 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984}; 870 871 static struct attn_hw_reg *rss_int_bb_b0_regs[1] = { 872 &rss_int0_bb_b0}; 873 874 static struct attn_hw_reg rss_prty1_bb_b0 = { 875 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04}; 876 877 static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = { 878 &rss_prty1_bb_b0}; 879 880 static struct attn_hw_reg tmld_int0_bb_b0 = { 881 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184}; 882 883 static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = { 884 &tmld_int0_bb_b0}; 885 886 static struct attn_hw_reg tmld_prty1_bb_b0 = { 887 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204}; 888 889 static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = { 890 &tmld_prty1_bb_b0}; 891 892 static struct attn_hw_reg muld_int0_bb_b0 = { 893 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184}; 894 895 static struct attn_hw_reg *muld_int_bb_b0_regs[1] = { 896 &muld_int0_bb_b0}; 897 898 static struct attn_hw_reg muld_prty1_bb_b0 = { 899 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204}; 900 901 static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = { 902 &muld_prty1_bb_b0}; 903 904 static struct attn_hw_reg yuld_int0_bb_b0 = { 905 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184}; 906 907 static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = { 908 &yuld_int0_bb_b0}; 909 910 static struct attn_hw_reg yuld_prty1_bb_b0 = { 911 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204}; 912 913 static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = { 914 &yuld_prty1_bb_b0}; 915 916 static struct attn_hw_reg xyld_int0_bb_b0 = { 917 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184}; 918 919 static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = { 920 &xyld_int0_bb_b0}; 921 922 static struct attn_hw_reg xyld_prty1_bb_b0 = { 923 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204}; 924 925 static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = { 926 &xyld_prty1_bb_b0}; 927 928 static struct attn_hw_reg prm_int0_bb_b0 = { 929 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044}; 930 931 static struct attn_hw_reg *prm_int_bb_b0_regs[1] = { 932 &prm_int0_bb_b0}; 933 934 static struct attn_hw_reg prm_prty0_bb_b0 = 
{ 935 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054}; 936 937 static struct attn_hw_reg prm_prty1_bb_b0 = { 938 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204}; 939 940 static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = { 941 &prm_prty0_bb_b0, &prm_prty1_bb_b0}; 942 943 static struct attn_hw_reg pbf_pb1_int0_bb_b0 = { 944 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044}; 945 946 static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = { 947 &pbf_pb1_int0_bb_b0}; 948 949 static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = { 950 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054}; 951 952 static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = { 953 &pbf_pb1_prty0_bb_b0}; 954 955 static struct attn_hw_reg pbf_pb2_int0_bb_b0 = { 956 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044}; 957 958 static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = { 959 &pbf_pb2_int0_bb_b0}; 960 961 static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = { 962 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054}; 963 964 static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = { 965 &pbf_pb2_prty0_bb_b0}; 966 967 static struct attn_hw_reg rpb_int0_bb_b0 = { 968 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044}; 969 970 static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = { 971 &rpb_int0_bb_b0}; 972 973 static struct attn_hw_reg rpb_prty0_bb_b0 = { 974 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054}; 975 976 static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = { 977 &rpb_prty0_bb_b0}; 978 979 static struct attn_hw_reg btb_int0_bb_b0 = { 980 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4}; 981 982 static struct attn_hw_reg btb_int1_bb_b0 = { 983 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc}; 984 985 static struct attn_hw_reg btb_int2_bb_b0 = { 986 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4}; 987 988 static struct attn_hw_reg btb_int3_bb_b0 = { 989 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c}; 990 991 static struct attn_hw_reg btb_int4_bb_b0 = { 992 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124}; 993 994 static struct attn_hw_reg btb_int5_bb_b0 = { 995 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c}; 996 997 static struct attn_hw_reg btb_int6_bb_b0 = { 998 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154}; 999 1000 static struct attn_hw_reg btb_int8_bb_b0 = { 1001 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188}; 1002 1003 static struct attn_hw_reg btb_int9_bb_b0 = { 1004 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0}; 1005 1006 static struct attn_hw_reg btb_int10_bb_b0 = { 1007 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8}; 1008 1009 static struct attn_hw_reg btb_int11_bb_b0 = { 1010 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0}; 1011 1012 static struct attn_hw_reg *btb_int_bb_b0_regs[11] = { 1013 &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0, 1014 &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0, 1015 &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0}; 1016 1017 static struct attn_hw_reg btb_prty0_bb_b0 = { 1018 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0}; 1019 1020 static struct attn_hw_reg btb_prty1_bb_b0 = { 1021 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404}; 1022 1023 static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = { 1024 &btb_prty0_bb_b0, &btb_prty1_bb_b0}; 1025 1026 static struct attn_hw_reg pbf_int0_bb_b0 = { 1027 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184}; 1028 1029 static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = { 1030 &pbf_int0_bb_b0}; 1031 1032 static struct attn_hw_reg pbf_prty0_bb_b0 = { 1033 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194}; 1034 1035 static struct attn_hw_reg pbf_prty1_bb_b0 = { 
1036 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204}; 1037 1038 static struct attn_hw_reg pbf_prty2_bb_b0 = { 1039 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214}; 1040 1041 static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = { 1042 &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0}; 1043 1044 static struct attn_hw_reg rdif_int0_bb_b0 = { 1045 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184}; 1046 1047 static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = { 1048 &rdif_int0_bb_b0}; 1049 1050 static struct attn_hw_reg rdif_prty0_bb_b0 = { 1051 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194}; 1052 1053 static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = { 1054 &rdif_prty0_bb_b0}; 1055 1056 static struct attn_hw_reg tdif_int0_bb_b0 = { 1057 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184}; 1058 1059 static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = { 1060 &tdif_int0_bb_b0}; 1061 1062 static struct attn_hw_reg tdif_prty0_bb_b0 = { 1063 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194}; 1064 1065 static struct attn_hw_reg tdif_prty1_bb_b0 = { 1066 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204}; 1067 1068 static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = { 1069 &tdif_prty0_bb_b0, &tdif_prty1_bb_b0}; 1070 1071 static struct attn_hw_reg cdu_int0_bb_b0 = { 1072 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc}; 1073 1074 static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = { 1075 &cdu_int0_bb_b0}; 1076 1077 static struct attn_hw_reg cdu_prty1_bb_b0 = { 1078 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204}; 1079 1080 static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = { 1081 &cdu_prty1_bb_b0}; 1082 1083 static struct attn_hw_reg ccfc_int0_bb_b0 = { 1084 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184}; 1085 1086 static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = { 1087 &ccfc_int0_bb_b0}; 1088 1089 static struct attn_hw_reg ccfc_prty1_bb_b0 = { 1090 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204}; 1091 1092 static struct attn_hw_reg ccfc_prty0_bb_b0 = { 1093 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8}; 1094 1095 static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = { 1096 &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0}; 1097 1098 static struct attn_hw_reg tcfc_int0_bb_b0 = { 1099 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184}; 1100 1101 static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = { 1102 &tcfc_int0_bb_b0}; 1103 1104 static struct attn_hw_reg tcfc_prty1_bb_b0 = { 1105 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204}; 1106 1107 static struct attn_hw_reg tcfc_prty0_bb_b0 = { 1108 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8}; 1109 1110 static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = { 1111 &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0}; 1112 1113 static struct attn_hw_reg igu_int0_bb_b0 = { 1114 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184}; 1115 1116 static struct attn_hw_reg *igu_int_bb_b0_regs[1] = { 1117 &igu_int0_bb_b0}; 1118 1119 static struct attn_hw_reg igu_prty0_bb_b0 = { 1120 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194}; 1121 1122 static struct attn_hw_reg igu_prty1_bb_b0 = { 1123 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204}; 1124 1125 static struct attn_hw_reg igu_prty2_bb_b0 = { 1126 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214}; 1127 1128 static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = { 1129 &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0}; 1130 1131 static struct attn_hw_reg cau_int0_bb_b0 = { 1132 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0}; 1133 1134 static struct attn_hw_reg *cau_int_bb_b0_regs[1] = { 1135 &cau_int0_bb_b0}; 1136 1137 static struct attn_hw_reg cau_prty1_bb_b0 = { 1138 0, 13, 0x1c0200, 
0x1c020c, 0x1c0208, 0x1c0204}; 1139 1140 static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = { 1141 &cau_prty1_bb_b0}; 1142 1143 static struct attn_hw_reg dbg_int0_bb_b0 = { 1144 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184}; 1145 1146 static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = { 1147 &dbg_int0_bb_b0}; 1148 1149 static struct attn_hw_reg dbg_prty1_bb_b0 = { 1150 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204}; 1151 1152 static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = { 1153 &dbg_prty1_bb_b0}; 1154 1155 static struct attn_hw_reg nig_int0_bb_b0 = { 1156 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044}; 1157 1158 static struct attn_hw_reg nig_int1_bb_b0 = { 1159 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054}; 1160 1161 static struct attn_hw_reg nig_int2_bb_b0 = { 1162 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064}; 1163 1164 static struct attn_hw_reg nig_int3_bb_b0 = { 1165 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074}; 1166 1167 static struct attn_hw_reg nig_int4_bb_b0 = { 1168 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084}; 1169 1170 static struct attn_hw_reg nig_int5_bb_b0 = { 1171 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094}; 1172 1173 static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { 1174 &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0, 1175 &nig_int4_bb_b0, &nig_int5_bb_b0}; 1176 1177 static struct attn_hw_reg nig_prty0_bb_b0 = { 1178 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4}; 1179 1180 static struct attn_hw_reg nig_prty1_bb_b0 = { 1181 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204}; 1182 1183 static struct attn_hw_reg nig_prty2_bb_b0 = { 1184 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214}; 1185 1186 static struct attn_hw_reg nig_prty3_bb_b0 = { 1187 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224}; 1188 1189 static struct attn_hw_reg nig_prty4_bb_b0 = { 1190 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234}; 1191 1192 static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { 1193 &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, 1194 &nig_prty3_bb_b0, &nig_prty4_bb_b0}; 1195 1196 static struct attn_hw_reg ipc_int0_bb_b0 = { 1197 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510}; 1198 1199 static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = { 1200 &ipc_int0_bb_b0}; 1201 1202 static struct attn_hw_reg ipc_prty0_bb_b0 = { 1203 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520}; 1204 1205 static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { 1206 &ipc_prty0_bb_b0}; 1207 1208 static struct attn_hw_block attn_blocks[] = { 1209 {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } }, 1210 {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } }, 1211 {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } }, 1212 {"dbu", {{0, 0, NULL, NULL} } }, 1213 {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs, 1214 pglue_b_prty_bb_b0_regs} } }, 1215 {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } }, 1216 {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } }, 1217 {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } }, 1218 {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } }, 1219 {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } }, 1220 {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } }, 1221 {"mcp", {{0, 0, NULL, NULL} } }, 1222 {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } }, 1223 {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } }, 1224 {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs, 1225 pswhst2_prty_bb_b0_regs} } }, 1226 {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } }, 1227 {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, 
pswrd2_prty_bb_b0_regs} } }, 1228 {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } }, 1229 {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } }, 1230 {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } }, 1231 {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } }, 1232 {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } }, 1233 {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } }, 1234 {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } }, 1235 {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } }, 1236 {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } }, 1237 {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } }, 1238 {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } }, 1239 {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } }, 1240 {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } }, 1241 {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } }, 1242 {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } }, 1243 {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } }, 1244 {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } }, 1245 {"src", {{1, 0, src_int_bb_b0_regs, NULL} } }, 1246 {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } }, 1247 {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } }, 1248 {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } }, 1249 {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } }, 1250 {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } }, 1251 {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } }, 1252 {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } }, 1253 {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } }, 1254 {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } }, 1255 {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } }, 1256 {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } }, 1257 {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } }, 1258 {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } }, 1259 {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } }, 1260 {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } }, 1261 {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } }, 1262 {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } }, 1263 {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } }, 1264 {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } }, 1265 {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs, 1266 pbf_pb1_prty_bb_b0_regs} } }, 1267 {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs, 1268 pbf_pb2_prty_bb_b0_regs} } }, 1269 {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } }, 1270 {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } }, 1271 {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } }, 1272 {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } }, 1273 {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } }, 1274 {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } }, 1275 {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } }, 1276 {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } }, 1277 {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } }, 1278 {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } }, 1279 {"umac", { {0, 0, NULL, NULL} } }, 1280 {"xmac", { {0, 0, NULL, NULL} } }, 1281 {"dbg", { {1, 1, dbg_int_bb_b0_regs, 
dbg_prty_bb_b0_regs} } }, 1282 {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } }, 1283 {"wol", { {0, 0, NULL, NULL} } }, 1284 {"bmbn", { {0, 0, NULL, NULL} } }, 1285 {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } }, 1286 {"nwm", { {0, 0, NULL, NULL} } }, 1287 {"nws", { {0, 0, NULL, NULL} } }, 1288 {"ms", { {0, 0, NULL, NULL} } }, 1289 {"phy_pcie", { {0, 0, NULL, NULL} } }, 1290 {"misc_aeu", { {0, 0, NULL, NULL} } }, 1291 {"bar0_map", { {0, 0, NULL, NULL} } },}; 1292 1293 /* Specific HW attention callbacks */ 1294 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) 1295 { 1296 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 1297 1298 /* This might occur on certain instances; Log it once then mask it */ 1299 DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", 1300 tmp); 1301 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 1302 0xffffffff); 1303 1304 return 0; 1305 } 1306 1307 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 1308 #define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 1309 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 1310 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) 1311 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 1312 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) 1313 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 1314 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) 1315 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 1316 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) 1317 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 1318 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) 1319 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 1320 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) 1321 { 1322 u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1323 PSWHST_REG_INCORRECT_ACCESS_VALID); 1324 1325 if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { 1326 u32 addr, data, length; 1327 1328 addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1329 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 1330 data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1331 PSWHST_REG_INCORRECT_ACCESS_DATA); 1332 length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1333 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 1334 1335 DP_INFO(p_hwfn->cdev, 1336 "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", 1337 addr, length, 1338 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), 1339 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), 1340 (u8) GET_FIELD(data, 1341 ATTENTION_INCORRECT_ACCESS_VF_VALID), 1342 (u8) GET_FIELD(data, 1343 ATTENTION_INCORRECT_ACCESS_CLIENT), 1344 (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), 1345 (u8) GET_FIELD(data, 1346 ATTENTION_INCORRECT_ACCESS_BYTE_EN), 1347 data); 1348 } 1349 1350 return 0; 1351 } 1352 1353 #define QED_GRC_ATTENTION_VALID_BIT (1 << 0) 1354 #define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) 1355 #define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) 1356 #define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) 1357 #define QED_GRC_ATTENTION_MASTER_MASK (0xf) 1358 #define QED_GRC_ATTENTION_MASTER_SHIFT (24) 1359 #define QED_GRC_ATTENTION_PF_MASK (0xf) 1360 #define QED_GRC_ATTENTION_PF_SHIFT (0) 1361 #define QED_GRC_ATTENTION_VF_MASK (0xff) 1362 #define QED_GRC_ATTENTION_VF_SHIFT (4) 1363 #define QED_GRC_ATTENTION_PRIV_MASK (0x3) 1364 #define QED_GRC_ATTENTION_PRIV_SHIFT (14) 1365 #define QED_GRC_ATTENTION_PRIV_VF (0) 1366 static const char *attn_master_to_str(u8 master) 1367 { 1368 switch (master) { 1369 case 1: return 
"PXP"; 1370 case 2: return "MCP"; 1371 case 3: return "MSDM"; 1372 case 4: return "PSDM"; 1373 case 5: return "YSDM"; 1374 case 6: return "USDM"; 1375 case 7: return "TSDM"; 1376 case 8: return "XSDM"; 1377 case 9: return "DBU"; 1378 case 10: return "DMAE"; 1379 default: 1380 return "Unkown"; 1381 } 1382 } 1383 1384 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) 1385 { 1386 u32 tmp, tmp2; 1387 1388 /* We've already cleared the timeout interrupt register, so we learn 1389 * of interrupts via the validity register 1390 */ 1391 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1392 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 1393 if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) 1394 goto out; 1395 1396 /* Read the GRC timeout information */ 1397 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1398 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 1399 tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1400 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 1401 1402 DP_INFO(p_hwfn->cdev, 1403 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 1404 tmp2, tmp, 1405 (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 1406 GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, 1407 attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), 1408 GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), 1409 (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == 1410 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)", 1411 GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); 1412 1413 out: 1414 /* Regardles of anything else, clean the validity bit */ 1415 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 1416 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 1417 return 0; 1418 } 1419 1420 #define PGLUE_ATTENTION_VALID (1 << 29) 1421 #define PGLUE_ATTENTION_RD_VALID (1 << 26) 1422 #define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) 1423 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 1424 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) 1425 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) 1426 #define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) 1427 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 1428 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) 1429 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) 1430 #define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) 1431 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) 1432 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) 1433 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) 1434 #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) 1435 #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) 1436 #define PGLUE_ATTENTION_ILT_VALID (1 << 23) 1437 static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) 1438 { 1439 u32 tmp; 1440 1441 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1442 PGLUE_B_REG_TX_ERR_WR_DETAILS2); 1443 if (tmp & PGLUE_ATTENTION_VALID) { 1444 u32 addr_lo, addr_hi, details; 1445 1446 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1447 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 1448 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1449 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 1450 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1451 PGLUE_B_REG_TX_ERR_WR_DETAILS); 1452 1453 DP_INFO(p_hwfn, 1454 "Illegal write by chip to [%08x:%08x] blocked.\n" 1455 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 1456 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 1457 addr_hi, addr_lo, details, 1458 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 1459 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 1460 GET_FIELD(details, 1461 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 1462 tmp, 1463 GET_FIELD(tmp, 1464 PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 
1 : 0, 1465 GET_FIELD(tmp, 1466 PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 1467 GET_FIELD(tmp, 1468 PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); 1469 } 1470 1471 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1472 PGLUE_B_REG_TX_ERR_RD_DETAILS2); 1473 if (tmp & PGLUE_ATTENTION_RD_VALID) { 1474 u32 addr_lo, addr_hi, details; 1475 1476 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1477 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 1478 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1479 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 1480 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1481 PGLUE_B_REG_TX_ERR_RD_DETAILS); 1482 1483 DP_INFO(p_hwfn, 1484 "Illegal read by chip from [%08x:%08x] blocked.\n" 1485 " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" 1486 " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 1487 addr_hi, addr_lo, details, 1488 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), 1489 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), 1490 GET_FIELD(details, 1491 PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, 1492 tmp, 1493 GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 1494 : 0, 1495 GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, 1496 GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 1497 : 0); 1498 } 1499 1500 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1501 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 1502 if (tmp & PGLUE_ATTENTION_ICPL_VALID) 1503 DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp); 1504 1505 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1506 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 1507 if (tmp & PGLUE_ATTENTION_ZLR_VALID) { 1508 u32 addr_hi, addr_lo; 1509 1510 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1511 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 1512 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1513 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 1514 1515 DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n", 1516 tmp, addr_hi, addr_lo); 1517 } 1518 1519 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1520 PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 1521 if (tmp & PGLUE_ATTENTION_ILT_VALID) { 1522 u32 addr_hi, addr_lo, details; 1523 1524 addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1525 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 1526 addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1527 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 1528 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1529 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 1530 1531 DP_INFO(p_hwfn, 1532 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 1533 details, tmp, addr_hi, addr_lo); 1534 } 1535 1536 /* Clear the indications */ 1537 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 1538 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 1539 1540 return 0; 1541 } 1542 1543 #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) 1544 #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 1545 #define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) 1546 #define QED_DORQ_ATTENTION_SIZE_SHIFT (16) 1547 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 1548 { 1549 u32 reason; 1550 1551 reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & 1552 QED_DORQ_ATTENTION_REASON_MASK; 1553 if (reason) { 1554 u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1555 DORQ_REG_DB_DROP_DETAILS); 1556 1557 DP_INFO(p_hwfn->cdev, 1558 "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", 1559 qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1560 DORQ_REG_DB_DROP_DETAILS_ADDRESS), 1561 (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), 1562 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 1563 reason); 1564 } 1565 1566 return -EINVAL; 1567 } 1568 1569 /* Notice aeu_invert_reg must be defined in the same 
order of bits as HW; */ 1570 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 1571 { 1572 { /* After Invert 1 */ 1573 {"GPIO0 function%d", 1574 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 1575 } 1576 }, 1577 1578 { 1579 { /* After Invert 2 */ 1580 {"PGLUE config_space", ATTENTION_SINGLE, 1581 NULL, MAX_BLOCK_ID}, 1582 {"PGLUE misc_flr", ATTENTION_SINGLE, 1583 NULL, MAX_BLOCK_ID}, 1584 {"PGLUE B RBC", ATTENTION_PAR_INT, 1585 qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, 1586 {"PGLUE misc_mctp", ATTENTION_SINGLE, 1587 NULL, MAX_BLOCK_ID}, 1588 {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 1589 {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 1590 {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 1591 {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | 1592 (1 << ATTENTION_OFFSET_SHIFT), 1593 NULL, MAX_BLOCK_ID}, 1594 {"PCIE glue/PXP VPD %d", 1595 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, 1596 } 1597 }, 1598 1599 { 1600 { /* After Invert 3 */ 1601 {"General Attention %d", 1602 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, 1603 } 1604 }, 1605 1606 { 1607 { /* After Invert 4 */ 1608 {"General Attention 32", ATTENTION_SINGLE, 1609 NULL, MAX_BLOCK_ID}, 1610 {"General Attention %d", 1611 (2 << ATTENTION_LENGTH_SHIFT) | 1612 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, 1613 {"General Attention 35", ATTENTION_SINGLE, 1614 NULL, MAX_BLOCK_ID}, 1615 {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), 1616 NULL, BLOCK_CNIG}, 1617 {"MCP CPU", ATTENTION_SINGLE, 1618 qed_mcp_attn_cb, MAX_BLOCK_ID}, 1619 {"MCP Watchdog timer", ATTENTION_SINGLE, 1620 NULL, MAX_BLOCK_ID}, 1621 {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, 1622 {"AVS stop status ready", ATTENTION_SINGLE, 1623 NULL, MAX_BLOCK_ID}, 1624 {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 1625 {"MSTAT per-path", ATTENTION_PAR_INT, 1626 NULL, MAX_BLOCK_ID}, 1627 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), 1628 NULL, MAX_BLOCK_ID}, 1629 {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, 1630 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, 1631 {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, 1632 {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, 1633 {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, 1634 } 1635 }, 1636 1637 { 1638 { /* After Invert 5 */ 1639 {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, 1640 {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, 1641 {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, 1642 {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, 1643 {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, 1644 {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, 1645 {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, 1646 {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, 1647 {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, 1648 {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, 1649 {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, 1650 {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, 1651 {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, 1652 {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, 1653 {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, 1654 {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, 1655 } 1656 }, 1657 1658 { 1659 { /* After Invert 6 */ 1660 {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, 1661 {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, 1662 {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, 1663 {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, 1664 {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, 1665 {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, 1666 {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, 1667 {"YSDM", ATTENTION_PAR_INT, 
NULL, BLOCK_YSDM}, 1668 {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, 1669 {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, 1670 {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, 1671 {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, 1672 {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, 1673 {"DORQ", ATTENTION_PAR_INT, 1674 qed_dorq_attn_cb, BLOCK_DORQ}, 1675 {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, 1676 {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, 1677 } 1678 }, 1679 1680 { 1681 { /* After Invert 7 */ 1682 {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, 1683 {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, 1684 {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, 1685 {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, 1686 {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, 1687 {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, 1688 {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, 1689 {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, 1690 {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, 1691 {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, 1692 {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, 1693 {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, 1694 {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, 1695 {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, 1696 {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, 1697 {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, 1698 {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, 1699 } 1700 }, 1701 1702 { 1703 { /* After Invert 8 */ 1704 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, 1705 NULL, BLOCK_PSWRQ2}, 1706 {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, 1707 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, 1708 NULL, BLOCK_PSWWR2}, 1709 {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, 1710 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, 1711 NULL, BLOCK_PSWRD2}, 1712 {"PSWHST", ATTENTION_PAR_INT, 1713 qed_pswhst_attn_cb, BLOCK_PSWHST}, 1714 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, 1715 NULL, BLOCK_PSWHST2}, 1716 {"GRC", ATTENTION_PAR_INT, 1717 qed_grc_attn_cb, BLOCK_GRC}, 1718 {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, 1719 {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, 1720 {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 1721 {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 1722 {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 1723 {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 1724 {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 1725 {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, 1726 {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, 1727 {"PCIE glue/PXP Exp. 
ROM", ATTENTION_SINGLE, 1728 NULL, BLOCK_PGLCS}, 1729 {"PERST_B assertion", ATTENTION_SINGLE, 1730 NULL, MAX_BLOCK_ID}, 1731 {"PERST_B deassertion", ATTENTION_SINGLE, 1732 NULL, MAX_BLOCK_ID}, 1733 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), 1734 NULL, MAX_BLOCK_ID}, 1735 } 1736 }, 1737 1738 { 1739 { /* After Invert 9 */ 1740 {"MCP Latched memory", ATTENTION_PAR, 1741 NULL, MAX_BLOCK_ID}, 1742 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, 1743 NULL, MAX_BLOCK_ID}, 1744 {"MCP Latched ump_tx", ATTENTION_PAR, 1745 NULL, MAX_BLOCK_ID}, 1746 {"MCP Latched scratchpad", ATTENTION_PAR, 1747 NULL, MAX_BLOCK_ID}, 1748 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), 1749 NULL, MAX_BLOCK_ID}, 1750 } 1751 }, 1752 }; 1753 1754 #define ATTN_STATE_BITS (0xfff) 1755 #define ATTN_BITS_MASKABLE (0x3ff) 1756 struct qed_sb_attn_info { 1757 /* Virtual & Physical address of the SB */ 1758 struct atten_status_block *sb_attn; 1759 dma_addr_t sb_phys; 1760 1761 /* Last seen running index */ 1762 u16 index; 1763 1764 /* A mask of the AEU bits resulting in a parity error */ 1765 u32 parity_mask[NUM_ATTN_REGS]; 1766 1767 /* A pointer to the attention description structure */ 1768 struct aeu_invert_reg *p_aeu_desc; 1769 1770 /* Previously asserted attentions, which are still unasserted */ 1771 u16 known_attn; 1772 1773 /* Cleanup address for the link's general hw attention */ 1774 u32 mfw_attn_addr; 1775 }; 1776 1777 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, 1778 struct qed_sb_attn_info *p_sb_desc) 1779 { 1780 u16 rc = 0; 1781 u16 index; 1782 1783 /* Make certain HW write took affect */ 1784 mmiowb(); 1785 1786 index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); 1787 if (p_sb_desc->index != index) { 1788 p_sb_desc->index = index; 1789 rc = QED_SB_ATT_IDX; 1790 } 1791 1792 /* Make certain we got a consistent view with HW */ 1793 mmiowb(); 1794 1795 return rc; 1796 } 1797 1798 /** 1799 * @brief qed_int_assertion - handles asserted attention bits 1800 * 1801 * @param p_hwfn 1802 * @param asserted_bits newly asserted bits 1803 * @return int 1804 */ 1805 static int qed_int_assertion(struct qed_hwfn *p_hwfn, 1806 u16 asserted_bits) 1807 { 1808 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1809 u32 igu_mask; 1810 1811 /* Mask the source of the attention in the IGU */ 1812 igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1813 IGU_REG_ATTENTION_ENABLE); 1814 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 1815 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 1816 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 1817 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 1818 1819 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 1820 "inner known ATTN state: 0x%04x --> 0x%04x\n", 1821 sb_attn_sw->known_attn, 1822 sb_attn_sw->known_attn | asserted_bits); 1823 sb_attn_sw->known_attn |= asserted_bits; 1824 1825 /* Handle MCP events */ 1826 if (asserted_bits & 0x100) { 1827 qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 1828 /* Clean the MCP attention */ 1829 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, 1830 sb_attn_sw->mfw_attn_addr, 0); 1831 } 1832 1833 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 1834 GTT_BAR0_MAP_REG_IGU_CMD + 1835 ((IGU_CMD_ATTN_BIT_SET_UPPER - 1836 IGU_CMD_INT_ACK_BASE) << 3), 1837 (u32)asserted_bits); 1838 1839 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", 1840 asserted_bits); 1841 1842 return 0; 1843 } 1844 1845 static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn, 1846 struct attn_hw_reg *p_reg_desc, 1847 struct 
attn_hw_block *p_block,
1848 enum qed_attention_type type,
1849 u32 val, u32 mask)
1850 {
1851 int j;
1852
1853 for (j = 0; j < p_reg_desc->num_of_bits; j++) {
1854 if (!(val & (1 << j)))
1855 continue;
1856
1857 DP_NOTICE(p_hwfn,
1858 "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
1859 p_block->name,
1860 type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
1861 "Parity",
1862 p_reg_desc->reg_idx, p_reg_desc->sts_addr,
1863 j, (mask & (1 << j)) ? " [MASKED]" : "");
1864 }
1865 }
1866
1867 /**
1868 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
1869 * cause of the attention
1870 *
1871 * @param p_hwfn
1872 * @param p_aeu - descriptor of an AEU bit which caused the attention
1873 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
1874 * this bit to this group.
1875 * @param bitmask - mask of the bit(s) within aeu_en_reg which caused the attention
1876 *
1877 * @return int
1878 */
1879 static int
1880 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
1881 struct aeu_invert_reg_bit *p_aeu,
1882 u32 aeu_en_reg,
1883 u32 bitmask)
1884 {
1885 int rc = -EINVAL;
1886 u32 val;
1887
1888 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
1889 p_aeu->bit_name, bitmask);
1890
1891 /* Call callback before clearing the interrupt status */
1892 if (p_aeu->cb) {
1893 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
1894 p_aeu->bit_name);
1895 rc = p_aeu->cb(p_hwfn);
1896 }
1897
1898 /* Handle HW block interrupt registers */
1899 if (p_aeu->block_index != MAX_BLOCK_ID) {
1900 struct attn_hw_block *p_block;
1901 u32 mask;
1902 int i;
1903
1904 p_block = &attn_blocks[p_aeu->block_index];
1905
1906 /* Handle each interrupt register */
1907 for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
1908 struct attn_hw_reg *p_reg_desc;
1909 u32 sts_addr;
1910
1911 p_reg_desc = p_block->chip_regs[0].int_regs[i];
1912
1913 /* In case of fatal attention, don't clear the status
1914 * so it would appear in the following idle check.
1915 */ 1916 if (rc == 0) 1917 sts_addr = p_reg_desc->sts_clr_addr; 1918 else 1919 sts_addr = p_reg_desc->sts_addr; 1920 1921 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr); 1922 mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1923 p_reg_desc->mask_addr); 1924 qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, 1925 p_block, 1926 QED_ATTN_TYPE_ATTN, 1927 val, mask); 1928 } 1929 } 1930 1931 /* If the attention is benign, no need to prevent it */ 1932 if (!rc) 1933 goto out; 1934 1935 /* Prevent this Attention from being asserted in the future */ 1936 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 1937 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); 1938 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 1939 p_aeu->bit_name); 1940 1941 out: 1942 return rc; 1943 } 1944 1945 static void qed_int_parity_print(struct qed_hwfn *p_hwfn, 1946 struct aeu_invert_reg_bit *p_aeu, 1947 struct attn_hw_block *p_block, 1948 u8 bit_index) 1949 { 1950 int i; 1951 1952 for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) { 1953 struct attn_hw_reg *p_reg_desc; 1954 u32 val, mask; 1955 1956 p_reg_desc = p_block->chip_regs[0].prty_regs[i]; 1957 1958 val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1959 p_reg_desc->sts_clr_addr); 1960 mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1961 p_reg_desc->mask_addr); 1962 qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, 1963 p_block, 1964 QED_ATTN_TYPE_PARITY, 1965 val, mask); 1966 } 1967 } 1968 1969 /** 1970 * @brief qed_int_deassertion_parity - handle a single parity AEU source 1971 * 1972 * @param p_hwfn 1973 * @param p_aeu - descriptor of an AEU bit which caused the parity 1974 * @param bit_index 1975 */ 1976 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, 1977 struct aeu_invert_reg_bit *p_aeu, 1978 u8 bit_index) 1979 { 1980 u32 block_id = p_aeu->block_index; 1981 1982 DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n", 1983 p_aeu->bit_name, bit_index); 1984 1985 if (block_id != MAX_BLOCK_ID) { 1986 qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id], 1987 bit_index); 1988 1989 /* In BB, there's a single parity bit for several blocks */ 1990 if (block_id == BLOCK_BTB) { 1991 qed_int_parity_print(p_hwfn, p_aeu, 1992 &attn_blocks[BLOCK_OPTE], 1993 bit_index); 1994 qed_int_parity_print(p_hwfn, p_aeu, 1995 &attn_blocks[BLOCK_MCP], 1996 bit_index); 1997 } 1998 } 1999 } 2000 2001 /** 2002 * @brief - handles deassertion of previously asserted attentions. 
2003 * 2004 * @param p_hwfn 2005 * @param deasserted_bits - newly deasserted bits 2006 * @return int 2007 * 2008 */ 2009 static int qed_int_deassertion(struct qed_hwfn *p_hwfn, 2010 u16 deasserted_bits) 2011 { 2012 struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 2013 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; 2014 u8 i, j, k, bit_idx; 2015 int rc = 0; 2016 2017 /* Read the attention registers in the AEU */ 2018 for (i = 0; i < NUM_ATTN_REGS; i++) { 2019 aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 2020 MISC_REG_AEU_AFTER_INVERT_1_IGU + 2021 i * 0x4); 2022 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2023 "Deasserted bits [%d]: %08x\n", 2024 i, aeu_inv_arr[i]); 2025 } 2026 2027 /* Find parity attentions first */ 2028 for (i = 0; i < NUM_ATTN_REGS; i++) { 2029 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 2030 u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, 2031 MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 2032 i * sizeof(u32)); 2033 u32 parities; 2034 2035 /* Skip register in which no parity bit is currently set */ 2036 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 2037 if (!parities) 2038 continue; 2039 2040 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 2041 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 2042 2043 if ((p_bit->flags & ATTENTION_PARITY) && 2044 !!(parities & (1 << bit_idx))) 2045 qed_int_deassertion_parity(p_hwfn, p_bit, 2046 bit_idx); 2047 2048 bit_idx += ATTENTION_LENGTH(p_bit->flags); 2049 } 2050 } 2051 2052 /* Find non-parity cause for attention and act */ 2053 for (k = 0; k < MAX_ATTN_GRPS; k++) { 2054 struct aeu_invert_reg_bit *p_aeu; 2055 2056 /* Handle only groups whose attention is currently deasserted */ 2057 if (!(deasserted_bits & (1 << k))) 2058 continue; 2059 2060 for (i = 0; i < NUM_ATTN_REGS; i++) { 2061 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 2062 i * sizeof(u32) + 2063 k * sizeof(u32) * NUM_ATTN_REGS; 2064 u32 en, bits; 2065 2066 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 2067 bits = aeu_inv_arr[i] & en; 2068 2069 /* Skip if no bit from this group is currently set */ 2070 if (!bits) 2071 continue; 2072 2073 /* Find all set bits from current register which belong 2074 * to current group, making them responsible for the 2075 * previous assertion. 
2076 */
2077 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
2078 u8 bit, bit_len;
2079 u32 bitmask;
2080
2081 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
2082
2083 /* No need to handle parity-only bits */
2084 if (p_aeu->flags == ATTENTION_PAR)
2085 continue;
2086
2087 bit = bit_idx;
2088 bit_len = ATTENTION_LENGTH(p_aeu->flags);
2089 if (p_aeu->flags & ATTENTION_PAR_INT) {
2090 /* Skip Parity */
2091 bit++;
2092 bit_len--;
2093 }
2094
2095 bitmask = bits & (((1 << bit_len) - 1) << bit);
2096 if (bitmask) {
2097 /* Handle source of the attention */
2098 qed_int_deassertion_aeu_bit(p_hwfn,
2099 p_aeu,
2100 aeu_en,
2101 bitmask);
2102 }
2103
2104 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
2105 }
2106 }
2107 }
2108
2109 /* Clear IGU indication for the deasserted bits */
2110 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
2111 GTT_BAR0_MAP_REG_IGU_CMD +
2112 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
2113 IGU_CMD_INT_ACK_BASE) << 3),
2114 ~((u32)deasserted_bits));
2115
2116 /* Unmask deasserted attentions in IGU */
2117 aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
2118 IGU_REG_ATTENTION_ENABLE);
2119 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
2120 qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
2121
2122 /* Clear deassertion from inner state */
2123 sb_attn_sw->known_attn &= ~deasserted_bits;
2124
2125 return rc;
2126 }
2127
2128 static int qed_int_attentions(struct qed_hwfn *p_hwfn)
2129 {
2130 struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
2131 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
2132 u32 attn_bits = 0, attn_acks = 0;
2133 u16 asserted_bits, deasserted_bits;
2134 __le16 index;
2135 int rc = 0;
2136
2137 /* Read current attention bits/acks - safeguard against attentions
2138 * by guaranteeing work on a synchronized timeframe
2139 */
2140 do {
2141 index = p_sb_attn->sb_index;
2142 attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
2143 attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
2144 } while (index != p_sb_attn->sb_index);
2145 p_sb_attn->sb_index = index;
2146
2147 /* Attention / Deassertion are meaningful (and in correct state)
2148 * only when they differ and are consistent with the known state -
2149 * deassertion when there was a previous attention & a current ack,
2150 * and assertion when there is a current attention with no previous one
2151 */
2152 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
2153 ~p_sb_attn_sw->known_attn;
2154 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
2155 p_sb_attn_sw->known_attn;
2156
2157 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
2158 DP_INFO(p_hwfn,
2159 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev.
known: 0x%04x]\n",
2160 index, attn_bits, attn_acks, asserted_bits,
2161 deasserted_bits, p_sb_attn_sw->known_attn);
2162 } else if (asserted_bits == 0x100) {
2163 DP_INFO(p_hwfn,
2164 "MFW indication via attention\n");
2165 } else {
2166 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2167 "MFW indication [deassertion]\n");
2168 }
2169
2170 if (asserted_bits) {
2171 rc = qed_int_assertion(p_hwfn, asserted_bits);
2172 if (rc)
2173 return rc;
2174 }
2175
2176 if (deasserted_bits) {
2177 rc = qed_int_deassertion(p_hwfn, deasserted_bits);
2178 if (rc)
2179 return rc;
2180 }
2181
2182 return rc;
2183 }
2184
2185 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
2186 void __iomem *igu_addr,
2187 u32 ack_cons)
2188 {
2189 struct igu_prod_cons_update igu_ack = { 0 };
2190
2191 igu_ack.sb_id_and_flags =
2192 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
2193 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
2194 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
2195 (IGU_SEG_ACCESS_ATTN <<
2196 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
2197
2198 DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
2199
2200 /* Both segments (interrupts & acks) are written to the same address;
2201 * need to guarantee all commands will be received (in-order) by HW.
2202 */
2203 mmiowb();
2204 barrier();
2205 }
2206
2207 void qed_int_sp_dpc(unsigned long hwfn_cookie)
2208 {
2209 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
2210 struct qed_pi_info *pi_info = NULL;
2211 struct qed_sb_attn_info *sb_attn;
2212 struct qed_sb_info *sb_info;
2213 int arr_size;
2214 u16 rc = 0;
2215
2216 if (!p_hwfn->p_sp_sb) {
2217 DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
2218 return;
2219 }
2220
2221 sb_info = &p_hwfn->p_sp_sb->sb_info;
2222 arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
2223 if (!sb_info) {
2224 DP_ERR(p_hwfn->cdev,
2225 "Status block is NULL - cannot ack interrupts\n");
2226 return;
2227 }
2228
2229 if (!p_hwfn->p_sb_attn) {
2230 DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
2231 return;
2232 }
2233 sb_attn = p_hwfn->p_sb_attn;
2234
2235 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
2236 p_hwfn, p_hwfn->my_id);
2237
2238 /* Disable ack for def status block. Required both for msix and
2239 * inta in non-mask mode; for inta it does no harm.
2240 */
2241 qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
2242
2243 /* Gather Interrupts/Attentions information */
2244 if (!sb_info->sb_virt) {
2245 DP_ERR(
2246 p_hwfn->cdev,
2247 "Interrupt Status block is NULL - cannot check for new interrupts!\n");
2248 } else {
2249 u32 tmp_index = sb_info->sb_ack;
2250
2251 rc = qed_sb_update_sb_idx(sb_info);
2252 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
2253 "Interrupt indices: 0x%08x --> 0x%08x\n",
2254 tmp_index, sb_info->sb_ack);
2255 }
2256
2257 if (!sb_attn || !sb_attn->sb_attn) {
2258 DP_ERR(
2259 p_hwfn->cdev,
2260 "Attentions Status block is NULL - cannot check for new attentions!\n");
2261 } else {
2262 u16 tmp_index = sb_attn->index;
2263
2264 rc |= qed_attn_update_idx(p_hwfn, sb_attn);
2265 DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
2266 "Attention indices: 0x%08x --> 0x%08x\n",
2267 tmp_index, sb_attn->index);
2268 }
2269
2270 /* Check if we expect interrupts at this time; if not, just ack them */
2271 if (!(rc & QED_SB_EVENT_MASK)) {
2272 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
2273 return;
2274 }
2275
2276 /* Check the validity of the DPC ptt; if not valid, ack interrupts and fail */
2277 if (!p_hwfn->p_dpc_ptt) {
2278 DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
2279 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
2280 return;
2281 }
2282
2283 if (rc & QED_SB_ATT_IDX)
2284 qed_int_attentions(p_hwfn);
2285
2286 if (rc & QED_SB_IDX) {
2287 int pi;
2288
2289 /* Call the completion callbacks of all registered protocol indices */
2290 for (pi = 0; pi < arr_size; pi++) {
2291 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
2292 if (pi_info->comp_cb)
2293 pi_info->comp_cb(p_hwfn, pi_info->cookie);
2294 }
2295 }
2296
2297 if (sb_attn && (rc & QED_SB_ATT_IDX))
2298 /* This should be done before the interrupts are enabled,
2299 * since otherwise a new attention will be generated.
2300 */
2301 qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
2302
2303 qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
2304 }
2305
2306 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
2307 {
2308 struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
2309
2310 if (!p_sb)
2311 return;
2312
2313 if (p_sb->sb_attn)
2314 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2315 SB_ATTN_ALIGNED_SIZE(p_hwfn),
2316 p_sb->sb_attn,
2317 p_sb->sb_phys);
2318 kfree(p_sb);
2319 }
2320
2321 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
2322 struct qed_ptt *p_ptt)
2323 {
2324 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
2325
2326 memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
2327
2328 sb_info->index = 0;
2329 sb_info->known_attn = 0;
2330
2331 /* Configure Attention Status Block in IGU */
2332 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
2333 lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
2334 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
2335 upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
2336 }
2337
2338 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
2339 struct qed_ptt *p_ptt,
2340 void *sb_virt_addr,
2341 dma_addr_t sb_phy_addr)
2342 {
2343 struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
2344 int i, j, k;
2345
2346 sb_info->sb_attn = sb_virt_addr;
2347 sb_info->sb_phys = sb_phy_addr;
2348
2349 /* Set the pointer to the AEU descriptors */
2350 sb_info->p_aeu_desc = aeu_descs;
2351
2352 /* Calculate Parity Masks */
2353 memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
2354 for (i = 0; i < NUM_ATTN_REGS; i++) {
2355 /* j is array index, k is bit index */
2356 for (j = 0, k = 0; k < 32; j++) {
2357 unsigned int flags = aeu_descs[i].bits[j].flags;
2358
2359 if (flags & ATTENTION_PARITY)
2360 sb_info->parity_mask[i] |= 1 << k;
2361
2362 k += ATTENTION_LENGTH(flags);
2363 }
2364 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2365 "Attn Mask [Reg %d]: 0x%08x\n",
2366 i, sb_info->parity_mask[i]);
2367 }
2368
2369 /* Set the address of cleanup for the mcp attention */
2370 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
2371 MISC_REG_AEU_GENERAL_ATTN_0;
2372
2373 qed_int_sb_attn_setup(p_hwfn, p_ptt);
2374 }
2375
2376 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
2377 struct qed_ptt *p_ptt)
2378 {
2379 struct qed_dev *cdev = p_hwfn->cdev;
2380 struct qed_sb_attn_info *p_sb;
2381 void *p_virt;
2382 dma_addr_t p_phys = 0;
2383
2384 /* SB struct */
2385 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
2386 if (!p_sb) {
2387 DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
2388 return -ENOMEM;
2389 }
2390
2391 /* SB ring */
2392 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
2393 SB_ATTN_ALIGNED_SIZE(p_hwfn),
2394 &p_phys, GFP_KERNEL);
2395
2396 if (!p_virt) {
2397 DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
2398 kfree(p_sb);
2399 return
-ENOMEM;
2400 }
2401
2402 /* Attention setup */
2403 p_hwfn->p_sb_attn = p_sb;
2404 qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
2405
2406 return 0;
2407 }
2408
2409 /* coalescing timeout = timeset << (timer_res + 1) */
2410 #define QED_CAU_DEF_RX_USECS 24
2411 #define QED_CAU_DEF_TX_USECS 48
2412
2413 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
2414 struct cau_sb_entry *p_sb_entry,
2415 u8 pf_id,
2416 u16 vf_number,
2417 u8 vf_valid)
2418 {
2419 struct qed_dev *cdev = p_hwfn->cdev;
2420 u32 cau_state;
2421
2422 memset(p_sb_entry, 0, sizeof(*p_sb_entry));
2423
2424 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
2425 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
2426 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
2427 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
2428 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
2429
2430 /* setting the time resolution to a fixed value (= 1) */
2431 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
2432 QED_CAU_DEF_RX_TIMER_RES);
2433 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
2434 QED_CAU_DEF_TX_TIMER_RES);
2435
2436 cau_state = CAU_HC_DISABLE_STATE;
2437
2438 if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
2439 cau_state = CAU_HC_ENABLE_STATE;
2440 if (!cdev->rx_coalesce_usecs)
2441 cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
2442 if (!cdev->tx_coalesce_usecs)
2443 cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
2444 }
2445
2446 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
2447 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
2448 }
2449
2450 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
2451 struct qed_ptt *p_ptt,
2452 dma_addr_t sb_phys,
2453 u16 igu_sb_id,
2454 u16 vf_number,
2455 u8 vf_valid)
2456 {
2457 struct cau_sb_entry sb_entry;
2458
2459 qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
2460 vf_number, vf_valid);
2461
2462 if (p_hwfn->hw_init_done) {
2463 /* Wide-bus, initialize via DMAE */
2464 u64 phys_addr = (u64)sb_phys;
2465
2466 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
2467 CAU_REG_SB_ADDR_MEMORY +
2468 igu_sb_id * sizeof(u64), 2, 0);
2469 qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
2470 CAU_REG_SB_VAR_MEMORY +
2471 igu_sb_id * sizeof(u64), 2, 0);
2472 } else {
2473 /* Initialize Status Block Address */
2474 STORE_RT_REG_AGG(p_hwfn,
2475 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
2476 igu_sb_id * 2,
2477 sb_phys);
2478
2479 STORE_RT_REG_AGG(p_hwfn,
2480 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
2481 igu_sb_id * 2,
2482 sb_entry);
2483 }
2484
2485 /* Configure pi coalescing if set */
2486 if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
2487 u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
2488 (QED_CAU_DEF_RX_TIMER_RES + 1);
2489 u8 num_tc = 1, i;
2490
2491 qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
2492 QED_COAL_RX_STATE_MACHINE,
2493 timeset);
2494
2495 timeset = p_hwfn->cdev->tx_coalesce_usecs >>
2496 (QED_CAU_DEF_TX_TIMER_RES + 1);
2497
2498 for (i = 0; i < num_tc; i++) {
2499 qed_int_cau_conf_pi(p_hwfn, p_ptt,
2500 igu_sb_id, TX_PI(i),
2501 QED_COAL_TX_STATE_MACHINE,
2502 timeset);
2503 }
2504 }
2505 }
2506
2507 void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
2508 struct qed_ptt *p_ptt,
2509 u16 igu_sb_id,
2510 u32 pi_index,
2511 enum qed_coalescing_fsm coalescing_fsm,
2512 u8 timeset)
2513 {
2514 struct cau_pi_entry pi_entry;
2515 u32 sb_offset;
2516 u32 pi_offset;
2517
2518 if (IS_VF(p_hwfn->cdev))
2519 return;
2520 2521 sb_offset = igu_sb_id * PIS_PER_SB; 2522 memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); 2523 2524 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 2525 if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) 2526 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 2527 else 2528 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 2529 2530 pi_offset = sb_offset + pi_index; 2531 if (p_hwfn->hw_init_done) { 2532 qed_wr(p_hwfn, p_ptt, 2533 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 2534 *((u32 *)&(pi_entry))); 2535 } else { 2536 STORE_RT_REG(p_hwfn, 2537 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 2538 *((u32 *)&(pi_entry))); 2539 } 2540 } 2541 2542 void qed_int_sb_setup(struct qed_hwfn *p_hwfn, 2543 struct qed_ptt *p_ptt, 2544 struct qed_sb_info *sb_info) 2545 { 2546 /* zero status block and ack counter */ 2547 sb_info->sb_ack = 0; 2548 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 2549 2550 if (IS_PF(p_hwfn->cdev)) 2551 qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 2552 sb_info->igu_sb_id, 0, 0); 2553 } 2554 2555 /** 2556 * @brief qed_get_igu_sb_id - given a sw sb_id return the 2557 * igu_sb_id 2558 * 2559 * @param p_hwfn 2560 * @param sb_id 2561 * 2562 * @return u16 2563 */ 2564 static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, 2565 u16 sb_id) 2566 { 2567 u16 igu_sb_id; 2568 2569 /* Assuming continuous set of IGU SBs dedicated for given PF */ 2570 if (sb_id == QED_SP_SB_ID) 2571 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 2572 else if (IS_PF(p_hwfn->cdev)) 2573 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; 2574 else 2575 igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); 2576 2577 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", 2578 (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id); 2579 2580 return igu_sb_id; 2581 } 2582 2583 int qed_int_sb_init(struct qed_hwfn *p_hwfn, 2584 struct qed_ptt *p_ptt, 2585 struct qed_sb_info *sb_info, 2586 void *sb_virt_addr, 2587 dma_addr_t sb_phy_addr, 2588 u16 sb_id) 2589 { 2590 sb_info->sb_virt = sb_virt_addr; 2591 sb_info->sb_phys = sb_phy_addr; 2592 2593 sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); 2594 2595 if (sb_id != QED_SP_SB_ID) { 2596 p_hwfn->sbs_info[sb_id] = sb_info; 2597 p_hwfn->num_sbs++; 2598 } 2599 2600 sb_info->cdev = p_hwfn->cdev; 2601 2602 /* The igu address will hold the absolute address that needs to be 2603 * written to for a specific status block 2604 */ 2605 if (IS_PF(p_hwfn->cdev)) { 2606 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 2607 GTT_BAR0_MAP_REG_IGU_CMD + 2608 (sb_info->igu_sb_id << 3); 2609 } else { 2610 sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + 2611 PXP_VF_BAR0_START_IGU + 2612 ((IGU_CMD_INT_ACK_BASE + 2613 sb_info->igu_sb_id) << 3); 2614 } 2615 2616 sb_info->flags |= QED_SB_INFO_INIT; 2617 2618 qed_int_sb_setup(p_hwfn, p_ptt, sb_info); 2619 2620 return 0; 2621 } 2622 2623 int qed_int_sb_release(struct qed_hwfn *p_hwfn, 2624 struct qed_sb_info *sb_info, 2625 u16 sb_id) 2626 { 2627 if (sb_id == QED_SP_SB_ID) { 2628 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 2629 return -EINVAL; 2630 } 2631 2632 /* zero status block and ack counter */ 2633 sb_info->sb_ack = 0; 2634 memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 2635 2636 if (p_hwfn->sbs_info[sb_id] != NULL) { 2637 p_hwfn->sbs_info[sb_id] = NULL; 2638 p_hwfn->num_sbs--; 2639 } 2640 2641 return 0; 2642 } 2643 2644 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) 2645 { 2646 struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 2647 2648 if (!p_sb) 
2649 return; 2650 2651 if (p_sb->sb_info.sb_virt) 2652 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2653 SB_ALIGNED_SIZE(p_hwfn), 2654 p_sb->sb_info.sb_virt, 2655 p_sb->sb_info.sb_phys); 2656 kfree(p_sb); 2657 } 2658 2659 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, 2660 struct qed_ptt *p_ptt) 2661 { 2662 struct qed_sb_sp_info *p_sb; 2663 dma_addr_t p_phys = 0; 2664 void *p_virt; 2665 2666 /* SB struct */ 2667 p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); 2668 if (!p_sb) { 2669 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n"); 2670 return -ENOMEM; 2671 } 2672 2673 /* SB ring */ 2674 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 2675 SB_ALIGNED_SIZE(p_hwfn), 2676 &p_phys, GFP_KERNEL); 2677 if (!p_virt) { 2678 DP_NOTICE(p_hwfn, "Failed to allocate status block\n"); 2679 kfree(p_sb); 2680 return -ENOMEM; 2681 } 2682 2683 /* Status Block setup */ 2684 p_hwfn->p_sp_sb = p_sb; 2685 qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, 2686 p_phys, QED_SP_SB_ID); 2687 2688 memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 2689 2690 return 0; 2691 } 2692 2693 int qed_int_register_cb(struct qed_hwfn *p_hwfn, 2694 qed_int_comp_cb_t comp_cb, 2695 void *cookie, 2696 u8 *sb_idx, 2697 __le16 **p_fw_cons) 2698 { 2699 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 2700 int rc = -ENOMEM; 2701 u8 pi; 2702 2703 /* Look for a free index */ 2704 for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 2705 if (p_sp_sb->pi_info_arr[pi].comp_cb) 2706 continue; 2707 2708 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 2709 p_sp_sb->pi_info_arr[pi].cookie = cookie; 2710 *sb_idx = pi; 2711 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 2712 rc = 0; 2713 break; 2714 } 2715 2716 return rc; 2717 } 2718 2719 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) 2720 { 2721 struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 2722 2723 if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) 2724 return -ENOMEM; 2725 2726 p_sp_sb->pi_info_arr[pi].comp_cb = NULL; 2727 p_sp_sb->pi_info_arr[pi].cookie = NULL; 2728 2729 return 0; 2730 } 2731 2732 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) 2733 { 2734 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 2735 } 2736 2737 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, 2738 struct qed_ptt *p_ptt, 2739 enum qed_int_mode int_mode) 2740 { 2741 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 2742 2743 p_hwfn->cdev->int_mode = int_mode; 2744 switch (p_hwfn->cdev->int_mode) { 2745 case QED_INT_MODE_INTA: 2746 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 2747 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 2748 break; 2749 2750 case QED_INT_MODE_MSI: 2751 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 2752 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 2753 break; 2754 2755 case QED_INT_MODE_MSIX: 2756 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 2757 break; 2758 case QED_INT_MODE_POLL: 2759 break; 2760 } 2761 2762 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 2763 } 2764 2765 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2766 enum qed_int_mode int_mode) 2767 { 2768 int rc = 0; 2769 2770 /* Configure AEU signal change to produce attentions */ 2771 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 2772 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 2773 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 2774 qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 2775 2776 /* Flush the writes to IGU */ 2777 mmiowb(); 2778 2779 /* Unmask AEU signals toward IGU */ 2780 qed_wr(p_hwfn, p_ptt, 
MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 2781 if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 2782 rc = qed_slowpath_irq_req(p_hwfn); 2783 if (rc != 0) { 2784 DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); 2785 return -EINVAL; 2786 } 2787 p_hwfn->b_int_requested = true; 2788 } 2789 /* Enable interrupt Generation */ 2790 qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 2791 p_hwfn->b_int_enabled = 1; 2792 2793 return rc; 2794 } 2795 2796 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, 2797 struct qed_ptt *p_ptt) 2798 { 2799 p_hwfn->b_int_enabled = 0; 2800 2801 if (IS_VF(p_hwfn->cdev)) 2802 return; 2803 2804 qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 2805 } 2806 2807 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 2808 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 2809 struct qed_ptt *p_ptt, 2810 u32 sb_id, bool cleanup_set, u16 opaque_fid) 2811 { 2812 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 2813 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; 2814 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 2815 2816 /* Set the data field */ 2817 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 2818 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); 2819 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 2820 2821 /* Set the control register */ 2822 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 2823 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 2824 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 2825 2826 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 2827 2828 barrier(); 2829 2830 qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 2831 2832 /* Flush the write to IGU */ 2833 mmiowb(); 2834 2835 /* calculate where to read the status bit from */ 2836 sb_bit = 1 << (sb_id % 32); 2837 sb_bit_addr = sb_id / 32 * sizeof(u32); 2838 2839 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; 2840 2841 /* Now wait for the command to complete */ 2842 do { 2843 val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); 2844 2845 if ((val & sb_bit) == (cleanup_set ? 
sb_bit : 0)) 2846 break; 2847 2848 usleep_range(5000, 10000); 2849 } while (--sleep_cnt); 2850 2851 if (!sleep_cnt) 2852 DP_NOTICE(p_hwfn, 2853 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2854 val, sb_id); 2855 } 2856 2857 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 2858 struct qed_ptt *p_ptt, 2859 u32 sb_id, u16 opaque, bool b_set) 2860 { 2861 int pi, i; 2862 2863 /* Set */ 2864 if (b_set) 2865 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); 2866 2867 /* Clear */ 2868 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); 2869 2870 /* Wait for the IGU SB to cleanup */ 2871 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2872 u32 val; 2873 2874 val = qed_rd(p_hwfn, p_ptt, 2875 IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); 2876 if (val & (1 << (sb_id % 32))) 2877 usleep_range(10, 20); 2878 else 2879 break; 2880 } 2881 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2882 DP_NOTICE(p_hwfn, 2883 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2884 sb_id); 2885 2886 /* Clear the CAU for the SB */ 2887 for (pi = 0; pi < 12; pi++) 2888 qed_wr(p_hwfn, p_ptt, 2889 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); 2890 } 2891 2892 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 2893 struct qed_ptt *p_ptt, 2894 bool b_set, bool b_slowpath) 2895 { 2896 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; 2897 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; 2898 u32 sb_id = 0, val = 0; 2899 2900 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2901 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2902 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2903 qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2904 2905 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2906 "IGU cleaning SBs [%d,...,%d]\n", 2907 igu_base_sb, igu_base_sb + igu_sb_cnt - 1); 2908 2909 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) 2910 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 2911 p_hwfn->hw_info.opaque_fid, 2912 b_set); 2913 2914 if (!b_slowpath) 2915 return; 2916 2917 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 2918 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2919 "IGU cleaning slowpath SB [%d]\n", sb_id); 2920 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 2921 p_hwfn->hw_info.opaque_fid, b_set); 2922 } 2923 2924 static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, 2925 struct qed_ptt *p_ptt, 2926 u16 sb_id) 2927 { 2928 u32 val = qed_rd(p_hwfn, p_ptt, 2929 IGU_REG_MAPPING_MEMORY + 2930 sizeof(u32) * sb_id); 2931 struct qed_igu_block *p_block; 2932 2933 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; 2934 2935 /* stop scanning when hit first invalid PF entry */ 2936 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && 2937 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) 2938 goto out; 2939 2940 /* Fill the block information */ 2941 p_block->status = QED_IGU_STATUS_VALID; 2942 p_block->function_id = GET_FIELD(val, 2943 IGU_MAPPING_LINE_FUNCTION_NUMBER); 2944 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2945 p_block->vector_number = GET_FIELD(val, 2946 IGU_MAPPING_LINE_VECTOR_NUMBER); 2947 2948 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2949 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2950 sb_id, val, p_block->function_id, 2951 p_block->is_pf, p_block->vector_number); 2952 2953 out: 2954 return val; 2955 } 2956 2957 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, 2958 struct qed_ptt *p_ptt) 2959 { 2960 struct qed_igu_info *p_igu_info; 2961 u32 
val, min_vf = 0, max_vf = 0; 2962 u16 sb_id, last_iov_sb_id = 0; 2963 struct qed_igu_block *blk; 2964 u16 prev_sb_id = 0xFF; 2965 2966 p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); 2967 2968 if (!p_hwfn->hw_info.p_igu_info) 2969 return -ENOMEM; 2970 2971 p_igu_info = p_hwfn->hw_info.p_igu_info; 2972 2973 /* Initialize base sb / sb cnt for PFs and VFs */ 2974 p_igu_info->igu_base_sb = 0xffff; 2975 p_igu_info->igu_sb_cnt = 0; 2976 p_igu_info->igu_dsb_id = 0xffff; 2977 p_igu_info->igu_base_sb_iov = 0xffff; 2978 2979 if (p_hwfn->cdev->p_iov_info) { 2980 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; 2981 2982 min_vf = p_iov->first_vf_in_pf; 2983 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2984 } 2985 2986 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); 2987 sb_id++) { 2988 blk = &p_igu_info->igu_map.igu_blocks[sb_id]; 2989 2990 val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); 2991 2992 /* stop scanning when hit first invalid PF entry */ 2993 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && 2994 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) 2995 break; 2996 2997 if (blk->is_pf) { 2998 if (blk->function_id == p_hwfn->rel_pf_id) { 2999 blk->status |= QED_IGU_STATUS_PF; 3000 3001 if (blk->vector_number == 0) { 3002 if (p_igu_info->igu_dsb_id == 0xffff) 3003 p_igu_info->igu_dsb_id = sb_id; 3004 } else { 3005 if (p_igu_info->igu_base_sb == 3006 0xffff) { 3007 p_igu_info->igu_base_sb = sb_id; 3008 } else if (prev_sb_id != sb_id - 1) { 3009 DP_NOTICE(p_hwfn->cdev, 3010 "consecutive igu vectors for HWFN %x broken", 3011 p_hwfn->rel_pf_id); 3012 break; 3013 } 3014 prev_sb_id = sb_id; 3015 /* we don't count the default */ 3016 (p_igu_info->igu_sb_cnt)++; 3017 } 3018 } 3019 } else { 3020 if ((blk->function_id >= min_vf) && 3021 (blk->function_id < max_vf)) { 3022 /* Available for VFs of this PF */ 3023 if (p_igu_info->igu_base_sb_iov == 0xffff) { 3024 p_igu_info->igu_base_sb_iov = sb_id; 3025 } else if (last_iov_sb_id != sb_id - 1) { 3026 if (!val) { 3027 DP_VERBOSE(p_hwfn->cdev, 3028 NETIF_MSG_INTR, 3029 "First uninitialized IGU CAM entry at index 0x%04x\n", 3030 sb_id); 3031 } else { 3032 DP_NOTICE(p_hwfn->cdev, 3033 "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n", 3034 p_hwfn->rel_pf_id, 3035 last_iov_sb_id, 3036 sb_id); } 3037 break; 3038 } 3039 blk->status |= QED_IGU_STATUS_FREE; 3040 p_hwfn->hw_info.p_igu_info->free_blks++; 3041 last_iov_sb_id = sb_id; 3042 } 3043 } 3044 } 3045 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; 3046 3047 DP_VERBOSE( 3048 p_hwfn, 3049 NETIF_MSG_INTR, 3050 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n", 3051 p_igu_info->igu_base_sb, 3052 p_igu_info->igu_base_sb_iov, 3053 p_igu_info->igu_sb_cnt, 3054 p_igu_info->igu_sb_cnt_iov, 3055 p_igu_info->igu_dsb_id); 3056 3057 if (p_igu_info->igu_base_sb == 0xffff || 3058 p_igu_info->igu_dsb_id == 0xffff || 3059 p_igu_info->igu_sb_cnt == 0) { 3060 DP_NOTICE(p_hwfn, 3061 "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", 3062 p_igu_info->igu_base_sb, 3063 p_igu_info->igu_sb_cnt, 3064 p_igu_info->igu_dsb_id); 3065 return -EINVAL; 3066 } 3067 3068 return 0; 3069 } 3070 3071 /** 3072 * @brief Initialize igu runtime registers 3073 * 3074 * @param p_hwfn 3075 */ 3076 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) 3077 { 3078 u32 igu_pf_conf = 0; 3079 3080 igu_pf_conf |= IGU_PF_CONF_FUNC_EN; 3081 3082 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 3083 } 
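/* Illustrative sketch, not part of the driver: the CAU coalescing
 * configuration programmed by qed_init_cau_sb_entry() and
 * qed_int_cau_conf_sb() earlier in this file follows the relation
 *     coalescing timeout [usec] = timeset << (timer_res + 1)
 * with timeset derived from the requested microseconds by the inverse
 * shift. The compiled-out helper below only demonstrates that
 * arithmetic, assuming the fixed timer resolution of 1 noted in
 * qed_init_cau_sb_entry(); it is not used anywhere in the driver.
 */
#if 0
static u32 qed_example_coal_timeout_usecs(u32 usecs, u8 timer_res)
{
	u8 timeset = usecs >> (timer_res + 1);

	/* e.g. usecs = QED_CAU_DEF_RX_USECS (24), timer_res = 1:
	 * timeset = 24 >> 2 = 6, and 6 << 2 = 24 usec again.
	 */
	return (u32)timeset << (timer_res + 1);
}
#endif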
3084 3085 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) 3086 { 3087 u64 intr_status = 0; 3088 u32 intr_status_lo = 0; 3089 u32 intr_status_hi = 0; 3090 u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - 3091 IGU_CMD_INT_ACK_BASE; 3092 u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - 3093 IGU_CMD_INT_ACK_BASE; 3094 3095 intr_status_lo = REG_RD(p_hwfn, 3096 GTT_BAR0_MAP_REG_IGU_CMD + 3097 lsb_igu_cmd_addr * 8); 3098 intr_status_hi = REG_RD(p_hwfn, 3099 GTT_BAR0_MAP_REG_IGU_CMD + 3100 msb_igu_cmd_addr * 8); 3101 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 3102 3103 return intr_status; 3104 } 3105 3106 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) 3107 { 3108 tasklet_init(p_hwfn->sp_dpc, 3109 qed_int_sp_dpc, (unsigned long)p_hwfn); 3110 p_hwfn->b_sp_dpc_enabled = true; 3111 } 3112 3113 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) 3114 { 3115 p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); 3116 if (!p_hwfn->sp_dpc) 3117 return -ENOMEM; 3118 3119 return 0; 3120 } 3121 3122 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn) 3123 { 3124 kfree(p_hwfn->sp_dpc); 3125 } 3126 3127 int qed_int_alloc(struct qed_hwfn *p_hwfn, 3128 struct qed_ptt *p_ptt) 3129 { 3130 int rc = 0; 3131 3132 rc = qed_int_sp_dpc_alloc(p_hwfn); 3133 if (rc) { 3134 DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n"); 3135 return rc; 3136 } 3137 rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); 3138 if (rc) { 3139 DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n"); 3140 return rc; 3141 } 3142 rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); 3143 if (rc) { 3144 DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n"); 3145 return rc; 3146 } 3147 return rc; 3148 } 3149 3150 void qed_int_free(struct qed_hwfn *p_hwfn) 3151 { 3152 qed_int_sp_sb_free(p_hwfn); 3153 qed_int_sb_attn_free(p_hwfn); 3154 qed_int_sp_dpc_free(p_hwfn); 3155 } 3156 3157 void qed_int_setup(struct qed_hwfn *p_hwfn, 3158 struct qed_ptt *p_ptt) 3159 { 3160 qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 3161 qed_int_sb_attn_setup(p_hwfn, p_ptt); 3162 qed_int_sp_dpc_setup(p_hwfn); 3163 } 3164 3165 void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, 3166 struct qed_sb_cnt_info *p_sb_cnt_info) 3167 { 3168 struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; 3169 3170 if (!info || !p_sb_cnt_info) 3171 return; 3172 3173 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; 3174 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; 3175 p_sb_cnt_info->sb_free_blk = info->free_blks; 3176 } 3177 3178 u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) 3179 { 3180 struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 3181 3182 /* Determine origin of SB id */ 3183 if ((sb_id >= p_info->igu_base_sb) && 3184 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { 3185 return sb_id - p_info->igu_base_sb; 3186 } else if ((sb_id >= p_info->igu_base_sb_iov) && 3187 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { 3188 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; 3189 } else { 3190 DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); 3191 return 0; 3192 } 3193 } 3194 3195 void qed_int_disable_post_isr_release(struct qed_dev *cdev) 3196 { 3197 int i; 3198 3199 for_each_hwfn(cdev, i) 3200 cdev->hwfns[i].b_int_requested = false; 3201 } 3202
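/* Illustrative sketch, not part of the driver: how a protocol module
 * would hook a completion handler onto the slow-path status block.
 * qed_int_register_cb() claims a free entry in pi_info_arr[] and
 * returns the protocol index plus a pointer to the firmware consumer;
 * the callback is later invoked from qed_int_sp_dpc() whenever the SP
 * SB index advances. This assumes qed_int_comp_cb_t matches the
 * (p_hwfn, cookie) invocation seen in qed_int_sp_dpc(). The
 * "example_" names are hypothetical and the block is compiled out.
 */
#if 0
static void example_sp_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	/* Consume completions for this protocol index; 'cookie' is the
	 * pointer that was handed to qed_int_register_cb() below.
	 */
}

static int example_attach_sp_cb(struct qed_hwfn *p_hwfn)
{
	__le16 *p_fw_cons;
	u8 sb_idx;
	int rc;

	rc = qed_int_register_cb(p_hwfn, example_sp_completion,
				 p_hwfn /* cookie */, &sb_idx, &p_fw_cons);
	if (rc)
		return rc;

	/* ... when done, release the protocol index again ... */
	return qed_int_unregister_cb(p_hwfn, sb_idx);
}
#endif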