// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>

#include <defs.h>
#include <soc.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <chipcommon.h>
#include "debug.h"
#include "chip.h"

/* SOC Interconnect types (aka chip types) */
#define SOCI_SB		0
#define SOCI_AI		1

/* PL-368 DMP definitions */
#define DMP_DESC_TYPE_MSK	0x0000000F
#define DMP_DESC_EMPTY		0x00000000
#define DMP_DESC_VALID		0x00000001
#define DMP_DESC_COMPONENT	0x00000001
#define DMP_DESC_MASTER_PORT	0x00000003
#define DMP_DESC_ADDRESS	0x00000005
#define DMP_DESC_ADDRSIZE_GT32	0x00000008
#define DMP_DESC_EOT		0x0000000F

#define DMP_COMP_DESIGNER	0xFFF00000
#define DMP_COMP_DESIGNER_S	20
#define DMP_COMP_PARTNUM	0x000FFF00
#define DMP_COMP_PARTNUM_S	8
#define DMP_COMP_CLASS		0x000000F0
#define DMP_COMP_CLASS_S	4
#define DMP_COMP_REVISION	0xFF000000
#define DMP_COMP_REVISION_S	24
#define DMP_COMP_NUM_SWRAP	0x00F80000
#define DMP_COMP_NUM_SWRAP_S	19
#define DMP_COMP_NUM_MWRAP	0x0007C000
#define DMP_COMP_NUM_MWRAP_S	14
#define DMP_COMP_NUM_SPORT	0x00003E00
#define DMP_COMP_NUM_SPORT_S	9
#define DMP_COMP_NUM_MPORT	0x000001F0
#define DMP_COMP_NUM_MPORT_S	4

#define DMP_MASTER_PORT_UID	0x0000FF00
#define DMP_MASTER_PORT_UID_S	8
#define DMP_MASTER_PORT_NUM	0x000000F0
#define DMP_MASTER_PORT_NUM_S	4

#define DMP_SLAVE_ADDR_BASE	0xFFFFF000
#define DMP_SLAVE_ADDR_BASE_S	12
#define DMP_SLAVE_PORT_NUM	0x00000F00
#define DMP_SLAVE_PORT_NUM_S	8
#define DMP_SLAVE_TYPE		0x000000C0
#define DMP_SLAVE_TYPE_S	6
#define DMP_SLAVE_TYPE_SLAVE	0
#define DMP_SLAVE_TYPE_BRIDGE	1
#define DMP_SLAVE_TYPE_SWRAP	2
#define DMP_SLAVE_TYPE_MWRAP	3
#define DMP_SLAVE_SIZE_TYPE	0x00000030
#define DMP_SLAVE_SIZE_TYPE_S	4
#define DMP_SLAVE_SIZE_4K	0
#define DMP_SLAVE_SIZE_8K	1
#define DMP_SLAVE_SIZE_16K	2
#define DMP_SLAVE_SIZE_DESC	3

/* EROM CompIdentB */
#define CIB_REV_MASK		0xff000000
#define CIB_REV_SHIFT		24

/* ARM CR4 core specific control flag bits */
#define ARMCR4_BCMA_IOCTL_CPUHALT	0x0020

/* D11 core specific control flag bits */
#define D11_BCMA_IOCTL_PHYCLOCKEN	0x0004
#define D11_BCMA_IOCTL_PHYRESET		0x0008

/* chip core base & ramsize */
/* bcm4329 */
/* SDIO device core, ID 0x829 */
#define BCM4329_CORE_BUS_BASE		0x18011000
/* internal memory core, ID 0x80e */
#define BCM4329_CORE_SOCRAM_BASE	0x18003000
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE		0x18002000

/* Max possibly supported memory size (limited by IO mapped memory) */
#define BRCMF_CHIP_MAX_MEMSIZE	(4 * 1024 * 1024)

#define CORE_SB(base, field) \
		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
#define SBCOREREV(sbidh) \
	((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
	  ((sbidh) & SSB_IDHIGH_RCLO))

struct sbconfig {
	u32 PAD[2];
	u32 sbipsflag;		/* initiator port ocp slave flag */
	u32 PAD[3];
	u32 sbtpsflag;		/* target port ocp slave flag */
	u32 PAD[11];
	u32 sbtmerrloga;	/* (sonics >= 2.3) */
	u32 PAD;
	u32 sbtmerrlog;		/* (sonics >= 2.3) */
	u32 PAD[3];
	u32 sbadmatch3;		/* address match3 */
	u32 PAD;
	u32 sbadmatch2;		/* address match2 */
	u32 PAD;
	u32 sbadmatch1;		/* address match1 */
	u32 PAD[7];
	u32 sbimstate;		/* initiator agent state */
	u32 sbintvec;		/* interrupt mask */
	u32 sbtmstatelow;	/* target state */
	u32 sbtmstatehigh;	/* target state */
	u32 sbbwa0;		/* bandwidth allocation table0 */
	u32 PAD;
	u32 sbimconfiglow;	/* initiator configuration */
	u32 sbimconfighigh;	/* initiator configuration */
	u32 sbadmatch0;		/* address match0 */
	u32 PAD;
	u32 sbtmconfiglow;	/* target configuration */
	u32 sbtmconfighigh;	/* target configuration */
	u32 sbbconfig;		/* broadcast configuration */
	u32 PAD;
	u32 sbbstate;		/* broadcast state */
	u32 PAD[3];
	u32 sbactcnfg;		/* activate configuration */
	u32 PAD[3];
	u32 sbflagst;		/* current sbflags */
	u32 PAD[3];
	u32 sbidlow;		/* identification */
	u32 sbidhigh;		/* identification */
};

/* bankidx and bankinfo reg defines corerev >= 8 */
#define SOCRAM_BANKINFO_RETNTRAM_MASK	0x00010000
#define SOCRAM_BANKINFO_SZMASK		0x0000007f
#define SOCRAM_BANKIDX_ROM_MASK		0x00000100

#define SOCRAM_BANKIDX_MEMTYPE_SHIFT	8
/* socram bankinfo memtype */
#define SOCRAM_MEMTYPE_RAM		0
#define SOCRAM_MEMTYPE_R0M		1
#define SOCRAM_MEMTYPE_DEVRAM		2

#define SOCRAM_BANKINFO_SZBASE		8192
#define SRCI_LSS_MASK			0x00f00000
#define SRCI_LSS_SHIFT			20
#define SRCI_SRNB_MASK			0xf0
#define SRCI_SRNB_MASK_EXT		0x100
#define SRCI_SRNB_SHIFT			4
#define SRCI_SRBSZ_MASK			0xf
#define SRCI_SRBSZ_SHIFT		0
#define SR_BSZ_BASE			14

struct sbsocramregs {
	u32 coreinfo;
	u32 bwalloc;
	u32 extracoreinfo;
	u32 biststat;
	u32 bankidx;
	u32 standbyctrl;

	u32 errlogstatus;	/* rev 6 */
	u32 errlogaddr;		/* rev 6 */
	/* used for patching rev 3 & 5 */
	u32 cambankidx;
	u32 cambankstandbyctrl;
	u32 cambankpatchctrl;
	u32 cambankpatchtblbaseaddr;
	u32 cambankcmdreg;
	u32 cambankdatareg;
	u32 cambankmaskreg;
	u32 PAD[1];
	u32 bankinfo;		/* corev 8 */
	u32 bankpda;
	u32 PAD[14];
	u32 extmemconfig;
	u32 extmemparitycsr;
	u32 extmemparityerrdata;
	u32 extmemparityerrcnt;
	u32 extmemwrctrlandsize;
	u32 PAD[84];
	u32 workaround;
	u32 pwrctl;		/* corerev >= 2 */
	u32 PAD[133];
	u32 sr_control;		/* corerev >= 15 */
	u32 sr_status;		/* corerev >= 15 */
	u32 sr_address;		/* corerev >= 15 */
	u32 sr_data;		/* corerev >= 15 */
};

#define SOCRAMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
#define SYSMEMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)

#define ARMCR4_CAP		(0x04)
#define ARMCR4_BANKIDX		(0x40)
#define ARMCR4_BANKINFO		(0x44)
#define ARMCR4_BANKPDA		(0x4C)

#define ARMCR4_TCBBNB_MASK	0xf0
#define ARMCR4_TCBBNB_SHIFT	4
#define ARMCR4_TCBANB_MASK	0xf
#define ARMCR4_TCBANB_SHIFT	0

#define ARMCR4_BSZ_MASK		0x3f
#define ARMCR4_BSZ_MULT		8192

struct brcmf_core_priv {
	struct brcmf_core pub;
	u32 wrapbase;
	struct list_head list;
	struct brcmf_chip_priv *chip;
};

struct brcmf_chip_priv {
	struct brcmf_chip pub;
	const struct brcmf_buscore_ops *ops;
	void *ctx;
	/* assured first core is chipcommon, second core is buscore */
	struct list_head cores;
	u16 num_cores;

	bool (*iscoreup)(struct brcmf_core_priv *core);
	void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
			    u32 reset);
	void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
			  u32 postreset);
};

static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
				  struct brcmf_core *core)
{
	u32 regdata;

	regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
	core->rev = SBCOREREV(regdata);
}

static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	u32 address;

	ci = core->chip;
	address = CORE_SB(core->pub.base, sbtmstatelow);
	regdata = ci->ops->read32(ci->ctx, address);
	regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
		    SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
	return SSB_TMSLOW_CLOCK == regdata;
}

static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	bool ret;

	ci = core->chip;
	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
	ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;

	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
	ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);

	return ret;
}

static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
				      u32 prereset, u32 reset)
{
	struct brcmf_chip_priv *ci;
	u32 val, base;

	ci = core->chip;
	base = core->pub.base;
	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	if (val & SSB_TMSLOW_RESET)
		return;

	val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	if ((val & SSB_TMSLOW_CLOCK) != 0) {
		/*
		 * set target reject and spin until busy is clear
		 * (preserve core-specific bits)
		 */
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
				 val | SSB_TMSLOW_REJECT);

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		udelay(1);
		SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
			  & SSB_TMSHIGH_BUSY), 100000);

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
		if (val & SSB_TMSHIGH_BUSY)
			brcmf_err("core state still busy\n");

		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
		if (val & SSB_IDLOW_INITIATOR) {
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			val |= SSB_IMSTATE_REJECT;
			ci->ops->write32(ci->ctx,
					 CORE_SB(base, sbimstate), val);
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			udelay(1);
			SPINWAIT((ci->ops->read32(ci->ctx,
						  CORE_SB(base, sbimstate)) &
				  SSB_IMSTATE_BUSY), 100000);
		}

		/* set reset and reject while enabling the clocks */
		val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
		      SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
		udelay(10);

		/* clear the initiator reject bit */
		val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
		if (val & SSB_IDLOW_INITIATOR) {
			val = ci->ops->read32(ci->ctx,
					      CORE_SB(base, sbimstate));
			val &= ~SSB_IMSTATE_REJECT;
			ci->ops->write32(ci->ctx,
					 CORE_SB(base, sbimstate), val);
		}
	}

	/* leave reset and reject asserted */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
	udelay(1);
}

static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
				      u32 prereset, u32 reset)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;

	ci = core->chip;

	/* if core is already in reset, skip reset */
	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
		goto in_reset_configure;

	/* configure reset */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);

	/* put in reset */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
			 BCMA_RESET_CTL_RESET);
	usleep_range(10, 20);

	/* wait till reset is 1 */
	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
		 BCMA_RESET_CTL_RESET, 300);

in_reset_configure:
	/* in-reset configure */
	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
}

static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
				    u32 reset, u32 postreset)
{
	struct brcmf_chip_priv *ci;
	u32 regdata;
	u32 base;

	ci = core->chip;
	base = core->pub.base;
	/*
	 * Must do the disable sequence first to work for
	 * arbitrary current core state.
	 */
	brcmf_chip_sb_coredisable(core, 0, 0);

	/*
	 * Now do the initialization sequence.
	 * set reset while enabling the clock and
	 * forcing them on throughout the core
	 */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
			 SSB_TMSLOW_RESET);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);

	/* clear any serror */
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
	if (regdata & SSB_TMSHIGH_SERR)
		ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);

	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
	if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
		regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
		ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
	}

	/* clear reset and allow it to propagate throughout the core */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);

	/* leave clock enabled */
	ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
			 SSB_TMSLOW_CLOCK);
	regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
	udelay(1);
}

static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
				    u32 reset, u32 postreset)
{
	struct brcmf_chip_priv *ci;
	int count;
	struct brcmf_core *d11core2 = NULL;
	struct brcmf_core_priv *d11priv2 = NULL;

	ci = core->chip;

	/* special handle two D11 cores reset */
	if (core->pub.id == BCMA_CORE_80211) {
		d11core2 = brcmf_chip_get_d11core(&ci->pub, 1);
		if (d11core2) {
			brcmf_dbg(INFO, "found two d11 cores, reset both\n");
			d11priv2 = container_of(d11core2,
						struct brcmf_core_priv, pub);
		}
	}

	/* must disable first to work for arbitrary current core state */
	brcmf_chip_ai_coredisable(core, prereset, reset);
	if (d11priv2)
		brcmf_chip_ai_coredisable(d11priv2, prereset, reset);

	count = 0;
	while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
	       BCMA_RESET_CTL_RESET) {
		ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
		count++;
		if (count > 50)
			break;
		usleep_range(40, 60);
	}

	if (d11priv2) {
		count = 0;
		while (ci->ops->read32(ci->ctx,
				       d11priv2->wrapbase + BCMA_RESET_CTL) &
		       BCMA_RESET_CTL_RESET) {
			ci->ops->write32(ci->ctx,
					 d11priv2->wrapbase + BCMA_RESET_CTL,
					 0);
			count++;
			if (count > 50)
				break;
			usleep_range(40, 60);
		}
	}

	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
			 postreset | BCMA_IOCTL_CLK);
	ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);

	if (d11priv2) {
		ci->ops->write32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL,
				 postreset | BCMA_IOCTL_CLK);
		ci->ops->read32(ci->ctx, d11priv2->wrapbase + BCMA_IOCTL);
	}
}

char *brcmf_chip_name(u32 id, u32 rev, char *buf, uint len)
{
	const char *fmt;

	fmt = ((id > 0xa000) || (id < 0x4000)) ? "BCM%d/%u" : "BCM%x/%u";
	snprintf(buf, len, fmt, id, rev);
	return buf;
}

static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
					      u16 coreid, u32 base,
					      u32 wrapbase)
{
	struct brcmf_core_priv *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core)
		return ERR_PTR(-ENOMEM);

	core->pub.id = coreid;
	core->pub.base = base;
	core->chip = ci;
	core->wrapbase = wrapbase;

	list_add_tail(&core->list, &ci->cores);
	return &core->pub;
}

/* safety check for chipinfo */
static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
{
	struct brcmf_core_priv *core;
	bool need_socram = false;
	bool has_socram = false;
	bool cpu_found = false;
	int idx = 1;

	list_for_each_entry(core, &ci->cores, list) {
		brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
			  idx++, core->pub.id, core->pub.rev, core->pub.base,
			  core->wrapbase);

		switch (core->pub.id) {
		case BCMA_CORE_ARM_CM3:
			cpu_found = true;
			need_socram = true;
			break;
		case BCMA_CORE_INTERNAL_MEM:
			has_socram = true;
			break;
		case BCMA_CORE_ARM_CR4:
			cpu_found = true;
			break;
		case BCMA_CORE_ARM_CA7:
			cpu_found = true;
			break;
		default:
			break;
		}
	}

	if (!cpu_found) {
		brcmf_err("CPU core not detected\n");
		return -ENXIO;
	}
	/* check RAM core presence for ARM CM3 core */
	if (need_socram && !has_socram) {
		brcmf_err("RAM core not provided with ARM CM3 core\n");
		return -ENODEV;
	}
	return 0;
}

static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg)
{
	return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg);
}

static void brcmf_chip_core_write32(struct brcmf_core_priv *core,
				    u16 reg, u32 val)
{
	core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val);
}

static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx,
				       u32 *banksize)
{
	u32 bankinfo;
	u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);

	bankidx |= idx;
	brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx);
	bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo));
	*banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1;
	*banksize *= SOCRAM_BANKINFO_SZBASE;
	return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK);
}

static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
				      u32 *srsize)
{
	u32 coreinfo;
	uint nb, banksize, lss;
	bool retent;
	int i;

	*ramsize = 0;
	*srsize = 0;

	if (WARN_ON(sr->pub.rev < 4))
		return;

	if (!brcmf_chip_iscoreup(&sr->pub))
		brcmf_chip_resetcore(&sr->pub, 0, 0, 0);

	/* Get info for determining size */
	coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo));
	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;

	if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) {
		banksize = (coreinfo & SRCI_SRBSZ_MASK);
		lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
		if (lss != 0)
			nb--;
		*ramsize = nb * (1 << (banksize + SR_BSZ_BASE));
		if (lss != 0)
			*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
	} else {
		/* length of SRAM Banks increased for corerev greater than 23 */
		if (sr->pub.rev >= 23) {
			nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT))
				>> SRCI_SRNB_SHIFT;
		} else {
			nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		}
		for (i = 0; i < nb; i++) {
			retent = brcmf_chip_socram_banksize(sr, i, &banksize);
			*ramsize += banksize;
			if (retent)
				*srsize += banksize;
		}
	}

	/* hardcoded save&restore memory sizes */
	switch (sr->chip->pub.chip) {
	case BRCM_CC_4334_CHIP_ID:
		if (sr->chip->pub.chiprev < 2)
			*srsize = (32 * 1024);
		break;
	case BRCM_CC_43430_CHIP_ID:
		/* assume sr for now as we can not check
		 * firmware sr capability at this point.
		 */
		*srsize = (64 * 1024);
		break;
	default:
		break;
	}
}

/** Return the SYS MEM size */
static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
{
	u32 memsize = 0;
	u32 coreinfo;
	u32 idx;
	u32 nb;
	u32 banksize;

	if (!brcmf_chip_iscoreup(&sysmem->pub))
		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);

	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;

	for (idx = 0; idx < nb; idx++) {
		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
		memsize += banksize;
	}

	return memsize;
}

/** Return the TCM-RAM size of the ARMCR4 core. */
static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
{
	u32 corecap;
	u32 memsize = 0;
	u32 nab;
	u32 nbb;
	u32 totb;
	u32 bxinfo;
	u32 idx;

	corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP);

	nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT;
	nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT;
	totb = nab + nbb;

	for (idx = 0; idx < totb; idx++) {
		brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx);
		bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO);
		memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
	}

	return memsize;
}

static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
{
	switch (ci->pub.chip) {
	case BRCM_CC_4345_CHIP_ID:
		return 0x198000;
	case BRCM_CC_4335_CHIP_ID:
	case BRCM_CC_4339_CHIP_ID:
	case BRCM_CC_4350_CHIP_ID:
	case BRCM_CC_4354_CHIP_ID:
	case BRCM_CC_4356_CHIP_ID:
	case BRCM_CC_43567_CHIP_ID:
	case BRCM_CC_43569_CHIP_ID:
	case BRCM_CC_43570_CHIP_ID:
	case BRCM_CC_4358_CHIP_ID:
	case BRCM_CC_43602_CHIP_ID:
	case BRCM_CC_4371_CHIP_ID:
		return 0x180000;
	case BRCM_CC_43465_CHIP_ID:
	case BRCM_CC_43525_CHIP_ID:
	case BRCM_CC_4365_CHIP_ID:
	case BRCM_CC_4366_CHIP_ID:
	case BRCM_CC_43664_CHIP_ID:
	case BRCM_CC_43666_CHIP_ID:
		return 0x200000;
	case BRCM_CC_4359_CHIP_ID:
		return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
	case BRCM_CC_4364_CHIP_ID:
	case CY_CC_4373_CHIP_ID:
		return 0x160000;
	default:
		brcmf_err("unknown chip: %s\n", ci->pub.name);
		break;
	}
	return 0;
}

int brcmf_chip_get_raminfo(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *ci = container_of(pub, struct brcmf_chip_priv,
						  pub);
	struct brcmf_core_priv *mem_core;
	struct brcmf_core *mem;

	mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4);
	if (mem) {
		mem_core = container_of(mem, struct brcmf_core_priv, pub);
		ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core);
		ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
		if (!ci->pub.rambase) {
			brcmf_err("RAM base not provided with ARM CR4 core\n");
			return -EINVAL;
		}
	} else {
		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
		if (mem) {
			mem_core = container_of(mem, struct brcmf_core_priv,
						pub);
			ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
			ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
			if (!ci->pub.rambase) {
				brcmf_err("RAM base not provided with ARM CA7 core\n");
				return -EINVAL;
			}
		} else {
			mem = brcmf_chip_get_core(&ci->pub,
						  BCMA_CORE_INTERNAL_MEM);
			if (!mem) {
				brcmf_err("No memory cores found\n");
				return -ENOMEM;
			}
			mem_core = container_of(mem, struct brcmf_core_priv,
						pub);
			brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
						  &ci->pub.srsize);
		}
	}
	brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
		  ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
		  ci->pub.srsize, ci->pub.srsize);

	if (!ci->pub.ramsize) {
		brcmf_err("RAM size is undetermined\n");
		return -ENOMEM;
	}

	if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
		brcmf_err("RAM size is incorrect\n");
		return -ENOMEM;
	}

	return 0;
}

static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
				   u8 *type)
{
	u32 val;

	/* read next descriptor */
	val = ci->ops->read32(ci->ctx, *eromaddr);
	*eromaddr += 4;

	if (!type)
		return val;

	/* determine descriptor type */
	*type = (val & DMP_DESC_TYPE_MSK);
	if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
		*type = DMP_DESC_ADDRESS;

	return val;
}

static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
				      u32 *regbase, u32 *wrapbase)
{
	u8 desc;
	u32 val, szdesc;
	u8 stype, sztype, wraptype;

	*regbase = 0;
	*wrapbase = 0;

	val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
	if (desc == DMP_DESC_MASTER_PORT) {
		wraptype = DMP_SLAVE_TYPE_MWRAP;
	} else if (desc == DMP_DESC_ADDRESS) {
		/* revert erom address */
		*eromaddr -= 4;
		wraptype = DMP_SLAVE_TYPE_SWRAP;
	} else {
		*eromaddr -= 4;
		return -EILSEQ;
	}

	do {
		/* locate address descriptor */
		do {
			val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
			/* unexpected table end */
			if (desc == DMP_DESC_EOT) {
				*eromaddr -= 4;
				return -EFAULT;
			}
		} while (desc != DMP_DESC_ADDRESS &&
			 desc != DMP_DESC_COMPONENT);

		/* stop if we crossed current component border */
		if (desc == DMP_DESC_COMPONENT) {
			*eromaddr -= 4;
			return 0;
		}

		/* skip upper 32-bit address descriptor */
		if (val & DMP_DESC_ADDRSIZE_GT32)
			brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);

		sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;

		/* next size descriptor can be skipped */
		if (sztype == DMP_SLAVE_SIZE_DESC) {
			szdesc = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
			/* skip upper size descriptor if present */
			if (szdesc & DMP_DESC_ADDRSIZE_GT32)
				brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
		}

		/* look for 4K or 8K register regions */
		if (sztype != DMP_SLAVE_SIZE_4K &&
		    sztype != DMP_SLAVE_SIZE_8K)
			continue;

		stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;

		/* only regular slave and wrapper */
		if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
			*regbase = val & DMP_SLAVE_ADDR_BASE;
		if (*wrapbase == 0 && stype == wraptype)
			*wrapbase = val & DMP_SLAVE_ADDR_BASE;
	} while (*regbase == 0 || *wrapbase == 0);

	return 0;
}

static
int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
{
	struct brcmf_core *core;
	u32 eromaddr;
	u8 desc_type = 0;
	u32 val;
	u16 id;
	u8 nmw, nsw, rev;
	u32 base, wrap;
	int err;

	eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));

	while (desc_type != DMP_DESC_EOT) {
		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
		if (!(val & DMP_DESC_VALID))
			continue;

		if (desc_type == DMP_DESC_EMPTY)
			continue;

		/* need a component descriptor */
		if (desc_type != DMP_DESC_COMPONENT)
			continue;

		id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;

		/* next descriptor must be component as well */
		val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
		if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
			return -EFAULT;

		/* only look at cores with master port(s) */
		nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
		nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
		rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;

		/* need core with ports */
		if (nmw + nsw == 0 &&
		    id != BCMA_CORE_PMU &&
		    id != BCMA_CORE_GCI)
			continue;

		/* try to obtain register address info */
		err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
		if (err)
			continue;

		/* finally a core to be added */
		core = brcmf_chip_add_core(ci, id, base, wrap);
		if (IS_ERR(core))
			return PTR_ERR(core);

		core->rev = rev;
	}

	return 0;
}

static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
{
	struct brcmf_core *core;
	u32 regdata;
	u32 socitype;
	int ret;

	/* Get CC core rev
	 * Chipid is assume to be at offset 0 from SI_ENUM_BASE
	 * For different chiptypes or old sdio hosts w/o chipcommon,
	 * other ways of recognition should be added here.
	 */
	regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
	ci->pub.chip = regdata & CID_ID_MASK;
	ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
	socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;

	brcmf_chip_name(ci->pub.chip, ci->pub.chiprev,
			ci->pub.name, sizeof(ci->pub.name));
	brcmf_dbg(INFO, "found %s chip: %s\n",
		  socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name);

	if (socitype == SOCI_SB) {
		if (ci->pub.chip != BRCM_CC_4329_CHIP_ID) {
			brcmf_err("SB chip is not supported\n");
			return -ENODEV;
		}
		ci->iscoreup = brcmf_chip_sb_iscoreup;
		ci->coredisable = brcmf_chip_sb_coredisable;
		ci->resetcore = brcmf_chip_sb_resetcore;

		core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
					   SI_ENUM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
					   BCM4329_CORE_BUS_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
					   BCM4329_CORE_SOCRAM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);
		core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
					   BCM4329_CORE_ARM_BASE, 0);
		brcmf_chip_sb_corerev(ci, core);

		core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
		brcmf_chip_sb_corerev(ci, core);
	} else if (socitype == SOCI_AI) {
		ci->iscoreup = brcmf_chip_ai_iscoreup;
		ci->coredisable = brcmf_chip_ai_coredisable;
		ci->resetcore = brcmf_chip_ai_resetcore;

		brcmf_chip_dmp_erom_scan(ci);
	} else {
		brcmf_err("chip backplane type %u is not supported\n",
			  socitype);
		return -ENODEV;
	}

	ret = brcmf_chip_cores_check(ci);
	if (ret)
		return ret;

	/* assure chip is passive for core access */
	brcmf_chip_set_passive(&ci->pub);

	/* Call bus specific reset function now. Cores have been determined
	 * but further access may require a chip specific reset at this point.
	 */
	if (ci->ops->reset) {
		ci->ops->reset(ci->ctx, &ci->pub);
		brcmf_chip_set_passive(&ci->pub);
	}

	return brcmf_chip_get_raminfo(&ci->pub);
}

static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
{
	struct brcmf_core *core;
	struct brcmf_core_priv *cpu;
	u32 val;


	core = brcmf_chip_get_core(&chip->pub, id);
	if (!core)
		return;

	switch (id) {
	case BCMA_CORE_ARM_CM3:
		brcmf_chip_coredisable(core, 0, 0);
		break;
	case BCMA_CORE_ARM_CR4:
	case BCMA_CORE_ARM_CA7:
		cpu = container_of(core, struct brcmf_core_priv, pub);

		/* clear all IOCTL bits except HALT bit */
		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
		val &= ARMCR4_BCMA_IOCTL_CPUHALT;
		brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
				     ARMCR4_BCMA_IOCTL_CPUHALT);
		break;
	default:
		brcmf_err("unknown id: %u\n", id);
		break;
	}
}

static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
{
	struct brcmf_chip *pub;
	struct brcmf_core_priv *cc;
	struct brcmf_core *pmu;
	u32 base;
	u32 val;
	int ret = 0;

	pub = &chip->pub;
	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
	base = cc->pub.base;

	/* get chipcommon capabilites */
	pub->cc_caps = chip->ops->read32(chip->ctx,
					 CORE_CC_REG(base, capabilities));
	pub->cc_caps_ext = chip->ops->read32(chip->ctx,
					     CORE_CC_REG(base,
							 capabilities_ext));

	/* get pmu caps & rev */
	pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
	if (pub->cc_caps & CC_CAP_PMU) {
		val = chip->ops->read32(chip->ctx,
					CORE_CC_REG(pmu->base, pmucapabilities));
		pub->pmurev = val & PCAP_REV_MASK;
		pub->pmucaps = val;
	}

	brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
		  cc->pub.rev, pub->pmurev, pub->pmucaps);

	/* execute bus core specific setup */
	if (chip->ops->setup)
		ret = chip->ops->setup(chip->ctx, pub);

	return ret;
}

struct brcmf_chip *brcmf_chip_attach(void *ctx,
				     const struct brcmf_buscore_ops *ops)
{
	struct brcmf_chip_priv *chip;
	int err = 0;

	if (WARN_ON(!ops->read32))
		err = -EINVAL;
	if (WARN_ON(!ops->write32))
		err = -EINVAL;
	if (WARN_ON(!ops->prepare))
		err = -EINVAL;
	if (WARN_ON(!ops->activate))
		err = -EINVAL;
	if (err < 0)
		return ERR_PTR(-EINVAL);

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&chip->cores);
	chip->num_cores = 0;
	chip->ops = ops;
	chip->ctx = ctx;

	err = ops->prepare(ctx);
	if (err < 0)
		goto fail;

	err = brcmf_chip_recognition(chip);
	if (err < 0)
		goto fail;

	err = brcmf_chip_setup(chip);
	if (err < 0)
		goto fail;

	return &chip->pub;

fail:
	brcmf_chip_detach(&chip->pub);
	return ERR_PTR(err);
}

void brcmf_chip_detach(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;
	struct brcmf_core_priv *tmp;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry_safe(core, tmp, &chip->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
	kfree(chip);
}

struct brcmf_core *brcmf_chip_get_d11core(struct brcmf_chip *pub, u8 unit)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry(core, &chip->cores, list) {
		if (core->pub.id == BCMA_CORE_80211) {
			if (unit-- == 0)
				return &core->pub;
		}
	}
	return NULL;
}

struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *core;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	list_for_each_entry(core, &chip->cores, list)
		if (core->pub.id == coreid)
			return &core->pub;

	return NULL;
}

struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core_priv *cc;

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
	if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
		return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
	return &cc->pub;
}

struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
{
	struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
	struct brcmf_core *pmu;

	/* See if there is separated PMU core available */
	if (cc->rev >= 35 &&
	    pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
		pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
		if (pmu)
			return pmu;
	}

	/* Fallback to ChipCommon core for older hardware */
	return cc;
}

bool brcmf_chip_iscoreup(struct brcmf_core *pub)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	return core->chip->iscoreup(core);
}

void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	core->chip->coredisable(core, prereset, reset);
}

void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
			  u32 postreset)
{
	struct brcmf_core_priv *core;

	core = container_of(pub, struct brcmf_core_priv, pub);
	core->chip->resetcore(core, prereset, reset, postreset);
}

static void
brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;
	struct brcmf_core_priv *sr;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
	brcmf_chip_resetcore(core, 0, 0, 0);

	/* disable bank #3 remap for this device */
	if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
		sr = container_of(core, struct brcmf_core_priv, pub);
		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
		brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
	}
}

static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
	if (!brcmf_chip_iscoreup(core)) {
		brcmf_err("SOCRAM core is down after reset?\n");
		return false;
	}

	chip->ops->activate(chip->ctx, &chip->pub, 0);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
	brcmf_chip_resetcore(core, 0, 0, 0);

	return true;
}

static inline void
brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
}

static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
	struct brcmf_core *core;

	chip->ops->activate(chip->ctx, &chip->pub, rstvec);

	/* restore ARM */
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);

	return true;
}

static inline void
brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
{
	struct brcmf_core *core;

	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);

	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
				   D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN,
			     D11_BCMA_IOCTL_PHYCLOCKEN);
}

static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
{
	struct brcmf_core *core;

	chip->ops->activate(chip->ctx, &chip->pub, rstvec);

	/* restore ARM */
	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);

	return true;
}

void brcmf_chip_set_passive(struct brcmf_chip *pub)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core *arm;

	brcmf_dbg(TRACE, "Enter\n");

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
	if (arm) {
		brcmf_chip_cr4_set_passive(chip);
		return;
	}
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
	if (arm) {
		brcmf_chip_ca7_set_passive(chip);
		return;
	}
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
	if (arm) {
		brcmf_chip_cm3_set_passive(chip);
		return;
	}
}

bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
{
	struct brcmf_chip_priv *chip;
	struct brcmf_core *arm;

	brcmf_dbg(TRACE, "Enter\n");

	chip = container_of(pub, struct brcmf_chip_priv, pub);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
	if (arm)
		return brcmf_chip_cr4_set_active(chip, rstvec);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
	if (arm)
		return brcmf_chip_ca7_set_active(chip, rstvec);
	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
	if (arm)
		return brcmf_chip_cm3_set_active(chip);

	return false;
}

bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
{
	u32 base, addr, reg, pmu_cc3_mask = ~0;
	struct brcmf_chip_priv *chip;
	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);

	brcmf_dbg(TRACE, "Enter\n");

	/* old chips with PMU version less than 17 don't support save restore */
	if (pub->pmurev < 17)
		return false;

	base = brcmf_chip_get_chipcommon(pub)->base;
	chip = container_of(pub, struct brcmf_chip_priv, pub);

	switch (pub->chip) {
	case BRCM_CC_4354_CHIP_ID:
	case BRCM_CC_4356_CHIP_ID:
	case BRCM_CC_4345_CHIP_ID:
		/* explicitly check SR engine enable bit */
		pmu_cc3_mask = BIT(2);
		fallthrough;
	case BRCM_CC_43241_CHIP_ID:
	case BRCM_CC_4335_CHIP_ID:
	case BRCM_CC_4339_CHIP_ID:
		/* read PMU chipcontrol register 3 */
		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
		chip->ops->write32(chip->ctx, addr, 3);
		addr = CORE_CC_REG(pmu->base, chipcontrol_data);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & pmu_cc3_mask) != 0;
	case BRCM_CC_43430_CHIP_ID:
		addr = CORE_CC_REG(base, sr_control1);
		reg = chip->ops->read32(chip->ctx, addr);
		return reg != 0;
	case CY_CC_4373_CHIP_ID:
		/* explicitly check SR engine enable bit */
		addr = CORE_CC_REG(base, sr_control0);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
	case BRCM_CC_4359_CHIP_ID:
	case CY_CC_43012_CHIP_ID:
		addr = CORE_CC_REG(pmu->base, retention_ctl);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	default:
		addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
		reg = chip->ops->read32(chip->ctx, addr);
		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
			return false;

		addr = CORE_CC_REG(pmu->base, retention_ctl);
		reg = chip->ops->read32(chip->ctx, addr);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	}
}