/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};

static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}
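/*
 * Cores for which the helper below returns true provide flash access (the
 * NS NAND and NS QSPI controllers). bcma_bus_register() registers them
 * before the SPROM is read, since the SPROM/NVRAM content may have to be
 * fetched from that flash.
 */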
static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static int bcma_of_irq_parse(struct platform_device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->dev.of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct platform_device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!parent || !parent->dev.of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);
}
#else
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
}
static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
					   struct bcma_device *core, int num)
{
	return 0;
}
#endif /* CONFIG_OF */

unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(bus->host_pdev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
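/*
 * Illustrative sketch (not part of the original file): a core driver would
 * normally obtain its interrupt through bcma_core_irq() above instead of
 * touching the MIPS/OF/PCI specifics itself. Assuming a hypothetical driver
 * with a handler mydrv_isr(), usage could look like:
 *
 *	unsigned int irq = bcma_core_irq(core, 0);
 *
 *	if (irq)
 *		err = request_irq(irq, mydrv_isr, IRQF_SHARED,
 *				  "mydrv", core);
 */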
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		core->dev.dma_mask = &core->dev.coherent_dma_mask;
		if (bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}

static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support those cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_DRIVER_MIPS
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif
	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

static void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		if (core->dev_registered)
			device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);
}
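/*
 * bcma_bus_register() - scan a bus and bring up its cores.
 *
 * The order below matters: the bus is scanned first, ChipCommon gets an
 * early init, flash-providing cores are registered so the SPROM can be
 * read, and only then are the ChipCommon, MIPS, PCIe and GMAC-common cores
 * fully initialized before the remaining cores are registered as devices.
 */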
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init first PCIe core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init second PCIe core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
	struct bcma_device *cores[3];
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
	cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);

	bcma_unregister_cores(bus);

	kfree(cores[2]);
	kfree(cores[1]);
	kfree(cores[0]);
}
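/*
 * Early registration scans only for the ChipCommon and MIPS cores, using the
 * caller-provided core structures, and runs their early init. This lets a
 * SoC host bring up the essentials before the full bcma_bus_register() pass.
 */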
int __init bcma_bus_early_register(struct bcma_bus *bus,
				   struct bcma_device *core_cc,
				   struct bcma_device *core_mips)
{
	int err;
	struct bcma_device *core;
	struct bcma_device_id match;

	match.manuf = BCMA_MANUF_BCM;
	match.id = bcma_cc_core_id(bus);
	match.class = BCMA_CL_SIM;
	match.rev = BCMA_ANY_REV;

	/* Scan for ChipCommon core */
	err = bcma_bus_scan_early(bus, &match, core_cc);
	if (err) {
		bcma_err(bus, "Failed to scan for common core: %d\n", err);
		return -1;
	}

	match.manuf = BCMA_MANUF_MIPS;
	match.id = BCMA_CORE_MIPS_74K;
	match.class = BCMA_CL_SIM;
	match.rev = BCMA_ANY_REV;

	/* Scan for MIPS core */
	err = bcma_bus_scan_early(bus, &match, core_mips);
	if (err) {
		bcma_err(bus, "Failed to scan for mips core: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	if (adrv->probe)
		err = adrv->probe(core);

	return err;
}

static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);

	return 0;
}
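/*
 * The uevent below exposes the core identification as a modalias so
 * userspace module loading can match it against driver aliases. For
 * example, an 802.11 core (manuf 0x4BF, core id 0x812, rev 0x17, class 0)
 * would yield "MODALIAS=bcma:m04BFid0812rev17cl00".
 */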
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

static int __init bcma_modinit(void)
{
	int err;

	err = bus_register(&bcma_bus_type);
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
fs_initcall(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)
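/*
 * Illustrative sketch (not part of this file): a minimal client driver for
 * this bus, showing how the match/probe/remove hooks above are exercised.
 * The driver and handler names are hypothetical; the sketch assumes the
 * BCMA_CORE() id-table helper and the bcma_driver_register() wrapper from
 * <linux/bcma/bcma.h>.
 *
 *	static const struct bcma_device_id mydrv_tbl[] = {
 *		BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211,
 *			  BCMA_ANY_REV, BCMA_ANY_CLASS),
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(bcma, mydrv_tbl);
 *
 *	static int mydrv_probe(struct bcma_device *core)
 *	{
 *		// called via bcma_device_probe() once bcma_bus_match() hits
 *		return 0;
 *	}
 *
 *	static void mydrv_remove(struct bcma_device *core)
 *	{
 *		// called via bcma_device_remove() on unbind/unregister
 *	}
 *
 *	static struct bcma_driver mydrv = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= mydrv_tbl,
 *		.probe		= mydrv_probe,
 *		.remove		= mydrv_remove,
 *	};
 *
 *	// registered from the client's module init:
 *	//	bcma_driver_register(&mydrv);
 */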