/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* Contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};

static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}
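
/*
 * Illustrative only (not part of this driver): a minimal sketch of how a
 * core driver might combine the two helpers above.  The register offset,
 * mask, value and timeout below are made-up example numbers, not real
 * hardware definitions.
 *
 *	struct bcma_device *core;
 *
 *	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
 *	if (core) {
 *		// Poll an (example) status register until bit 0 clears,
 *		// giving up after roughly 100 jiffies.
 *		if (!bcma_wait_value(core, 0x0800, 0x1, 0x0, 100))
 *			bcma_err(bus, "Example core did not become ready\n");
 *	}
 */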

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

static struct device_node *bcma_of_find_child_device(struct device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent->of_node)
		return NULL;

	for_each_child_of_node(parent->of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static int bcma_of_irq_parse(struct device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);

	of_dma_configure(&core->dev, node, false);
}

unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->dev)
			return bcma_of_get_irq(bus->dev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
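
/*
 * Illustrative only: how a core driver might hook up its interrupt using the
 * helper above.  "my_isr" and the "priv" cookie are hypothetical;
 * request_irq() is the usual <linux/interrupt.h> API.
 *
 *	unsigned int irq = bcma_core_irq(core, 0);
 *
 *	if (irq) {
 *		err = request_irq(irq, my_isr, IRQF_SHARED,
 *				  KBUILD_MODNAME, priv);
 *		if (err)
 *			return err;
 *	}
 */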

void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	device_initialize(&core->dev);
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
	core->dev.parent = bus->dev;
	if (bus->dev)
		bcma_of_fill_device(bus->dev, core);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dma_dev = bus->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		if (IS_ENABLED(CONFIG_OF) && bus->dev) {
			core->dma_dev = bus->dev;
		} else {
			core->dev.dma_mask = &core->dev.coherent_dma_mask;
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}

void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_add(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		return;
	}
	core->dev_registered = true;
}

static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We handle these cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_PFLASH
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif
	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now no one uses the internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		put_device(&core->dev);
	}
}

int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	if (bus->dev)
		of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err) {
		bcma_err(bus, "Failed to get SPROM: %d\n", err);
	}

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core (unit 0) */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core (unit 1) */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}
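
/*
 * Illustrative only: the rough lifecycle of a bus as driven by host glue code
 * (e.g. the PCI or SoC host drivers).  "pdev" is a hypothetical parent
 * device; the host is also expected to install its register-access ops and
 * other host-specific fields before scanning, which is not shown here.
 *
 *	struct bcma_bus *bus = kzalloc(sizeof(*bus), GFP_KERNEL);
 *
 *	bus->hosttype = BCMA_HOSTTYPE_SOC;
 *	bus->dev = &pdev->dev;
 *	bcma_init_bus(bus);		// assign bus number, detect the chip
 *	err = bcma_bus_register(bus);	// scan cores, init drivers, add devices
 *	...
 *	bcma_bus_unregister(bus);	// tear it all down again
 */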

/*
 * This is a special version of the bus registration function, designed for
 * SoCs. It scans the bus and performs basic initialization of the main cores
 * only. Note that it requires memory allocation, but it will not try to
 * sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;
		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
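
/*
 * Illustrative only: a minimal client driver as matched by bcma_bus_match()
 * below.  The table entry, "my_probe" and "my_remove" are hypothetical;
 * bcma_driver_register() is the <linux/bcma/bcma.h> wrapper around
 * __bcma_driver_register() above.
 *
 *	static const struct bcma_device_id my_bcma_tbl[] = {
 *		{ .manuf = BCMA_ANY_MANUF, .id = BCMA_CORE_80211,
 *		  .rev = BCMA_ANY_REV, .class = BCMA_ANY_CLASS },
 *		{ },	// all-zero terminator, see bcma_bus_match()
 *	};
 *
 *	static struct bcma_driver my_bcma_driver = {
 *		.name		= KBUILD_MODNAME,
 *		.id_table	= my_bcma_tbl,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	// typically registered from module init:
 *	//	err = bcma_driver_register(&my_bcma_driver);
 */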

static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	get_device(dev);
	if (adrv->probe)
		err = adrv->probe(core);
	if (err)
		put_device(dev);

	return err;
}

static void bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);
	put_device(dev);
}

static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

static unsigned int bcma_bus_registered;

/*
 * If built in, the bus has to be registered early, before any driver calls
 * bcma_driver_register().
 * Otherwise registering a driver would trigger a BUG in driver_register().
 */
static int __init bcma_init_bus_register(void)
{
	int err;

	if (bcma_bus_registered)
		return 0;

	err = bus_register(&bcma_bus_type);
	if (!err)
		bcma_bus_registered = 1;

	return err;
}
#ifndef MODULE
fs_initcall(bcma_init_bus_register);
#endif

/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
	int err;

	err = bcma_init_bus_register();
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
module_init(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit);