/*
 * Broadcom specific AMBA
 * Bus subsystem
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");

/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num = 0;

/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);

static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);

static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);

static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);

static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);

static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);

static struct bus_type bcma_bus_type = {
	.name = "bcma",
	.match = bcma_bus_match,
	.probe = bcma_device_probe,
	.remove = bcma_device_remove,
	.uevent = bcma_device_uevent,
	.dev_groups = bcma_device_groups,
};

static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
		return BCMA_CORE_4706_CHIPCOMMON;
	return BCMA_CORE_CHIPCOMMON;
}

struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);

bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);

	return false;
}

static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}

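/*
 * Cores listed here provide flash access and have to be registered before
 * the SPROM is read in bcma_bus_register(); bcma_register_devices() skips
 * them later so they are not registered twice.
 */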
static bool bcma_is_core_needed_early(u16 core_id)
{
	switch (core_id) {
	case BCMA_CORE_NS_NAND:
	case BCMA_CORE_NS_QSPI:
		return true;
	}

	return false;
}

#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	u64 size;
	const __be32 *reg;

	if (!parent || !parent->dev.of_node)
		return NULL;

	for_each_child_of_node(parent->dev.of_node, node) {
		reg = of_get_address(node, 0, &size, NULL);
		if (!reg)
			continue;
		if (of_translate_address(node, reg) == core->addr)
			return node;
	}
	return NULL;
}

static int bcma_of_irq_parse(struct platform_device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->dev.of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}

static unsigned int bcma_of_get_irq(struct platform_device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!parent || !parent->dev.of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}

static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);
}
#else
static void bcma_of_fill_device(struct platform_device *parent,
				struct bcma_device *core)
{
}
static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
					   struct bcma_device *core, int num)
{
	return 0;
}
#endif /* CONFIG_OF */

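/*
 * bcma_core_irq - resolve the Linux IRQ number for a core
 * @core: core to get the IRQ for
 * @num: per-core IRQ index; only index 0 is routed via the MIPS core
 *
 * PCI-hosted buses share the host PCI device's IRQ for every core.  On SoC
 * buses, index 0 is taken from the MIPS routing (the MIPS IRQ assigned to
 * the core plus 2, or 0 when no valid assignment exists); other indexes are
 * looked up in the device tree when a host platform device is available.
 * Returns 0 when no IRQ can be resolved.
 *
 * Illustrative use in a core driver (handler and name are hypothetical):
 *	err = request_irq(bcma_core_irq(core, 0), my_isr, IRQF_SHARED,
 *			  "my_bcma_drv", core);
 */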
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->host_pdev)
			return bcma_of_get_irq(bus->host_pdev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);

void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dev.parent = &bus->host_pci->dev;
		core->dma_dev = &bus->host_pci->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		core->dev.dma_mask = &core->dev.coherent_dma_mask;
		if (bus->host_pdev) {
			core->dma_dev = &bus->host_pdev->dev;
			core->dev.parent = &bus->host_pdev->dev;
			bcma_of_fill_device(bus->host_pdev, core);
		} else {
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}

void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}

static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_register(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		put_device(&core->dev);
		return;
	}
	core->dev_registered = true;
}

static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support these cores ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_DRIVER_MIPS
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif
	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err)
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}

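/*
 * Unregister the cores that were handed to the driver core, then free the
 * internally handled ones.  Cores that went through device_register() are
 * released via bcma_release_core_dev(); the remaining ones never had their
 * release callback used, so they are freed here directly.
 */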
void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now no one uses the internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		kfree(core);
	}
}

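/*
 * bcma_bus_register - scan the bus and bring it up
 *
 * The ordering below matters: ChipCommon and PCIe get an early init pass,
 * cores providing flash access are registered before the SPROM is read
 * (the SPROM data may have to be fetched from flash), and only then are the
 * remaining cores initialized and handed to the driver core by
 * bcma_register_devices().
 */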
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err) {
		bcma_err(bus, "Failed to get SPROM: %d\n", err);
	}

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}

void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}

/*
 * This is a special version of the bus registration function, designed for
 * SoCs.  It scans the bus and performs basic initialization of the main
 * cores only.  Note that it requires memory allocation, but it won't try
 * to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}

#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}

int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif

int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);

static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}

static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	if (adrv->probe)
		err = adrv->probe(core);

	return err;
}

static int bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);

	return 0;
}

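/*
 * Emit a MODALIAS uevent variable so userspace (udev/modprobe) can autoload
 * the driver module whose MODULE_DEVICE_TABLE(bcma, ...) alias matches this
 * core's manufacturer, id, revision and class.
 */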
static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}

static int __init bcma_modinit(void)
{
	int err;

	err = bus_register(&bcma_bus_type);
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
fs_initcall(bcma_modinit);

static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)