/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007 Adrian McMenamin
 *
 * Based on 2.4 code by:
 *
 * Copyright (C) 2000-2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 * Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/mach/dma.h>
#include <asm/mach/sysasic.h>
#include <asm/mach/maple.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

static DEFINE_MUTEX(maple_list_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, liststatus, realscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

/**
 * maple_driver_register - register a device driver
 * @drv: the driver to be registered
 *
 * Automatically makes the driver's bus the maple bus before
 * registering it.
 */
int maple_driver_register(struct device_driver *drv)
{
	if (!drv)
		return -EINVAL;
	drv->bus = &maple_bus_type;
	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			    void (*callback) (struct mapleq *mq),
			    unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
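
/*
 * Example (illustrative sketch only): a controller driver that wants
 * its condition polled roughly once a frame could register
 *
 *	maple_getcond_callback(mdev, example_button_callback, HZ / 60,
 *			       MAPLE_FUNC_CONTROLLER);
 *
 * where example_button_callback() is a hypothetical handler and
 * MAPLE_FUNC_CONTROLLER is assumed to be the controller function code
 * from <linux/maple.h>.  The callback runs from the maple DMA bottom
 * half (see maple_dma_handler() below) with the device's mapleq, whose
 * recvbuf then holds the MAPLE_RESPONSE_DATATRF frame.
 */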

static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
		mq = NULL;
	}
	kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the queue
 * @mq: instruction to add to waiting queue
 */
void maple_add_packet(struct mapleq *mq)
{
	mutex_lock(&maple_list_lock);
	list_add(&mq->list, &maple_waitq);
	mutex_unlock(&maple_list_lock);
}
EXPORT_SYMBOL_GPL(maple_add_packet);

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	mq->dev = mdev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbufdcsp) {
		kfree(mq);
		return NULL;
	}
	/* map the receive buffer through the uncached P2 segment */
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);

	return mq;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	mdev->function = 0;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
					mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}
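
/*
 * Layout of one transfer descriptor as written into maple_sendbuf by
 * maple_build_block() below (one descriptor per queued mapleq):
 *
 *	word 0: bit 31 = last-descriptor flag, bits 16-17 = port,
 *		bits 0-7 = payload length in 32-bit words
 *	word 1: physical address of the receive buffer
 *	word 2: command byte | recipient address << 8 |
 *		sender address << 16 | payload length << 24
 *	then `length' words of payload
 */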

/* process the command queue into a maple command block;
 * the final descriptor has bit 31 of its first word set -
 * the flag is cleared again whenever a further command is added
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);

	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

/* build up command queue */
static void maple_send(void)
{
	int i;
	int maple_packets;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	if (list_empty(&maple_waitq) || !maple_dma_done())
		return;
	maple_packets = 0;
	maple_sendptr = maple_lastptr = maple_sendbuf;
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	if (maple_packets > 0) {
		/* flush the whole send buffer, one page at a time */
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, (void *) maple_sendbuf
				       + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

static int attach_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
		if (maple_drv->connect(mdev) == 0) {
			mdev->driver = maple_drv;
			return 1;
		}
	}
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->driver) {
		if (mdev->driver->disconnect)
			mdev->driver->disconnect(mdev);
	}
	mdev->driver = NULL;
	device_unregister(&mdev->dev);
	mdev = NULL;
}
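
/*
 * Byte layout of the MAPLE_RESPONSE_DEVINFO frame as unpacked by
 * maple_attach_driver() below (offsets into mq->recvbuf):
 *
 *	 0- 3	frame header (response code in byte 0; the sender
 *		address in byte 2 carries the sub-unit mask)
 *	 4- 7	function code (big endian)
 *	 8-19	three words of function data
 *	20	area code
 *	21	connector direction
 *	22-51	product name (space padded)
 *	52-111	product licence (space padded)
 *	112-113	standby power
 *	114-115	maximum power
 */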

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	/* strip trailing space padding from the identification strings */
	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	if (realscan) {
		printk(KERN_INFO "Maple device detected: %s\n",
		       mdev->product_name);
		printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
	}

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		if (realscan)
			printk(KERN_INFO
			       "Maple bus at (%d, %d): Function 0x%lX\n",
			       mdev->port, mdev->unit, function);

		matched =
		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
				     attach_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			if (realscan)
				printk(KERN_INFO "No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Attempt to register device"
		       " (%x, %x) failed.\n",
		       mdev->port, mdev->unit);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if a device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		maple_dev->when = jiffies + maple_dev->interval;
		maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
		maple_dev->mq->sendbuf = &maple_dev->function;
		maple_dev->mq->length = 1;
		maple_add_packet(maple_dev->mq);
		liststatus++;
	} else {
		if (time_after(jiffies, maple_pnp_time)) {
			maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
			maple_dev->mq->length = 0;
			maple_add_packet(maple_dev->mq);
			liststatus++;
		}
	}

	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!maple_dma_done())
		return;
	if (!list_empty(&maple_sentq))
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	liststatus = 0;
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);
	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	if (liststatus && list_empty(&maple_sentq)) {
		INIT_LIST_HEAD(&maple_sentq);
		maple_send();
	}
	maplebus_dma_reset();
}

/* handle devices added via hotplug - place them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	for (k = 0; k < 5; k++) {
		ds.port = mdev->port;
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
			mdev_add->mq->length = 0;
			maple_add_packet(mdev_add->mq);
			scanning = 1;
		}
		submask = submask >> 1;
	}
}
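
/*
 * Each port's subdevice_map entry caches the sub-unit bits last seen in
 * the port's DEVINFO reply: bit n set means sub-unit n + 1 is attached
 * (up to five sub-units per port; the port itself is unit 0 and is
 * addressed by bit 5, 0x20, on the wire).  maple_clean_submap() clears
 * a unit's bit when it detaches; maple_map_subunits() walks the bits to
 * queue DEVINFO requests for newly attached sub-units.
 */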

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ?
		   (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started) {
		printk(KERN_INFO "No maple devices attached to port %d\n",
		       mdev->port);
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if ((!started) || (scanning == 2)) {
		maple_attach_driver(mdev);
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information"
				       " not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		INIT_LIST_HEAD(&maple_sentq);
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;

		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
			   IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
			   IRQF_SHARED, "maple bus VBLANK",
			   &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}
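
/*
 * Bus matching is a bitwise AND of function-code masks: a driver is
 * considered for a device if any function bit it advertises overlaps
 * the device's DEVINFO function word; a function word of 0xFFFFFFFF is
 * treated as an empty port and never matched.  A hypothetical child
 * driver would therefore be declared along these lines (all names here
 * are illustrative, not from this file):
 *
 *	static struct maple_driver example_driver = {
 *		.function = example_function_mask,
 *		.connect = example_connect,
 *		.disconnect = example_disconnect,
 *		.drv = {
 *			.name = "example_maple_driver",
 *		},
 *	};
 *
 *	maple_driver_register(&example_driver.drv);
 */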

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *maple_dev;

	maple_drv = container_of(drvptr, struct maple_driver, drv);
	maple_dev = container_of(devptr, struct maple_device, dev);
	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 be32_to_cpu(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			retval = -ENOMEM;
			goto cleanup_cache;
		}
		mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
		mdev[i]->mq->length = 0;
		maple_add_packet(mdev[i]->mq);
		/* delay aids hardware detection */
		mdelay(5);
		subdevice_map[i] = 0;
	}

	realscan = 1;
	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, &maple_dummy_driver);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, &maple_dummy_driver);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);