/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007, 2008 Adrian McMenamin
 * Copyright (C) 2001 - 2008 Paul Mundt
 *
 * Based on 2.4 code by:
 *
 * Copyright (C) 2000-2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 * Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
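/*
 * A minimal sketch (not compiled in) of how a client driver hooks into
 * the bus: fill in a function-ID mask and the embedded device_driver,
 * then call maple_driver_register() from module init.  The names, the
 * MAPLE_FUNC_CONTROLLER constant and the probe/remove bodies below are
 * illustrative assumptions, not part of this file.
 */
#if 0
static int example_probe(struct device *dev)
{
	struct maple_device *mdev = to_maple_dev(dev);

	/* allocate per-device state, set up an input/block device, etc. */
	return 0;
}

static int example_remove(struct device *dev)
{
	/* tear down whatever example_probe() set up */
	return 0;
}

static struct maple_driver example_maple_driver = {
	.function = MAPLE_FUNC_CONTROLLER,	/* function IDs to match */
	.drv = {
		.name	= "example_maple_driver",
		.probe	= example_probe,
		.remove	= example_remove,
	},
};

static int __init example_init(void)
{
	/* maple_driver_register() fills in .drv.bus for us */
	return maple_driver_register(&example_maple_driver);
}

static void __exit example_exit(void)
{
	maple_driver_unregister(&example_maple_driver);
}

module_init(example_init);
module_exit(example_exit);
#endif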
/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
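/*
 * A sketch (not compiled in) of maple_getcond_callback() in use: ask
 * the core to queue MAPLE_COMMAND_GETCOND for this device roughly
 * every @interval jiffies and hand each reply to the callback.  The
 * names and the HZ/20 polling rate are illustrative assumptions.
 */
#if 0
static void example_poll_reply(struct mapleq *mq)
{
	struct maple_device *mdev = mq->dev;
	void *frame = mq->recvbuf;	/* raw maple response frame */

	/* decode condition data (buttons, axes, ...) from frame here */
}

static void example_setup_polling(struct maple_device *mdev)
{
	/* poll the controller function 20 times a second */
	maple_getcond_callback(mdev, example_poll_reply, HZ / 20,
			       MAPLE_FUNC_CONTROLLER);
}
#endif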
static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
	}
	kfree(mdev);
}

/*
 * maple_add_packet - add a single instruction to the queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
		size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	mutex_lock(&maple_wlist_lock);
	/* bounce if device already locked */
	locking = mutex_is_locked(&mdev->mq->mutex);
	if (locking) {
		ret = -EBUSY;
		goto out;
	}

	mutex_lock(&mdev->mq->mutex);

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	list_add(&mdev->mq->list, &maple_waitq);
out:
	mutex_unlock(&maple_wlist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

/*
 * maple_add_packet_sleeps - add a single instruction to the queue,
 * sleeping until the device's queue lock is free
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
		u32 command, size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	locking = mutex_lock_interruptible(&mdev->mq->mutex);
	if (locking) {
		ret = -EIO;
		goto out;
	}

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
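/*
 * A sketch (not compiled in) of queueing commands with the two
 * variants above: maple_add_packet() bounces with -EBUSY while a
 * previous command on the same device is still in flight, whereas
 * maple_add_packet_sleeps() waits for the queue lock.  The function
 * names and the use of MAPLE_COMMAND_BREAD are illustrative
 * assumptions.
 */
#if 0
static int example_request_devinfo(struct maple_device *mdev)
{
	/* DEVINFO carries no function code and no payload */
	return maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
}

static int example_read_block(struct maple_device *mdev, __be32 block)
{
	/* function code plus one payload word: length is 2 longwords */
	return maple_add_packet_sleeps(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_BREAD, 2, &block);
}
#endif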
static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	mq->dev = mdev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
	if (!mq->recvbuf)
		goto failed_p2;
	/*
	 * most devices do not need the mutex - but
	 * anything that injects block reads or writes
	 * will rely on it
	 */
	mutex_init(&mq->mutex);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
				mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}

/* process the command queue into a maple command block:
 * the terminating entry has bit 31 (0x80000000) of its first long set
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}
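/*
 * For reference, each entry maple_build_block() emits into the DMA
 * buffer has this shape (reading the code above):
 *
 *	word 0:	 bit 31 = last-entry flag, bits 16-17 = port,
 *		 bits 0-7 = payload length in longwords
 *	word 1:	 physical address of the receive buffer
 *	word 2:	 frame header: command | recipient << 8 |
 *		 sender << 16 | length << 24
 *	word 3+: the payload itself, function code first
 */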
/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq) || !maple_dma_done()) {
		mutex_unlock(&maple_wlist_lock);
		return;
	}
	mutex_unlock(&maple_wlist_lock);
	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;
	mutex_lock(&maple_wlist_lock);
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

/* check if there is a driver registered likely to match this device */
static int check_matching_maple_driver(struct device_driver *driver,
				       void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	/* strip trailing spaces from the fixed-width name fields */
	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	printk(KERN_INFO "Maple device detected: %s\n",
	       mdev->product_name);
	printk(KERN_INFO "Maple device licence: %s\n",
	       mdev->product_licence);

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		printk(KERN_INFO
		       "Maple bus at (%d, %d): Function 0x%lX\n",
		       mdev->port, mdev->unit, function);

		matched =
		    bus_for_each_drv(&maple_bus_type, NULL, mdev,
				     check_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO "No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Attempt to register device"
		       " (%x, %x) failed.\n",
		       mdev->port, mdev->unit);
		maple_free_dev(mdev);
		return;
	}
}

/*
 * if a device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		/* bounce if we cannot lock */
		add = maple_add_packet(maple_dev,
			be32_to_cpu(maple_dev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			maple_dev->when = jiffies + maple_dev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* This will also bounce */
			maple_add_packet(maple_dev, 0,
				MAPLE_COMMAND_DEVINFO, 0, NULL);
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!list_empty(&maple_sentq) || !maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;

	mutex_lock(&maple_wlist_lock);
	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
		mutex_unlock(&maple_wlist_lock);
		maple_send();
	} else {
		mutex_unlock(&maple_wlist_lock);
	}

	maplebus_dma_reset();
}
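/*
 * Scan-state overview, gathered from the handlers above and below:
 * checked[port] notes ports whose top-level unit has answered (or been
 * reported empty) at least once; fullscan is set once every port has
 * been checked; scanning is 0 when idle, 1 while DEVINFO probes for
 * newly mapped subunits are being queued, 2 while those probes are in
 * flight; started flags that the first complete scan has finished.
 */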
/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			printk(KERN_INFO "No maple devices attached"
				" to port %d\n", mdev->port);
		}
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
		}
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			/*
			 * test lock in case scan has failed
			 * but device is still locked
			 */
			if (mutex_is_locked(&mdev->mq->mutex))
				mutex_unlock(&mdev->mq->mutex);
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			kfree(mq->sendbuf);
			mutex_unlock(&mq->mutex);
			list_del_init(&mq->list);

			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information"
				       " not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}
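/*
 * A sketch (not compiled in) of a synchronous transaction built on the
 * machinery above: maple_add_packet_sleeps() takes mq->mutex, and
 * maple_dma_handler() releases it and, for MAPLE_RESPONSE_DATATRF,
 * invokes the device callback with the reply in mq->recvbuf.  The
 * wait-queue plumbing and all names here are illustrative assumptions.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static int example_done;

static void example_reply(struct mapleq *mq)
{
	/* called from the DMA bottom half on MAPLE_RESPONSE_DATATRF */
	example_done = 1;
	wake_up_interruptible(&example_waitq);
}

static int example_getcond_sync(struct maple_device *mdev)
{
	int ret;

	mdev->callback = example_reply;
	example_done = 0;
	ret = maple_add_packet_sleeps(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
	if (ret)
		return ret;
	return wait_event_interruptible(example_waitq, example_done);
}
#endif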
static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};
static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		baseunits[i] = mdev[i];
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			retval = -ENOMEM;
			goto cleanup_cache;
		}
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);