xref: /linux/drivers/sh/maple/maple.c (revision 1795cf48b322b4d19230a40dbe7181acedd34a94)
/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007, 2008 Adrian McMenamin
 *
 * Based on 2.4 code by:
 *
 *  Copyright (C) 2000-2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *  Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[4];
static struct maple_device *baseunits[4];

/**
 * maple_driver_register - register a device driver
 * @drv: the driver to be registered
 *
 * Automatically sets the driver's bus to the maple bus before
 * registering it.
 */
int maple_driver_register(struct device_driver *drv)
{
	if (!drv)
		return -EINVAL;
	drv->bus = &maple_bus_type;
	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
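
/*
 * A minimal usage sketch (illustration only, not part of this file): a
 * hypothetical "foo" driver registering itself on the maple bus. The
 * .function mask is what check_matching_maple_driver() below tests
 * against a device's DEVINFO data; MAPLE_FUNC_CONTROLLER is assumed to
 * come from <linux/maple.h>.
 */
#if 0
static struct maple_driver foo_maple_driver = {
	.function = MAPLE_FUNC_CONTROLLER,
	.drv = {
		.name = "foo",
	},
};

static int __init foo_init(void)
{
	/* sets foo_maple_driver.drv.bus to &maple_bus_type */
	return maple_driver_register(&foo_maple_driver.drv);
}
#endif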

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
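
/*
 * A usage sketch (hypothetical "foo" names; MAPLE_FUNC_CONTROLLER again
 * assumed from <linux/maple.h>): poll a device's condition fifty times
 * a second and decode the raw GETCOND reply in the callback.
 */
#if 0
static void foo_callback(struct mapleq *mq)
{
	unsigned char *res = mq->recvbuf;	/* raw GETCOND response */

	/* ... decode res ... */
}

static void foo_setup_polling(struct maple_device *mdev)
{
	/* HZ / 50 jiffies between GETCOND queries */
	maple_getcond_callback(mdev, foo_callback, HZ / 50,
			       MAPLE_FUNC_CONTROLLER);
}
#endif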

static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;
	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
		mq = NULL;
	}
	kfree(mdev);
}

/*
 * maple_add_packet - add a single instruction to the queue
 * @mdev - maple device
 * @function - function on device being queried
 * @command - maple command to add
 * @length - length of command string (in 32 bit words)
 * @data - remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	mutex_lock(&maple_wlist_lock);
	/* bounce if device already locked */
	locking = mutex_is_locked(&mdev->mq->mutex);
	if (locking) {
		ret = -EBUSY;
		goto out;
	}

	mutex_lock(&mdev->mq->mutex);

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	list_add(&mdev->mq->list, &maple_waitq);
out:
	mutex_unlock(&maple_wlist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
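
/*
 * A usage sketch: queue a single GETCOND query against the function the
 * device advertised in its DEVINFO block, mirroring what
 * setup_maple_commands() does below; returns -EBUSY while the device's
 * queue slot is still locked.
 */
#if 0
static int foo_query(struct maple_device *mdev)
{
	return maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
				MAPLE_COMMAND_GETCOND, 1, NULL);
}
#endif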

/*
 * maple_add_packet_sleeps - add a single instruction to the queue
 *  - waits for lock to be free
 * @mdev - maple device
 * @function - function on device being queried
 * @command - maple command to add
 * @length - length of command string (in 32 bit words)
 * @data - remainder of command string
 */
int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
	u32 command, size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	locking = mutex_lock_interruptible(&mdev->mq->mutex);
	if (locking) {
		ret = -EIO;
		goto out;
	}

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
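
/*
 * The sleeping variant, sketched: the same queueing, but callable from
 * process context where blocking on the queue-slot mutex is acceptable
 * (anything injecting block reads or writes relies on this).
 */
#if 0
static int foo_query_sync(struct maple_device *mdev)
{
	return maple_add_packet_sleeps(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
}
#endif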

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	mq->dev = mdev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
	if (!mq->recvbuf)
		goto failed_p2;
	/*
	 * most devices do not need the mutex - but
	 * anything that injects block reads or writes
	 * will rely on it
	 */
	mutex_init(&mq->mutex);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
				mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}

/* process the command queue into a maple command block;
 * the last command in the block is marked by bit 31 of its first
 * word being set - appending a further command clears that bit
 * on the previous entry
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}
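
/*
 * A worked example of the header written above, assuming
 * MAPLE_COMMAND_GETCOND is 9 as in the maple protocol: a GETCOND query
 * on port 1, unit 1, with length 1 produces
 *
 *	word 0: 0x80010001	last-command flag | (port 1 << 16) | len 1
 *	word 1: PHYSADDR(mq->recvbuf)
 *	word 2: 0x01404109	(len 1 << 24) | (from 0x40 << 16) |
 *				(to 0x41 << 8) | command 9
 */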

/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq) || !maple_dma_done()) {
		mutex_unlock(&maple_wlist_lock);
		return;
	}
	mutex_unlock(&maple_wlist_lock);
	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;
	mutex_lock(&maple_wlist_lock);
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

/* check if there is a driver registered likely to match this device */
static int check_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	device_unregister(&mdev->dev);
	mdev = NULL;
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	printk(KERN_INFO "Maple device detected: %s\n",
		mdev->product_name);
	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		printk(KERN_INFO
			"Maple bus at (%d, %d): Function 0x%lX\n",
			mdev->port, mdev->unit, function);

		matched =
			bus_for_each_drv(&maple_bus_type, NULL, mdev,
				check_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO "No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Attempt to register device (%x, %x) failed.\n",
		       mdev->port, mdev->unit);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		/* bounce if we cannot lock */
		add = maple_add_packet(maple_dev,
			be32_to_cpu(maple_dev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			maple_dev->when = jiffies + maple_dev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* This will also bounce */
			maple_add_packet(maple_dev, 0,
				MAPLE_COMMAND_DEVINFO, 0, NULL);
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!list_empty(&maple_sentq) || !maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;

	mutex_lock(&maple_wlist_lock);
	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
		mutex_unlock(&maple_wlist_lock);
		maple_send();
	} else {
		mutex_unlock(&maple_wlist_lock);
	}

	maplebus_dma_reset();
}

/* handle devices added via hotplug - placing them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}
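
/*
 * e.g. removing unit 3 clears bit 2 of the port's submap:
 * killbit = (1 << 2) & 0x1f = 0x04, so the map is ANDed with
 * ~0x04 & 0xff = 0xfb; a port's base device (unit 0) maps to
 * bit 5 (0x20) instead.
 */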

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			printk(KERN_INFO "No maple devices attached to port %d\n",
			       mdev->port);
		}
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;
	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
		}
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			/*
			 *  test lock in case scan has failed
			 *  but device is still locked
			 */
			if (mutex_is_locked(&mdev->mq->mutex))
				mutex_unlock(&mdev->mq->mutex);
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			kfree(mq->sendbuf);
			mutex_unlock(&mq->mutex);
			list_del_init(&mq->list);

			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *maple_dev;

	maple_drv = container_of(drvptr, struct maple_driver, drv);
	maple_dev = container_of(devptr, struct maple_device, dev);
	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];
	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache)
		goto cleanup_bothirqs;

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		baseunits[i] = mdev[i];
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	/* dev_id must match the one passed to request_irq() on a shared IRQ */
	free_irq(HW_EVENT_VSYNC, &maple_dummy_driver);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, &maple_dummy_driver);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);