/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007, 2008 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *
 * Based on 2.4 code by:
 *
 *  Copyright (C) 2000-2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *  Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[4];
static struct maple_device *baseunits[4];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
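
/*
 * Example: a minimal sketch of how a peripheral driver hooks into the
 * bus. The names here are illustrative only, and the
 * MAPLE_FUNC_CONTROLLER function mask is assumed to come from
 * <linux/maple.h>; a real driver also supplies probe/remove logic:
 *
 *	static struct maple_driver mydev_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.drv = {
 *			.name	= "Dreamcast_mydev",
 *			.probe	= mydev_probe,
 *			.remove	= mydev_remove,
 *		},
 *	};
 *
 *	static int __init mydev_init(void)
 *	{
 *		return maple_driver_register(&mydev_driver);
 *	}
 *
 *	static void __exit mydev_exit(void)
 *	{
 *		maple_driver_unregister(&mydev_driver);
 *	}
 */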

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

/* set hardware registers to enable the next round of DMA */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - set up handling of MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
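
/*
 * Example: how a driver might arrange to poll its device roughly
 * fifty times a second. A sketch only - the callback name and the
 * HZ/50 interval are illustrative, and MAPLE_FUNC_CONTROLLER is
 * assumed to be the function mask from <linux/maple.h>:
 *
 *	static void mydev_condition(struct mapleq *mq)
 *	{
 *		unsigned char *res = mq->recvbuf;
 *		... decode the GETCOND reply here ...
 *	}
 *
 *	maple_getcond_callback(mdev, mydev_condition, HZ/50,
 *			       MAPLE_FUNC_CONTROLLER);
 */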

static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
	}
	kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 *
 * Returns 0 on success, -EBUSY if the device already has a request
 * outstanding, or -ENOMEM if the send buffer cannot be allocated.
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	mutex_lock(&maple_wlist_lock);
	/* bounce if device already locked */
	if (!mutex_trylock(&mdev->mq->mutex)) {
		ret = -EBUSY;
		goto out;
	}

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	list_add(&mdev->mq->list, &maple_waitq);
out:
	mutex_unlock(&maple_wlist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
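
/*
 * Example: queueing a one-off condition query, as
 * setup_maple_commands() below does for devices that have registered
 * a polling interval. Callers must be prepared for -EBUSY if the
 * device already has a request in flight:
 *
 *	int error = maple_add_packet(mdev,
 *			be32_to_cpu(mdev->devinfo.function),
 *			MAPLE_COMMAND_GETCOND, 1, NULL);
 *	if (error)
 *		... try again on a later pass ...
 */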

/**
 * maple_add_packet_sleeps - add a single instruction to the queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 *
 * Same as maple_add_packet(), but waits for the lock to become free.
 * Returns 0 on success, -EIO if interrupted while waiting, or
 * -ENOMEM if the send buffer cannot be allocated.
 */
int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
	u32 command, size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	if (mutex_lock_interruptible(&mdev->mq->mutex)) {
		ret = -EIO;
		goto out;
	}

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
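
/*
 * Example: a sleeping request from process context, such as a
 * storage-style driver reading a block while probing. A sketch only -
 * MAPLE_COMMAND_BREAD is assumed to be the block-read opcode from
 * <linux/maple.h>, and the packing of the parameter word is device
 * specific:
 *
 *	__be32 param = cpu_to_be32(block);
 *	int error = maple_add_packet_sleeps(mdev,
 *			be32_to_cpu(mdev->devinfo.function),
 *			MAPLE_COMMAND_BREAD, 2, &param);
 */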

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	mq->dev = mdev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbufdcsp)
		goto failed_p2;
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
	/*
	 * most devices do not need the mutex - but
	 * anything that injects block reads or writes
	 * will rely on it
	 */
	mutex_init(&mq->mutex);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
				mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}

/*
 * process the command queue into a maple command block: the
 * descriptor for the final command has bit 31 of its first word set,
 * and that bit is cleared on all earlier descriptors
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}
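
/*
 * Worked example of the descriptor built above: a MAPLE_COMMAND_GETCOND
 * request (opcode 9 in the standard Maple protocol numbering) of
 * length 1 for port 0, unit 1 produces
 *
 *	word 0: 0x80000001	bit 31 set (last descriptor in the
 *				block), port 0, length 1
 *	word 1: PHYSADDR(mq->recvbuf)
 *	word 2: 0x01000109	(len 1 << 24) | (from 0x00 << 16) |
 *				(to 0x01 << 8) | command 9
 *
 * followed by the function word from the send buffer. The recipient
 * address carries the port number in bits 6-7 and a one-hot subunit
 * bit in bits 0-5, with 0x20 addressing the port device itself.
 */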

/* build up the command block from the waiting queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq) || !maple_dma_done()) {
		mutex_unlock(&maple_wlist_lock);
		return;
	}
	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, (void *)maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

/* check if there is a driver registered likely to match this device */
static int check_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	/* strip trailing spaces from the name and licence strings */
	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	printk(KERN_INFO "Maple device detected: %s\n",
		mdev->product_name);
	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		printk(KERN_INFO
			"Maple bus at (%d, %d): Function 0x%lX\n",
			mdev->port, mdev->unit, function);

		matched = bus_for_each_drv(&maple_bus_type, NULL, mdev,
				check_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO "No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
			"Maple bus: Attempt to register device (%d, %d) failed.\n",
			mdev->port, mdev->unit);
		maple_free_dev(mdev);
		return;
	}
}
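
/*
 * Layout of the DEVINFO reply unpacked above, as byte offsets into
 * mq->recvbuf (the first four bytes are the response header):
 *
 *	  4	function codes (big endian)
 *	  8	function data, three 32 bit words
 *	 20	area code
 *	 21	connector direction
 *	 22	product name, 30 bytes
 *	 52	product licence, 60 bytes
 *	112	standby power consumption, 2 bytes
 *	114	maximum power consumption, 2 bytes
 */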

/*
 * if a device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		/* bounce if we cannot lock */
		add = maple_add_packet(maple_dev,
			be32_to_cpu(maple_dev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			maple_dev->when = jiffies + maple_dev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* This will also bounce */
			maple_add_packet(maple_dev, 0,
				MAPLE_COMMAND_DEVINFO, 0, NULL);
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!list_empty(&maple_sentq) || !maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;

	mutex_lock(&maple_wlist_lock);
	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
		mutex_unlock(&maple_wlist_lock);
		maple_send();
	} else {
		mutex_unlock(&maple_wlist_lock);
	}

	maplebus_dma_reset();
}

/* handle devices added via hotplug, placing them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval = bus_for_each_dev(&maple_bus_type, NULL, &ds,
				detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}
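
/*
 * Worked example: removing unit 3 clears bit 2 of the port's
 * subdevice map, so a map of 0x07 (units 1-3 present) becomes 0x03;
 * removing the port device itself (unit 0) clears bit 5 (0x20).
 */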

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			printk(KERN_INFO
			       "No maple devices attached to port %d\n",
			       mdev->port);
		}
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else if (mdev->unit != 0) {
			maple_attach_driver(mdev);
		}
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			/*
			 * test lock in case scan has failed
			 * but device is still locked
			 */
			if (mutex_is_locked(&mdev->mq->mutex))
				mutex_unlock(&mdev->mq->mutex);
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			kfree(mq->sendbuf);
			mutex_unlock(&mq->mutex);
			list_del_init(&mq->list);

			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			retval = -ENOMEM;
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		baseunits[i] = mdev[i];
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, &maple_dummy_driver);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, &maple_dummy_driver);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);