/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

const struct spu_management_ops *spu_management_ops;
const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

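/*
 * Restart the SPU's suspended MFC DMA queue, unless a context switch
 * is pending; in that case the context switch code takes care of the
 * MFC state itself.
 */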
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

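/*
 * Handle an SLB miss on an SPE effective address: build the matching
 * ESID/VSID pair for the faulting region and write it into one of the
 * eight SPU SLB entries, replacing entries round-robin.
 */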
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			llp = mmu_psize_defs[mmu_huge_psize].sllp;
		else
#endif
			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER | llp;
		break;
	case VMALLOC_REGION_ID:
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	case KERNEL_REGION_ID:
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); /* XXX */
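/*
 * Handle a hash table miss for an MFC DMA access.  Kernel-space
 * faults are resolved right here in interrupt context; user-space
 * faults are recorded in the spu struct and deferred to the owning
 * context's stop_callback, which runs in process context.
 */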
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately.  User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();	/* make sure dar/dsisr are written before the callback runs */
	spu->stop_callback(spu);
	return 0;
}

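/*
 * Class 0 interrupts signal SPE errors: invalid DMA alignment,
 * invalid MFC DMA commands and SPU errors.  The hard irq handler only
 * records the event; the individual traps are dispatched from
 * spu_irq_class_0_bottom.
 */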
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

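/*
 * Class 1 interrupts signal MFC translation faults: SLB segment
 * faults are fixed up immediately, hash table faults are handed on
 * to __spu_trap_data_map.
 */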
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

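/*
 * Class 2 interrupts cover mailbox, stop-and-signal, halt and DMA
 * tag-group completion events; each one is dispatched to the
 * corresponding context callback.
 */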
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

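/*
 * Each SPE has one interrupt line per interrupt class.  Request all
 * three, unwinding the earlier ones on failure.
 */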
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof(spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof(spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof(spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);

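/*
 * Bring the SPU channels into a clean state before handing the SPU to
 * a new user: zero the channel data and set each channel count to its
 * initial value.
 */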
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

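/* Take an idle SPU off the free list of the given node, if one exists. */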
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

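/*
 * Allocate an idle SPU from any node.  A caller would typically pair
 * this with spu_free(), along the lines of (illustrative only):
 *
 *	struct spu *spu = spu_alloc();
 *	if (spu) {
 *		... run work on the SPU ...
 *		spu_free(spu);
 *	}
 */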
struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

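/*
 * Resolve an MFC fault against the owning process' address space,
 * along the lines of the main powerpc page fault code: look up the
 * VMA, check the access permissions and hand the fault to
 * handle_mm_fault().
 */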
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea))
		return -EFAULT;
#endif /* XXX */
	if (mm == NULL)
		return -EFAULT;
	if (mm->pgd == NULL)
		return -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

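/*
 * Bottom half for class 1 interrupts, run in process context: try to
 * resolve the fault through the hash table first, falling back to a
 * full mm fault.  On success the DMA queue is restarted; on failure a
 * data storage event is reported to the owner.
 */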
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		unsigned long flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		ret = spu_handle_mm_fault(spu);
		if (ret)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error)
		spu_restart_dma(spu);
	else
		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);

	return ret;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
	sysdev_unregister(&spu->sysdev);
}

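/*
 * Instantiate a single SPU: allocate the structure, let the platform
 * code fill in the implementation details, set up SDR and SR1,
 * request the interrupts and register the sysdev before putting the
 * SPU on the free and full lists.
 */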
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof(*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

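/*
 * Enumerate and register all SPUs in the system.  Returns quietly
 * when no SPU management ops have been set up, i.e. on platforms
 * without SPEs.
 */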
static int __init init_spu_base(void)
{
	int i, ret;

	if (!spu_management_ops)
		return 0;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = spu_enumerate_spus(create_spu);
	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		cleanup_spu_base();
		return ret;
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");