xref: /linux/arch/powerpc/platforms/cell/spufs/file.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * SPU file system -- file contents
3  *
4  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
5  *
6  * Author: Arnd Bergmann <arndb@de.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #undef DEBUG
24 
25 #include <linux/fs.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/poll.h>
30 #include <linux/ptrace.h>
31 
32 #include <asm/io.h>
33 #include <asm/semaphore.h>
34 #include <asm/spu.h>
35 #include <asm/spu_info.h>
36 #include <asm/uaccess.h>
37 
38 #include "spufs.h"
39 
40 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
41 
42 static int
43 spufs_mem_open(struct inode *inode, struct file *file)
44 {
45 	struct spufs_inode_info *i = SPUFS_I(inode);
46 	struct spu_context *ctx = i->i_ctx;
47 	file->private_data = ctx;
48 	ctx->local_store = inode->i_mapping;
49 	smp_wmb();
50 	return 0;
51 }
52 
53 static ssize_t
54 __spufs_mem_read(struct spu_context *ctx, char __user *buffer,
55 			size_t size, loff_t *pos)
56 {
57 	char *local_store = ctx->ops->get_ls(ctx);
58 	return simple_read_from_buffer(buffer, size, pos, local_store,
59 					LS_SIZE);
60 }
61 
62 static ssize_t
63 spufs_mem_read(struct file *file, char __user *buffer,
64 				size_t size, loff_t *pos)
65 {
66 	struct spu_context *ctx = file->private_data;
67 	ssize_t ret;
68 
69 	spu_acquire(ctx);
70 	ret = __spufs_mem_read(ctx, buffer, size, pos);
71 	spu_release(ctx);
72 	return ret;
73 }
74 
75 static ssize_t
76 spufs_mem_write(struct file *file, const char __user *buffer,
77 					size_t size, loff_t *ppos)
78 {
79 	struct spu_context *ctx = file->private_data;
80 	char *local_store;
81 	loff_t pos = *ppos;
82 	int ret;
83 
84 	if (pos < 0)
85 		return -EINVAL;
86 	if (pos > LS_SIZE)
87 		return -EFBIG;
88 	if (size > LS_SIZE - pos)
89 		size = LS_SIZE - pos;
90 
91 	spu_acquire(ctx);
92 	local_store = ctx->ops->get_ls(ctx);
93 	ret = copy_from_user(local_store + pos, buffer, size);
94 	spu_release(ctx);
95 
96 	if (ret)
97 		return -EFAULT;
98 	*ppos = pos + size;
99 	return size;
100 }
101 
/*
 * Fault handler for mappings of the SPU local store.  Depending on
 * whether the context is currently saved to memory or loaded into a
 * physical SPU, the faulting page comes either from the saved image
 * (mapped cacheable) or from the SPU's real local store (uncached).
 */
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset = address - vma->vm_start;

	offset += vma->vm_pgoff << PAGE_SHIFT;

	/* refuse faults beyond the end of local store */
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	/* lock out state transitions while picking the backing page */
	spu_acquire(ctx);

	if (ctx->state == SPU_STATE_SAVED) {
		/* saved state lives in normal memory: map it cacheable */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		/* running on hardware: map the real local store, uncached */
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	/* PTE is installed; have the caller retry the access */
	return NOPFN_REFAULT;
}
130 
131 
/* vm_ops for local store mappings: faults resolved page by page */
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};
135 
136 static int
137 spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
138 {
139 	if (!(vma->vm_flags & VM_SHARED))
140 		return -EINVAL;
141 
142 	vma->vm_flags |= VM_IO | VM_PFNMAP;
143 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
144 				     | _PAGE_NO_CACHE);
145 
146 	vma->vm_ops = &spufs_mem_mmap_vmops;
147 	return 0;
148 }
149 
/* file operations for "mem": read/write/seek/mmap of the SPU local store */
static const struct file_operations spufs_mem_fops = {
	.open	 = spufs_mem_open,
	.read    = spufs_mem_read,
	.write   = spufs_mem_write,
	.llseek  = generic_file_llseek,
	.mmap    = spufs_mem_mmap,
};
157 
/*
 * Common fault handler for mappings of the SPU problem state area.
 * ps_offs/ps_size select the sub-area (relative to the start of the
 * problem state) exposed by this mapping.  The context must be running
 * on a physical SPU, so this may sleep until it gets scheduled in.
 */
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret;

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/* error here usually means a signal.. we might want to test
	 * the error code more precisely though
	 */
	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return NOPFN_REFAULT;

	area = ctx->spu->problem_phys + ps_offs;
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
	spu_release(ctx);

	return NOPFN_REFAULT;
}
184 
#if SPUFS_MMAP_4K
/* fault handler: the control area is one 4k page at offset 0x4000 */
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
/* with 64k pages the 4k control area cannot be mapped on its own */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
214 
215 static u64 spufs_cntl_get(void *data)
216 {
217 	struct spu_context *ctx = data;
218 	u64 val;
219 
220 	spu_acquire(ctx);
221 	val = ctx->ops->status_read(ctx);
222 	spu_release(ctx);
223 
224 	return val;
225 }
226 
227 static void spufs_cntl_set(void *data, u64 val)
228 {
229 	struct spu_context *ctx = data;
230 
231 	spu_acquire(ctx);
232 	ctx->ops->runcntl_write(ctx, val);
233 	spu_release(ctx);
234 }
235 
236 static int spufs_cntl_open(struct inode *inode, struct file *file)
237 {
238 	struct spufs_inode_info *i = SPUFS_I(inode);
239 	struct spu_context *ctx = i->i_ctx;
240 
241 	file->private_data = ctx;
242 	ctx->cntl = inode->i_mapping;
243 	smp_wmb();
244 	return simple_attr_open(inode, file, spufs_cntl_get,
245 					spufs_cntl_set, "0x%08lx");
246 }
247 
/* file operations for "cntl": simple_attr text interface plus mmap */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = simple_attr_close,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
255 
256 static int
257 spufs_regs_open(struct inode *inode, struct file *file)
258 {
259 	struct spufs_inode_info *i = SPUFS_I(inode);
260 	file->private_data = i->i_ctx;
261 	return 0;
262 }
263 
264 static ssize_t
265 __spufs_regs_read(struct spu_context *ctx, char __user *buffer,
266 			size_t size, loff_t *pos)
267 {
268 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
269 	return simple_read_from_buffer(buffer, size, pos,
270 				      lscsa->gprs, sizeof lscsa->gprs);
271 }
272 
273 static ssize_t
274 spufs_regs_read(struct file *file, char __user *buffer,
275 		size_t size, loff_t *pos)
276 {
277 	int ret;
278 	struct spu_context *ctx = file->private_data;
279 
280 	spu_acquire_saved(ctx);
281 	ret = __spufs_regs_read(ctx, buffer, size, pos);
282 	spu_release(ctx);
283 	return ret;
284 }
285 
/*
 * Write into the saved GPR image.  Note the unusual order: the file
 * offset is advanced before the copy, and the copy targets
 * *pos - size; a faulting copy returns -EFAULT with *pos already moved.
 */
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	/* clamp to the end of the register array, reject writes past it */
	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	/* the register image is only valid while the context is saved */
	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}
307 
/* file operations for "regs": access to the saved GPR image */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
314 
315 static ssize_t
316 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
317 			size_t size, loff_t * pos)
318 {
319 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
320 	return simple_read_from_buffer(buffer, size, pos,
321 				      &lscsa->fpcr, sizeof(lscsa->fpcr));
322 }
323 
324 static ssize_t
325 spufs_fpcr_read(struct file *file, char __user * buffer,
326 		size_t size, loff_t * pos)
327 {
328 	int ret;
329 	struct spu_context *ctx = file->private_data;
330 
331 	spu_acquire_saved(ctx);
332 	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
333 	spu_release(ctx);
334 	return ret;
335 }
336 
/*
 * Write the saved FPCR.  Same delicate pattern as spufs_regs_write():
 * *pos is advanced before the copy and the copy targets *pos - size.
 */
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	/* clamp to the size of the FPCR, reject writes past its end */
	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	/* the register image is only valid while the context is saved */
	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release(ctx);
	return ret;
}
358 
/* file operations for "fpcr": access to the saved FP control register */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
365 
366 /* generic open function for all pipe-like files */
367 static int spufs_pipe_open(struct inode *inode, struct file *file)
368 {
369 	struct spufs_inode_info *i = SPUFS_I(inode);
370 	file->private_data = i->i_ctx;
371 
372 	return nonseekable_open(inode, file);
373 }
374 
375 /*
376  * Read as many bytes from the mailbox as possible, until
377  * one of the conditions becomes true:
378  *
379  * - no more data available in the mailbox
380  * - end of the user provided buffer
381  * - end of the mapped area
382  */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	/* mailbox entries are 32 bits; partial entries are not supported */
	if (len < 4)
		return -EINVAL;

	/* check the whole buffer once, then use __put_user in the loop */
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	/* nothing could be read: the mailbox was empty */
	if (!count)
		count = -EAGAIN;

	return count;
}
424 
/* file operations for "mbox": read-only, non-blocking mailbox drain */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
429 
430 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
431 			size_t len, loff_t *pos)
432 {
433 	struct spu_context *ctx = file->private_data;
434 	u32 mbox_stat;
435 
436 	if (len < 4)
437 		return -EINVAL;
438 
439 	spu_acquire(ctx);
440 
441 	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
442 
443 	spu_release(ctx);
444 
445 	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
446 		return -EFAULT;
447 
448 	return 4;
449 }
450 
/* file operations for "mbox_stat": read-only mailbox status byte */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
455 
456 /* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	/* nonzero return means *data was filled; 0 means ibox empty */
	return ctx->ops->ibox_read(ctx, data);
}
461 
/* register/unregister this file for SIGIO on ibox data arrival */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
468 
469 /* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake sleeping readers and notify async listeners */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
477 
478 /*
479  * Read as many bytes from the interrupt mailbox as possible, until
480  * one of the conditions becomes true:
481  *
482  * - no more data available in the mailbox
483  * - end of the user provided buffer
484  * - end of the mapped area
485  *
486  * If the file is opened without O_NONBLOCK, we wait here until
487  * any data is available, but return when we have been able to
488  * read something.
489  */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	/* ibox entries are 32 bits; partial entries are not supported */
	if (len < 4)
		return -EINVAL;

	/* check the whole buffer once, then use __put_user in the loop */
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	spu_acquire(ctx);

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
	}
	if (count)
		goto out;

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;

	/* drain any further entries without blocking */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out:
	spu_release(ctx);

	return count;
}
543 
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * NOTE(review): mbox_stat_poll is shared by mbox/ibox/wbox; it is
	 * passed the requested events (POLLIN here) — presumably it maps
	 * them to the right status bits; confirm in the backend ops.
	 */
	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
557 
/* file operations for "ibox": blocking reads, poll and SIGIO support */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
564 
565 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
566 			size_t len, loff_t *pos)
567 {
568 	struct spu_context *ctx = file->private_data;
569 	u32 ibox_stat;
570 
571 	if (len < 4)
572 		return -EINVAL;
573 
574 	spu_acquire(ctx);
575 	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
576 	spu_release(ctx);
577 
578 	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
579 		return -EFAULT;
580 
581 	return 4;
582 }
583 
/* file operations for "ibox_stat": read-only ibox status byte */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
588 
589 /* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	/* nonzero return means the value was queued; 0 means wbox full */
	return ctx->ops->wbox_write(ctx, data);
}
594 
595 static int spufs_wbox_fasync(int fd, struct file *file, int on)
596 {
597 	struct spu_context *ctx = file->private_data;
598 	int ret;
599 
600 	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
601 
602 	return ret;
603 }
604 
605 /* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake sleeping writers and notify async listeners */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
613 
614 /*
615  * Write as many bytes to the interrupt mailbox as possible, until
616  * one of the conditions becomes true:
617  *
618  * - the mailbox is full
619  * - end of the user provided buffer
620  * - end of the mapped area
621  *
622  * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
624  * write something.
625  */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	/* wbox entries are 32 bits; partial entries are not supported */
	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	/* check the whole buffer once, then use __get_user in the loop */
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	spu_acquire(ctx);

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data))
			count = -EAGAIN;
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
	}

	if (count)
		goto out;

	/* write as much as possible without blocking */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
}
676 
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/* same shared status helper as ibox, but asking for writability */
	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
690 
/* file operations for "wbox": blocking writes, poll and SIGIO support */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
697 
698 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
699 			size_t len, loff_t *pos)
700 {
701 	struct spu_context *ctx = file->private_data;
702 	u32 wbox_stat;
703 
704 	if (len < 4)
705 		return -EINVAL;
706 
707 	spu_acquire(ctx);
708 	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
709 	spu_release(ctx);
710 
711 	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
712 		return -EFAULT;
713 
714 	return 4;
715 }
716 
/* file operations for "wbox_stat": read-only wbox status byte */
static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
721 
722 static int spufs_signal1_open(struct inode *inode, struct file *file)
723 {
724 	struct spufs_inode_info *i = SPUFS_I(inode);
725 	struct spu_context *ctx = i->i_ctx;
726 	file->private_data = ctx;
727 	ctx->signal1 = inode->i_mapping;
728 	smp_wmb();
729 	return nonseekable_open(inode, file);
730 }
731 
732 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
733 			size_t len, loff_t *pos)
734 {
735 	int ret = 0;
736 	u32 data;
737 
738 	if (len < 4)
739 		return -EINVAL;
740 
741 	if (ctx->csa.spu_chnlcnt_RW[3]) {
742 		data = ctx->csa.spu_chnldata_RW[3];
743 		ret = 4;
744 	}
745 
746 	if (!ret)
747 		goto out;
748 
749 	if (copy_to_user(buf, &data, 4))
750 		return -EFAULT;
751 
752 out:
753 	return ret;
754 }
755 
756 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
757 			size_t len, loff_t *pos)
758 {
759 	int ret;
760 	struct spu_context *ctx = file->private_data;
761 
762 	spu_acquire_saved(ctx);
763 	ret = __spufs_signal1_read(ctx, buf, len, pos);
764 	spu_release(ctx);
765 
766 	return ret;
767 }
768 
769 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
770 			size_t len, loff_t *pos)
771 {
772 	struct spu_context *ctx;
773 	u32 data;
774 
775 	ctx = file->private_data;
776 
777 	if (len < 4)
778 		return -EINVAL;
779 
780 	if (copy_from_user(&data, buf, 4))
781 		return -EFAULT;
782 
783 	spu_acquire(ctx);
784 	ctx->ops->signal1_write(ctx, data);
785 	spu_release(ctx);
786 
787 	return 4;
788 }
789 
/* fault handler for the signal1 notification area */
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	/* 4k pages: map just the signal1 page at offset 0x14000 */
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}
804 
/* vm_ops for signal1 mappings */
static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};
808 
809 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
810 {
811 	if (!(vma->vm_flags & VM_SHARED))
812 		return -EINVAL;
813 
814 	vma->vm_flags |= VM_IO | VM_PFNMAP;
815 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
816 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
817 
818 	vma->vm_ops = &spufs_signal1_mmap_vmops;
819 	return 0;
820 }
821 
/* file operations for "signal1": read pending value, write to notify */
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
828 
829 static int spufs_signal2_open(struct inode *inode, struct file *file)
830 {
831 	struct spufs_inode_info *i = SPUFS_I(inode);
832 	struct spu_context *ctx = i->i_ctx;
833 	file->private_data = ctx;
834 	ctx->signal2 = inode->i_mapping;
835 	smp_wmb();
836 	return nonseekable_open(inode, file);
837 }
838 
839 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
840 			size_t len, loff_t *pos)
841 {
842 	int ret = 0;
843 	u32 data;
844 
845 	if (len < 4)
846 		return -EINVAL;
847 
848 	if (ctx->csa.spu_chnlcnt_RW[4]) {
849 		data =  ctx->csa.spu_chnldata_RW[4];
850 		ret = 4;
851 	}
852 
853 	if (!ret)
854 		goto out;
855 
856 	if (copy_to_user(buf, &data, 4))
857 		return -EFAULT;
858 
859 out:
860 	return ret;
861 }
862 
863 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
864 			size_t len, loff_t *pos)
865 {
866 	struct spu_context *ctx = file->private_data;
867 	int ret;
868 
869 	spu_acquire_saved(ctx);
870 	ret = __spufs_signal2_read(ctx, buf, len, pos);
871 	spu_release(ctx);
872 
873 	return ret;
874 }
875 
876 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
877 			size_t len, loff_t *pos)
878 {
879 	struct spu_context *ctx;
880 	u32 data;
881 
882 	ctx = file->private_data;
883 
884 	if (len < 4)
885 		return -EINVAL;
886 
887 	if (copy_from_user(&data, buf, 4))
888 		return -EFAULT;
889 
890 	spu_acquire(ctx);
891 	ctx->ops->signal2_write(ctx, data);
892 	spu_release(ctx);
893 
894 	return 4;
895 }
896 
#if SPUFS_MMAP_4K
/* fault handler for the signal2 notification area */
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	/* 4k pages: map just the signal2 page at offset 0x1c000 */
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
/* with 64k pages signal2 is covered by the signal1 mapping */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
932 
/* file operations for "signal2": read pending value, write to notify */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
939 
/* attribute setter: change the signal1 type (see signal1_type_get) */
static void spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);
}
948 
static u64 __spufs_signal1_type_get(void *data)
{
	/* caller must already hold the context lock */
	struct spu_context *ctx = data;
	return ctx->ops->signal1_type_get(ctx);
}
954 
/* attribute getter: read the signal1 type under the context lock */
static u64 spufs_signal1_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal1_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
					spufs_signal1_type_set, "%llu");
968 
/* attribute setter: change the signal2 type (see signal2_type_get) */
static void spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);
}
977 
static u64 __spufs_signal2_type_get(void *data)
{
	/* caller must already hold the context lock */
	struct spu_context *ctx = data;
	return ctx->ops->signal2_type_get(ctx);
}
983 
/* attribute getter: read the signal2 type under the context lock */
static u64 spufs_signal2_type_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire(ctx);
	ret = __spufs_signal2_type_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
					spufs_signal2_type_set, "%llu");
997 
#if SPUFS_MMAP_4K
/* fault handler: one 4k page at offset 0 of the problem state */
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
/* with 64k pages this 4k area cannot be mapped on its own */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1027 
1028 static int spufs_mss_open(struct inode *inode, struct file *file)
1029 {
1030 	struct spufs_inode_info *i = SPUFS_I(inode);
1031 	struct spu_context *ctx = i->i_ctx;
1032 
1033 	file->private_data = i->i_ctx;
1034 	ctx->mss = inode->i_mapping;
1035 	smp_wmb();
1036 	return nonseekable_open(inode, file);
1037 }
1038 
/* file operations for "mss": mmap-only access */
static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.mmap	 = spufs_mss_mmap,
};
1043 
/* fault handler: the whole 128k problem state area */
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}
1049 
/* vm_ops for full problem state mappings */
static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};
1053 
1054 /*
1055  * mmap support for full problem state area [0x00000 - 0x1ffff].
1056  */
1057 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1058 {
1059 	if (!(vma->vm_flags & VM_SHARED))
1060 		return -EINVAL;
1061 
1062 	vma->vm_flags |= VM_IO | VM_PFNMAP;
1063 	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1064 				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1065 
1066 	vma->vm_ops = &spufs_psmap_mmap_vmops;
1067 	return 0;
1068 }
1069 
1070 static int spufs_psmap_open(struct inode *inode, struct file *file)
1071 {
1072 	struct spufs_inode_info *i = SPUFS_I(inode);
1073 	struct spu_context *ctx = i->i_ctx;
1074 
1075 	file->private_data = i->i_ctx;
1076 	ctx->psmap = inode->i_mapping;
1077 	smp_wmb();
1078 	return nonseekable_open(inode, file);
1079 }
1080 
/* file operations for "psmap": mmap-only access to the problem state */
static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.mmap	 = spufs_psmap_mmap,
};
1085 
1086 
#if SPUFS_MMAP_4K
/* fault handler: one 4k page at offset 0x3000 of the problem state */
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for the problem state MFC command area.
 * NOTE(review): the fault handler maps offset 0x3000, i.e.
 * [0x3000 - 0x3fff]; the old comment claimed [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
/* with 64k pages this 4k area cannot be mapped on its own */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1116 
/*
 * Open the "mfc" DMA file.  Only the owning process may open it, and
 * only one open instance is allowed at a time.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	/* a second link/open of the inode would allow concurrent users */
	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	file->private_data = ctx;
	ctx->mfc = inode->i_mapping;
	/* publish ctx->mfc before the file can be used */
	smp_wmb();
	return nonseekable_open(inode, file);
}
1134 
1135 /* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* wake anyone sleeping in spufs_mfc_read/write */
	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* build the poll mask the SIGIO consumer will see */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
1160 
/*
 * Condition helper for spufs_wait(): consumes completed tag groups from
 * ctx->tagwait into *status.  Returns 1 when something completed, else
 * arms the tag-group interrupt and returns 0 so the caller sleeps.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
1175 
/*
 * Read from the mfc file: returns a 32-bit mask of the tag groups that
 * have completed since they were queued via spufs_mfc_write().  Reads
 * must be exactly 4 bytes.  O_NONBLOCK readers get -EAGAIN when no
 * waited-on group has completed yet; blocking readers sleep on mfc_wq.
 */
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* consume the completed groups from tagwait */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	/* copy outside the context lock; status is already snapshotted */
	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
1209 
1210 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1211 {
1212 	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
1213 		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1214 
1215 	switch (cmd->cmd) {
1216 	case MFC_PUT_CMD:
1217 	case MFC_PUTF_CMD:
1218 	case MFC_PUTB_CMD:
1219 	case MFC_GET_CMD:
1220 	case MFC_GETF_CMD:
1221 	case MFC_GETB_CMD:
1222 		break;
1223 	default:
1224 		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1225 		return -EIO;
1226 	}
1227 
1228 	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
1229 		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
1230 				cmd->ea, cmd->lsa);
1231 		return -EIO;
1232 	}
1233 
1234 	switch (cmd->size & 0xf) {
1235 	case 1:
1236 		break;
1237 	case 2:
1238 		if (cmd->lsa & 1)
1239 			goto error;
1240 		break;
1241 	case 4:
1242 		if (cmd->lsa & 3)
1243 			goto error;
1244 		break;
1245 	case 8:
1246 		if (cmd->lsa & 7)
1247 			goto error;
1248 		break;
1249 	case 0:
1250 		if (cmd->lsa & 15)
1251 			goto error;
1252 		break;
1253 	error:
1254 	default:
1255 		pr_debug("invalid DMA alignment %x for size %x\n",
1256 			cmd->lsa & 0xf, cmd->size);
1257 		return -EIO;
1258 	}
1259 
1260 	if (cmd->size > 16 * 1024) {
1261 		pr_debug("invalid DMA size %x\n", cmd->size);
1262 		return -EIO;
1263 	}
1264 
1265 	if (cmd->tag & 0xfff0) {
1266 		/* we reserve the higher tag numbers for kernel use */
1267 		pr_debug("invalid DMA tag\n");
1268 		return -EIO;
1269 	}
1270 
1271 	if (cmd->class) {
1272 		/* not supported in this version */
1273 		pr_debug("invalid DMA class\n");
1274 		return -EIO;
1275 	}
1276 
1277 	return 0;
1278 }
1279 
/*
 * Wait condition for spufs_mfc_write(): try to enqueue the command,
 * returning 1 when a definitive result is available in *error and 0
 * when the caller should keep sleeping on mfc_wq.  On a full queue the
 * completion interrupt is armed and the submit retried once, since a
 * tag group may have completed in between.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
1297 
/*
 * Write to the mfc file: queue one struct mfc_dma_command on the SPU's
 * MFC.  Writes must be exactly sizeof(struct mfc_dma_command) bytes.
 * O_NONBLOCK writers get -EAGAIN when the command queue is full;
 * blocking writers sleep until the command could be submitted.  On
 * success the command's tag is added to tagwait so completion can be
 * observed via read/poll.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	/* validate before taking the context lock */
	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	/* NOTE(review): the result of spu_acquire_runnable() is ignored
	   here; if it can fail we would proceed unlocked -- verify. */
	spu_acquire_runnable(ctx, 0);
	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out:
	return ret;
}
1337 
/*
 * Poll the mfc file: POLLOUT when the MFC command queue has free
 * slots, POLLIN when a waited-on tag group has completed.  The query
 * is armed in mode 2 ("all" tag groups) so the interrupt callback can
 * wake us later.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	poll_wait(file, &ctx->mfc_wq, wait);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
1363 
/*
 * Flush the mfc file.  The intended behavior -- waiting until every
 * queued tag group has completed -- is compiled out below because it
 * currently hangs, so this is a no-op that always returns 0 after
 * taking and dropping the context lock.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}
1386 
/* fsync on the mfc file is defined as draining outstanding DMA, i.e. flush. */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}
1392 
/* Register or unregister for SIGIO delivery on MFC completion events. */
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
1399 
/* File operations for the per-context "mfc" file (DMA command queue). */
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
1410 
/* "npc" attribute setter: write the SPU next program counter. */
static void spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);
}
1418 
/* "npc" attribute getter: read the SPU next program counter. */
static u64 spufs_npc_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire(ctx);
	ret = ctx->ops->npc_read(ctx);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
			"0x%llx\n")
1430 
/* "decr" attribute setter: store the decrementer in the saved context. */
static void spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release(ctx);
}
1439 
1440 static u64 __spufs_decr_get(void *data)
1441 {
1442 	struct spu_context *ctx = data;
1443 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1444 	return lscsa->decr.slot[0];
1445 }
1446 
/* "decr" attribute getter: locked wrapper around __spufs_decr_get(). */
static u64 spufs_decr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_decr_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
			"0x%llx\n")
1458 
/* "decr_status" attribute setter: store into the saved context. */
static void spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr_status.slot[0] = (u32) val;
	spu_release(ctx);
}
1467 
1468 static u64 __spufs_decr_status_get(void *data)
1469 {
1470 	struct spu_context *ctx = data;
1471 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1472 	return lscsa->decr_status.slot[0];
1473 }
1474 
/* "decr_status" attribute getter: locked wrapper. */
static u64 spufs_decr_status_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_decr_status_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
			spufs_decr_status_set, "0x%llx\n")
1486 
/* "event_mask" attribute setter: store into the saved context. */
static void spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release(ctx);
}
1495 
1496 static u64 __spufs_event_mask_get(void *data)
1497 {
1498 	struct spu_context *ctx = data;
1499 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1500 	return lscsa->event_mask.slot[0];
1501 }
1502 
/* "event_mask" attribute getter: locked wrapper. */
static u64 spufs_event_mask_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = __spufs_event_mask_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
			spufs_event_mask_set, "0x%llx\n")
1514 
1515 static u64 __spufs_event_status_get(void *data)
1516 {
1517 	struct spu_context *ctx = data;
1518 	struct spu_state *state = &ctx->csa;
1519 	u64 stat;
1520 	stat = state->spu_chnlcnt_RW[0];
1521 	if (stat)
1522 		return state->spu_chnldata_RW[0];
1523 	return 0;
1524 }
1525 
/* "event_status" attribute getter (read-only): locked wrapper. */
static u64 spufs_event_status_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret = 0;

	spu_acquire_saved(ctx);
	ret = __spufs_event_status_get(data);
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
			NULL, "0x%llx\n")
1538 
/* "srr0" attribute setter: store into the saved context. */
static void spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release(ctx);
}
1547 
/* "srr0" attribute getter: read from the saved context under lock. */
static u64 spufs_srr0_get(void *data)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	u64 ret;
	spu_acquire_saved(ctx);
	ret = lscsa->srr0.slot[0];
	spu_release(ctx);
	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
			"0x%llx\n")
1560 
/*
 * "phys-id" attribute getter: the number of the physical SPU the
 * context currently runs on, or (u32)-1 when it is not runnable.
 */
static u64 spufs_id_get(void *data)
{
	struct spu_context *ctx = data;
	u64 num;

	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;
	spu_release(ctx);

	return num;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
1576 
1577 static u64 __spufs_object_id_get(void *data)
1578 {
1579 	struct spu_context *ctx = data;
1580 	return ctx->object_id;
1581 }
1582 
/* "object-id" attribute getter. */
static u64 spufs_object_id_get(void *data)
{
	/* FIXME: Should there really be no locking here? */
	return __spufs_object_id_get(data);
}
1588 
/* "object-id" attribute setter: plain store, no locking (see FIXME above
 * the getter -- presumably a single aligned u64 write is considered safe). */
static void spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
}

DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		spufs_object_id_set, "0x%llx\n");
1597 
1598 static u64 __spufs_lslr_get(void *data)
1599 {
1600 	struct spu_context *ctx = data;
1601 	return ctx->csa.priv2.spu_lslr_RW;
1602 }
1603 
/* "lslr" attribute getter (read-only): locked wrapper. */
static u64 spufs_lslr_get(void *data)
{
	struct spu_context *ctx = data;
	u64 ret;

	spu_acquire_saved(ctx);
	ret = __spufs_lslr_get(data);
	spu_release(ctx);

	return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
1616 
1617 static int spufs_info_open(struct inode *inode, struct file *file)
1618 {
1619 	struct spufs_inode_info *i = SPUFS_I(inode);
1620 	struct spu_context *ctx = i->i_ctx;
1621 	file->private_data = ctx;
1622 	return 0;
1623 }
1624 
1625 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
1626 			char __user *buf, size_t len, loff_t *pos)
1627 {
1628 	u32 mbox_stat;
1629 	u32 data;
1630 
1631 	mbox_stat = ctx->csa.prob.mb_stat_R;
1632 	if (mbox_stat & 0x0000ff) {
1633 		data = ctx->csa.prob.pu_mb_R;
1634 	}
1635 
1636 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1637 }
1638 
/*
 * Read handler for "mbox_info": snapshot the saved mailbox state under
 * the context and register locks, then hand it to user space.
 */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1656 
/* File operations for the read-only "mbox_info" file. */
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
1662 
1663 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
1664 				char __user *buf, size_t len, loff_t *pos)
1665 {
1666 	u32 ibox_stat;
1667 	u32 data;
1668 
1669 	ibox_stat = ctx->csa.prob.mb_stat_R;
1670 	if (ibox_stat & 0xff0000) {
1671 		data = ctx->csa.priv2.puint_mb_R;
1672 	}
1673 
1674 	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
1675 }
1676 
/*
 * Read handler for "ibox_info": snapshot the saved interrupt mailbox
 * state under the context and register locks.
 */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1694 
/* File operations for the read-only "ibox_info" file. */
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
1700 
1701 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
1702 			char __user *buf, size_t len, loff_t *pos)
1703 {
1704 	int i, cnt;
1705 	u32 data[4];
1706 	u32 wbox_stat;
1707 
1708 	wbox_stat = ctx->csa.prob.mb_stat_R;
1709 	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
1710 	for (i = 0; i < cnt; i++) {
1711 		data[i] = ctx->csa.spu_mailbox_data[i];
1712 	}
1713 
1714 	return simple_read_from_buffer(buf, len, pos, &data,
1715 				cnt * sizeof(u32));
1716 }
1717 
/*
 * Read handler for "wbox_info": snapshot the saved write-mailbox state
 * under the context and register locks.
 */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1735 
/* File operations for the read-only "wbox_info" file. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
1741 
1742 static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
1743 			char __user *buf, size_t len, loff_t *pos)
1744 {
1745 	struct spu_dma_info info;
1746 	struct mfc_cq_sr *qp, *spuqp;
1747 	int i;
1748 
1749 	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
1750 	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
1751 	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
1752 	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
1753 	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
1754 	for (i = 0; i < 16; i++) {
1755 		qp = &info.dma_info_command_data[i];
1756 		spuqp = &ctx->csa.priv2.spuq[i];
1757 
1758 		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
1759 		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
1760 		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
1761 		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
1762 	}
1763 
1764 	return simple_read_from_buffer(buf, len, pos, &info,
1765 				sizeof info);
1766 }
1767 
/*
 * Read handler for "dma_info": snapshot the saved MFC DMA state under
 * the context and register locks.
 */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1785 
/* File operations for the read-only "dma_info" file. */
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
1790 
1791 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
1792 			char __user *buf, size_t len, loff_t *pos)
1793 {
1794 	struct spu_proxydma_info info;
1795 	struct mfc_cq_sr *qp, *puqp;
1796 	int ret = sizeof info;
1797 	int i;
1798 
1799 	if (len < ret)
1800 		return -EINVAL;
1801 
1802 	if (!access_ok(VERIFY_WRITE, buf, len))
1803 		return -EFAULT;
1804 
1805 	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
1806 	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
1807 	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
1808 	for (i = 0; i < 8; i++) {
1809 		qp = &info.proxydma_info_command_data[i];
1810 		puqp = &ctx->csa.priv2.puq[i];
1811 
1812 		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
1813 		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
1814 		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
1815 		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
1816 	}
1817 
1818 	return simple_read_from_buffer(buf, len, pos, &info,
1819 				sizeof info);
1820 }
1821 
/*
 * Read handler for "proxydma_info": snapshot the saved proxy DMA state
 * under the context and register locks.  (Unlike the sibling handlers,
 * access_ok is performed inside the __ helper here.)
 */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release(ctx);

	return ret;
}
1836 
/* File operations for the read-only "proxydma_info" file. */
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
1841 
/* Files created in each context directory of a normally scheduled context. */
struct tree_descr spufs_dir_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{},
};
1876 
/*
 * Reduced file set for contexts created with SPU_CREATE_NOSCHED; files
 * that would require saving/restoring the context are omitted.
 */
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{},
};
1898 
/*
 * Table driving SPU coredump note generation: each entry names a
 * context file and supplies either a lockless read callback or a
 * lockless get callback plus the note payload size in bytes.
 */
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, 128 * 16 },
	{ "fpcr", __spufs_fpcr_read, NULL, 16 },
	{ "lslr", NULL, __spufs_lslr_get, 11 },
	{ "decr", NULL, __spufs_decr_get, 11 },
	{ "decr_status", NULL, __spufs_decr_status_get, 11 },
	{ "mem", __spufs_mem_read, NULL, 256 * 1024, },
	{ "signal1", __spufs_signal1_read, NULL, 4 },
	{ "signal1_type", NULL, __spufs_signal1_type_get, 2 },
	{ "signal2", __spufs_signal2_read, NULL, 4 },
	{ "signal2_type", NULL, __spufs_signal2_type_get, 2 },
	{ "event_mask", NULL, __spufs_event_mask_get, 8 },
	{ "event_status", NULL, __spufs_event_status_get, 8 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, 4 },
	{ "ibox_info", __spufs_ibox_info_read, NULL, 4 },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 16 },
	{ "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
	{ "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
	{ "object-id", NULL, __spufs_object_id_get, 19 },
	{ },
};
/* number of valid entries above, excluding the terminator */
int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
1921 
1922