1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * SPU file system -- file contents
4  *
5  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6  *
7  * Author: Arnd Bergmann <arndb@de.ibm.com>
8  */
9 
10 #undef DEBUG
11 
12 #include <linux/coredump.h>
13 #include <linux/fs.h>
14 #include <linux/ioctl.h>
15 #include <linux/export.h>
16 #include <linux/pagemap.h>
17 #include <linux/poll.h>
18 #include <linux/ptrace.h>
19 #include <linux/seq_file.h>
20 #include <linux/slab.h>
21 
22 #include <asm/io.h>
23 #include <asm/time.h>
24 #include <asm/spu.h>
25 #include <asm/spu_info.h>
26 #include <linux/uaccess.h>
27 
28 #include "spufs.h"
29 #include "sputrace.h"
30 
31 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
32 
33 /* Simple attribute files */
34 struct spufs_attr {
35 	int (*get)(void *, u64 *);
36 	int (*set)(void *, u64);
37 	char get_buf[24];       /* enough to store a u64 and "\n\0" */
38 	char set_buf[24];
39 	void *data;
40 	const char *fmt;        /* format for read operation */
41 	struct mutex mutex;     /* protects access to these buffers */
42 };
43 
44 static int spufs_attr_open(struct inode *inode, struct file *file,
45 		int (*get)(void *, u64 *), int (*set)(void *, u64),
46 		const char *fmt)
47 {
48 	struct spufs_attr *attr;
49 
50 	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
51 	if (!attr)
52 		return -ENOMEM;
53 
54 	attr->get = get;
55 	attr->set = set;
56 	attr->data = inode->i_private;
57 	attr->fmt = fmt;
58 	mutex_init(&attr->mutex);
59 	file->private_data = attr;
60 
61 	return nonseekable_open(inode, file);
62 }
63 
64 static int spufs_attr_release(struct inode *inode, struct file *file)
65 {
66 	kfree(file->private_data);
67 	return 0;
68 }
69 
70 static ssize_t spufs_attr_read(struct file *file, char __user *buf,
71 		size_t len, loff_t *ppos)
72 {
73 	struct spufs_attr *attr;
74 	size_t size;
75 	ssize_t ret;
76 
77 	attr = file->private_data;
78 	if (!attr->get)
79 		return -EACCES;
80 
81 	ret = mutex_lock_interruptible(&attr->mutex);
82 	if (ret)
83 		return ret;
84 
85 	if (*ppos) {		/* continued read */
86 		size = strlen(attr->get_buf);
87 	} else {		/* first read */
88 		u64 val;
89 		ret = attr->get(attr->data, &val);
90 		if (ret)
91 			goto out;
92 
93 		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
94 				 attr->fmt, (unsigned long long)val);
95 	}
96 
97 	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
98 out:
99 	mutex_unlock(&attr->mutex);
100 	return ret;
101 }
102 
103 static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
104 		size_t len, loff_t *ppos)
105 {
106 	struct spufs_attr *attr;
107 	u64 val;
108 	size_t size;
109 	ssize_t ret;
110 
111 	attr = file->private_data;
112 	if (!attr->set)
113 		return -EACCES;
114 
115 	ret = mutex_lock_interruptible(&attr->mutex);
116 	if (ret)
117 		return ret;
118 
119 	ret = -EFAULT;
120 	size = min(sizeof(attr->set_buf) - 1, len);
121 	if (copy_from_user(attr->set_buf, buf, size))
122 		goto out;
123 
124 	ret = len; /* claim we got the whole input */
125 	attr->set_buf[size] = '\0';
126 	val = simple_strtol(attr->set_buf, NULL, 0);
127 	attr->set(attr->data, val);
128 out:
129 	mutex_unlock(&attr->mutex);
130 	return ret;
131 }
132 
133 static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf,
134 		size_t size)
135 {
136 	if (!dump_emit(cprm, buf, size))
137 		return -EIO;
138 	return size;
139 }
140 
141 #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
142 static int __fops ## _open(struct inode *inode, struct file *file)	\
143 {									\
144 	__simple_attr_check_format(__fmt, 0ull);			\
145 	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
146 }									\
147 static const struct file_operations __fops = {				\
148 	.open	 = __fops ## _open,					\
149 	.release = spufs_attr_release,					\
150 	.read	 = spufs_attr_read,					\
151 	.write	 = spufs_attr_write,					\
152 	.llseek  = generic_file_llseek,					\
153 };
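/*
 * Illustrative expansion (not compiled here; spufs_foo_ops and its
 * get/set helpers are hypothetical names used only for this example):
 * an invocation such as
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      spufs_foo_set, "0x%llx\n");
 *
 * generates an open helper plus the file_operations:
 *
 *	static int spufs_foo_ops_open(struct inode *inode, struct file *file)
 *	{
 *		__simple_attr_check_format("0x%llx\n", 0ull);
 *		return spufs_attr_open(inode, file, spufs_foo_get,
 *				       spufs_foo_set, "0x%llx\n");
 *	}
 *	static const struct file_operations spufs_foo_ops = { ... };
 *
 * so each attribute file shares spufs_attr_read()/spufs_attr_write()
 * and differs only in its get/set callbacks and format string.
 */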
154 
155 
156 static int
157 spufs_mem_open(struct inode *inode, struct file *file)
158 {
159 	struct spufs_inode_info *i = SPUFS_I(inode);
160 	struct spu_context *ctx = i->i_ctx;
161 
162 	mutex_lock(&ctx->mapping_lock);
163 	file->private_data = ctx;
164 	if (!i->i_openers++)
165 		ctx->local_store = inode->i_mapping;
166 	mutex_unlock(&ctx->mapping_lock);
167 	return 0;
168 }
169 
170 static int
171 spufs_mem_release(struct inode *inode, struct file *file)
172 {
173 	struct spufs_inode_info *i = SPUFS_I(inode);
174 	struct spu_context *ctx = i->i_ctx;
175 
176 	mutex_lock(&ctx->mapping_lock);
177 	if (!--i->i_openers)
178 		ctx->local_store = NULL;
179 	mutex_unlock(&ctx->mapping_lock);
180 	return 0;
181 }
182 
183 static ssize_t
184 spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm)
185 {
186 	return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE);
187 }
188 
189 static ssize_t
190 spufs_mem_read(struct file *file, char __user *buffer,
191 				size_t size, loff_t *pos)
192 {
193 	struct spu_context *ctx = file->private_data;
194 	ssize_t ret;
195 
196 	ret = spu_acquire(ctx);
197 	if (ret)
198 		return ret;
199 	ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx),
200 				      LS_SIZE);
201 	spu_release(ctx);
202 
203 	return ret;
204 }
205 
206 static ssize_t
207 spufs_mem_write(struct file *file, const char __user *buffer,
208 					size_t size, loff_t *ppos)
209 {
210 	struct spu_context *ctx = file->private_data;
211 	char *local_store;
212 	loff_t pos = *ppos;
213 	int ret;
214 
215 	if (pos > LS_SIZE)
216 		return -EFBIG;
217 
218 	ret = spu_acquire(ctx);
219 	if (ret)
220 		return ret;
221 
222 	local_store = ctx->ops->get_ls(ctx);
223 	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
224 	spu_release(ctx);
225 
226 	return size;
227 }
228 
229 static vm_fault_t
230 spufs_mem_mmap_fault(struct vm_fault *vmf)
231 {
232 	struct vm_area_struct *vma = vmf->vma;
233 	struct spu_context *ctx	= vma->vm_file->private_data;
234 	unsigned long pfn, offset;
235 	vm_fault_t ret;
236 
237 	offset = vmf->pgoff << PAGE_SHIFT;
238 	if (offset >= LS_SIZE)
239 		return VM_FAULT_SIGBUS;
240 
241 	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
242 			vmf->address, offset);
243 
244 	if (spu_acquire(ctx))
245 		return VM_FAULT_NOPAGE;
246 
247 	if (ctx->state == SPU_STATE_SAVED) {
248 		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
249 		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
250 	} else {
251 		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
252 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
253 	}
254 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
255 
256 	spu_release(ctx);
257 
258 	return ret;
259 }
260 
261 static int spufs_mem_mmap_access(struct vm_area_struct *vma,
262 				unsigned long address,
263 				void *buf, int len, int write)
264 {
265 	struct spu_context *ctx = vma->vm_file->private_data;
266 	unsigned long offset = address - vma->vm_start;
267 	char *local_store;
268 
269 	if (write && !(vma->vm_flags & VM_WRITE))
270 		return -EACCES;
271 	if (spu_acquire(ctx))
272 		return -EINTR;
273 	if (offset + len > vma->vm_end - vma->vm_start)
274 		len = vma->vm_end - vma->vm_start - offset;
275 	local_store = ctx->ops->get_ls(ctx);
276 	if (write)
277 		memcpy_toio(local_store + offset, buf, len);
278 	else
279 		memcpy_fromio(buf, local_store + offset, len);
280 	spu_release(ctx);
281 	return len;
282 }
283 
284 static const struct vm_operations_struct spufs_mem_mmap_vmops = {
285 	.fault = spufs_mem_mmap_fault,
286 	.access = spufs_mem_mmap_access,
287 };
288 
289 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
290 {
291 	if (!(vma->vm_flags & VM_SHARED))
292 		return -EINVAL;
293 
294 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
295 	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
296 
297 	vma->vm_ops = &spufs_mem_mmap_vmops;
298 	return 0;
299 }
300 
301 static const struct file_operations spufs_mem_fops = {
302 	.open			= spufs_mem_open,
303 	.release		= spufs_mem_release,
304 	.read			= spufs_mem_read,
305 	.write			= spufs_mem_write,
306 	.llseek			= generic_file_llseek,
307 	.mmap			= spufs_mem_mmap,
308 };
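/*
 * Usage sketch (illustrative, userspace): the "mem" file exposes the SPU
 * local store both to read()/write() and to mmap().  The path and the
 * 256kB size are assumptions for the example (LS_SIZE is 256kB on Cell);
 * the mapping must be MAP_SHARED or spufs_mem_mmap() rejects it.
 *
 *	int fd = open("/spu/ctx/mem", O_RDWR);		// hypothetical path
 *	void *ls = mmap(NULL, 256 * 1024, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	if (ls != MAP_FAILED)
 *		memset(ls, 0, 256 * 1024);		// clear local store
 */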
309 
310 static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
311 				    unsigned long ps_offs,
312 				    unsigned long ps_size)
313 {
314 	struct spu_context *ctx = vmf->vma->vm_file->private_data;
315 	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
316 	int err = 0;
317 	vm_fault_t ret = VM_FAULT_NOPAGE;
318 
319 	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
320 
321 	if (offset >= ps_size)
322 		return VM_FAULT_SIGBUS;
323 
324 	if (fatal_signal_pending(current))
325 		return VM_FAULT_SIGBUS;
326 
327 	/*
328 	 * Because we release the mmap_lock, the context may be destroyed while
329 	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
330 	 * in the meantime.
331 	 */
332 	get_spu_context(ctx);
333 
334 	/*
335 	 * We have to wait for context to be loaded before we have
336 	 * pages to hand out to the user, but we don't want to wait
337 	 * with the mmap_lock held.
338 	 * It is possible to drop the mmap_lock here, but then we need
339 	 * to return VM_FAULT_NOPAGE because the mappings may have
340 	 * changed.
341 	 */
342 	if (spu_acquire(ctx))
343 		goto refault;
344 
345 	if (ctx->state == SPU_STATE_SAVED) {
346 		mmap_read_unlock(current->mm);
347 		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
348 		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
349 		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
350 		mmap_read_lock(current->mm);
351 	} else {
352 		area = ctx->spu->problem_phys + ps_offs;
353 		ret = vmf_insert_pfn(vmf->vma, vmf->address,
354 				(area + offset) >> PAGE_SHIFT);
355 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
356 	}
357 
358 	if (!err)
359 		spu_release(ctx);
360 
361 refault:
362 	put_spu_context(ctx);
363 	return ret;
364 }
365 
366 #if SPUFS_MMAP_4K
367 static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
368 {
369 	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
370 }
371 
372 static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
373 	.fault = spufs_cntl_mmap_fault,
374 };
375 
376 /*
377  * mmap support for problem state control area [0x4000 - 0x4fff].
378  */
379 static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
380 {
381 	if (!(vma->vm_flags & VM_SHARED))
382 		return -EINVAL;
383 
384 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
385 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
386 
387 	vma->vm_ops = &spufs_cntl_mmap_vmops;
388 	return 0;
389 }
390 #else /* SPUFS_MMAP_4K */
391 #define spufs_cntl_mmap NULL
392 #endif /* !SPUFS_MMAP_4K */
393 
394 static int spufs_cntl_get(void *data, u64 *val)
395 {
396 	struct spu_context *ctx = data;
397 	int ret;
398 
399 	ret = spu_acquire(ctx);
400 	if (ret)
401 		return ret;
402 	*val = ctx->ops->status_read(ctx);
403 	spu_release(ctx);
404 
405 	return 0;
406 }
407 
408 static int spufs_cntl_set(void *data, u64 val)
409 {
410 	struct spu_context *ctx = data;
411 	int ret;
412 
413 	ret = spu_acquire(ctx);
414 	if (ret)
415 		return ret;
416 	ctx->ops->runcntl_write(ctx, val);
417 	spu_release(ctx);
418 
419 	return 0;
420 }
421 
422 static int spufs_cntl_open(struct inode *inode, struct file *file)
423 {
424 	struct spufs_inode_info *i = SPUFS_I(inode);
425 	struct spu_context *ctx = i->i_ctx;
426 
427 	mutex_lock(&ctx->mapping_lock);
428 	file->private_data = ctx;
429 	if (!i->i_openers++)
430 		ctx->cntl = inode->i_mapping;
431 	mutex_unlock(&ctx->mapping_lock);
432 	return simple_attr_open(inode, file, spufs_cntl_get,
433 					spufs_cntl_set, "0x%08lx");
434 }
435 
436 static int
437 spufs_cntl_release(struct inode *inode, struct file *file)
438 {
439 	struct spufs_inode_info *i = SPUFS_I(inode);
440 	struct spu_context *ctx = i->i_ctx;
441 
442 	simple_attr_release(inode, file);
443 
444 	mutex_lock(&ctx->mapping_lock);
445 	if (!--i->i_openers)
446 		ctx->cntl = NULL;
447 	mutex_unlock(&ctx->mapping_lock);
448 	return 0;
449 }
450 
451 static const struct file_operations spufs_cntl_fops = {
452 	.open = spufs_cntl_open,
453 	.release = spufs_cntl_release,
454 	.read = simple_attr_read,
455 	.write = simple_attr_write,
456 	.llseek	= no_llseek,
457 	.mmap = spufs_cntl_mmap,
458 };
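/*
 * Usage sketch (illustrative, userspace): "cntl" behaves like a simple
 * attribute file.  read() yields the SPU status register formatted with
 * "0x%08lx"; write() parses a number and hands it to runcntl_write().
 * The path and run-control value are assumptions for the example.
 *
 *	int fd = open("/spu/ctx/cntl", O_RDWR);		// hypothetical path
 *	char buf[16];
 *	read(fd, buf, sizeof(buf));	// e.g. "0x00000001"
 *	write(fd, "1", 1);		// assumed: 1 sets the SPU runnable
 */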
459 
460 static int
461 spufs_regs_open(struct inode *inode, struct file *file)
462 {
463 	struct spufs_inode_info *i = SPUFS_I(inode);
464 	file->private_data = i->i_ctx;
465 	return 0;
466 }
467 
468 static ssize_t
469 spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm)
470 {
471 	return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs,
472 			       sizeof(ctx->csa.lscsa->gprs));
473 }
474 
475 static ssize_t
476 spufs_regs_read(struct file *file, char __user *buffer,
477 		size_t size, loff_t *pos)
478 {
479 	int ret;
480 	struct spu_context *ctx = file->private_data;
481 
482 	/* pre-check for file position: if we'd return EOF, there's no point
483 	 * causing a deschedule */
484 	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
485 		return 0;
486 
487 	ret = spu_acquire_saved(ctx);
488 	if (ret)
489 		return ret;
490 	ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs,
491 				      sizeof(ctx->csa.lscsa->gprs));
492 	spu_release_saved(ctx);
493 	return ret;
494 }
495 
496 static ssize_t
497 spufs_regs_write(struct file *file, const char __user *buffer,
498 		 size_t size, loff_t *pos)
499 {
500 	struct spu_context *ctx = file->private_data;
501 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
502 	int ret;
503 
504 	if (*pos >= sizeof(lscsa->gprs))
505 		return -EFBIG;
506 
507 	ret = spu_acquire_saved(ctx);
508 	if (ret)
509 		return ret;
510 
511 	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
512 					buffer, size);
513 
514 	spu_release_saved(ctx);
515 	return size;
516 }
517 
518 static const struct file_operations spufs_regs_fops = {
519 	.open	 = spufs_regs_open,
520 	.read    = spufs_regs_read,
521 	.write   = spufs_regs_write,
522 	.llseek  = generic_file_llseek,
523 };
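/*
 * Usage sketch (illustrative, userspace): "regs" is a flat binary image
 * of the 128 SPU general purpose registers in the local store context
 * save area (lscsa->gprs), 16 bytes per register under the usual SPU
 * layout.  The path is an assumption for the example.
 *
 *	int fd = open("/spu/ctx/regs", O_RDONLY);	// hypothetical path
 *	unsigned char gpr3[16];
 *	pread(fd, gpr3, sizeof(gpr3), 3 * 16);		// fetch register 3
 */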
524 
525 static ssize_t
526 spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm)
527 {
528 	return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr,
529 			       sizeof(ctx->csa.lscsa->fpcr));
530 }
531 
532 static ssize_t
533 spufs_fpcr_read(struct file *file, char __user * buffer,
534 		size_t size, loff_t * pos)
535 {
536 	int ret;
537 	struct spu_context *ctx = file->private_data;
538 
539 	ret = spu_acquire_saved(ctx);
540 	if (ret)
541 		return ret;
542 	ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr,
543 				      sizeof(ctx->csa.lscsa->fpcr));
544 	spu_release_saved(ctx);
545 	return ret;
546 }
547 
548 static ssize_t
549 spufs_fpcr_write(struct file *file, const char __user * buffer,
550 		 size_t size, loff_t * pos)
551 {
552 	struct spu_context *ctx = file->private_data;
553 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
554 	int ret;
555 
556 	if (*pos >= sizeof(lscsa->fpcr))
557 		return -EFBIG;
558 
559 	ret = spu_acquire_saved(ctx);
560 	if (ret)
561 		return ret;
562 
563 	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
564 					buffer, size);
565 
566 	spu_release_saved(ctx);
567 	return size;
568 }
569 
570 static const struct file_operations spufs_fpcr_fops = {
571 	.open = spufs_regs_open,
572 	.read = spufs_fpcr_read,
573 	.write = spufs_fpcr_write,
574 	.llseek = generic_file_llseek,
575 };
576 
577 /* generic open function for all pipe-like files */
578 static int spufs_pipe_open(struct inode *inode, struct file *file)
579 {
580 	struct spufs_inode_info *i = SPUFS_I(inode);
581 	file->private_data = i->i_ctx;
582 
583 	return stream_open(inode, file);
584 }
585 
586 /*
587  * Read as many bytes from the mailbox as possible, until
588  * one of the conditions becomes true:
589  *
590  * - no more data available in the mailbox
591  * - end of the user provided buffer
592  * - end of the mapped area
593  */
594 static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
595 			size_t len, loff_t *pos)
596 {
597 	struct spu_context *ctx = file->private_data;
598 	u32 mbox_data, __user *udata = (void __user *)buf;
599 	ssize_t count;
600 
601 	if (len < 4)
602 		return -EINVAL;
603 
604 	count = spu_acquire(ctx);
605 	if (count)
606 		return count;
607 
608 	for (count = 0; (count + 4) <= len; count += 4, udata++) {
609 		int ret;
610 		ret = ctx->ops->mbox_read(ctx, &mbox_data);
611 		if (ret == 0)
612 			break;
613 
614 		/*
615 		 * at the end of the mapped area, we can fault
616 		 * but still need to return the data we have
617 		 * read successfully so far.
618 		 */
619 		ret = put_user(mbox_data, udata);
620 		if (ret) {
621 			if (!count)
622 				count = -EFAULT;
623 			break;
624 		}
625 	}
626 	spu_release(ctx);
627 
628 	if (!count)
629 		count = -EAGAIN;
630 
631 	return count;
632 }
633 
634 static const struct file_operations spufs_mbox_fops = {
635 	.open	= spufs_pipe_open,
636 	.read	= spufs_mbox_read,
637 	.llseek	= no_llseek,
638 };
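/*
 * Usage sketch (illustrative, userspace): "mbox" never blocks --
 * spufs_mbox_read() returns -EAGAIN when the mailbox is empty -- and
 * data is consumed in 4-byte units.  The path is an assumption for the
 * example.
 *
 *	uint32_t word;
 *	int fd = open("/spu/ctx/mbox", O_RDONLY);	// hypothetical path
 *	ssize_t n = read(fd, &word, sizeof(word));
 *	if (n < 0 && errno == EAGAIN)
 *		;	// nothing queued yet, try again later
 */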
639 
640 static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
641 			size_t len, loff_t *pos)
642 {
643 	struct spu_context *ctx = file->private_data;
644 	ssize_t ret;
645 	u32 mbox_stat;
646 
647 	if (len < 4)
648 		return -EINVAL;
649 
650 	ret = spu_acquire(ctx);
651 	if (ret)
652 		return ret;
653 
654 	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
655 
656 	spu_release(ctx);
657 
658 	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
659 		return -EFAULT;
660 
661 	return 4;
662 }
663 
664 static const struct file_operations spufs_mbox_stat_fops = {
665 	.open	= spufs_pipe_open,
666 	.read	= spufs_mbox_stat_read,
667 	.llseek = no_llseek,
668 };
669 
670 /* low-level ibox access function */
671 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
672 {
673 	return ctx->ops->ibox_read(ctx, data);
674 }
675 
676 /* interrupt-level ibox callback function. */
677 void spufs_ibox_callback(struct spu *spu)
678 {
679 	struct spu_context *ctx = spu->ctx;
680 
681 	if (ctx)
682 		wake_up_all(&ctx->ibox_wq);
683 }
684 
685 /*
686  * Read as many bytes from the interrupt mailbox as possible, until
687  * one of the conditions becomes true:
688  *
689  * - no more data available in the mailbox
690  * - end of the user provided buffer
691  * - end of the mapped area
692  *
693  * If the file is opened without O_NONBLOCK, we wait here until
694  * any data is available, but return when we have been able to
695  * read something.
696  */
697 static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
698 			size_t len, loff_t *pos)
699 {
700 	struct spu_context *ctx = file->private_data;
701 	u32 ibox_data, __user *udata = (void __user *)buf;
702 	ssize_t count;
703 
704 	if (len < 4)
705 		return -EINVAL;
706 
707 	count = spu_acquire(ctx);
708 	if (count)
709 		goto out;
710 
711 	/* wait only for the first element */
712 	count = 0;
713 	if (file->f_flags & O_NONBLOCK) {
714 		if (!spu_ibox_read(ctx, &ibox_data)) {
715 			count = -EAGAIN;
716 			goto out_unlock;
717 		}
718 	} else {
719 		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
720 		if (count)
721 			goto out;
722 	}
723 
724 	/* if we can't write at all, return -EFAULT */
725 	count = put_user(ibox_data, udata);
726 	if (count)
727 		goto out_unlock;
728 
729 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
730 		int ret;
731 		ret = ctx->ops->ibox_read(ctx, &ibox_data);
732 		if (ret == 0)
733 			break;
734 		/*
735 		 * at the end of the mapped area, we can fault
736 		 * but still need to return the data we have
737 		 * read successfully so far.
738 		 */
739 		ret = put_user(ibox_data, udata);
740 		if (ret)
741 			break;
742 	}
743 
744 out_unlock:
745 	spu_release(ctx);
746 out:
747 	return count;
748 }
749 
750 static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
751 {
752 	struct spu_context *ctx = file->private_data;
753 	__poll_t mask;
754 
755 	poll_wait(file, &ctx->ibox_wq, wait);
756 
757 	/*
758 	 * For now keep this uninterruptible and also ignore the rule
759 	 * that poll should not sleep.  Will be fixed later.
760 	 */
761 	mutex_lock(&ctx->state_mutex);
762 	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
763 	spu_release(ctx);
764 
765 	return mask;
766 }
767 
768 static const struct file_operations spufs_ibox_fops = {
769 	.open	= spufs_pipe_open,
770 	.read	= spufs_ibox_read,
771 	.poll	= spufs_ibox_poll,
772 	.llseek = no_llseek,
773 };
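/*
 * Usage sketch (illustrative, userspace): unlike "mbox", a read from
 * "ibox" blocks until at least one word arrives unless the file was
 * opened with O_NONBLOCK, and poll() can be used to wait for readable
 * data first.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	uint32_t word;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, &word, sizeof(word));	// won't block now
 */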
774 
775 static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
776 			size_t len, loff_t *pos)
777 {
778 	struct spu_context *ctx = file->private_data;
779 	ssize_t ret;
780 	u32 ibox_stat;
781 
782 	if (len < 4)
783 		return -EINVAL;
784 
785 	ret = spu_acquire(ctx);
786 	if (ret)
787 		return ret;
788 	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
789 	spu_release(ctx);
790 
791 	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
792 		return -EFAULT;
793 
794 	return 4;
795 }
796 
797 static const struct file_operations spufs_ibox_stat_fops = {
798 	.open	= spufs_pipe_open,
799 	.read	= spufs_ibox_stat_read,
800 	.llseek = no_llseek,
801 };
802 
803 /* low-level mailbox write */
804 size_t spu_wbox_write(struct spu_context *ctx, u32 data)
805 {
806 	return ctx->ops->wbox_write(ctx, data);
807 }
808 
809 /* interrupt-level wbox callback function. */
810 void spufs_wbox_callback(struct spu *spu)
811 {
812 	struct spu_context *ctx = spu->ctx;
813 
814 	if (ctx)
815 		wake_up_all(&ctx->wbox_wq);
816 }
817 
818 /*
819  * Write as many bytes to the interrupt mailbox as possible, until
820  * one of the conditions becomes true:
821  *
822  * - the mailbox is full
823  * - end of the user provided buffer
824  * - end of the mapped area
825  *
826  * If the file is opened without O_NONBLOCK, we wait here until
827  * space is available, but return when we have been able to
828  * write something.
829  */
830 static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
831 			size_t len, loff_t *pos)
832 {
833 	struct spu_context *ctx = file->private_data;
834 	u32 wbox_data, __user *udata = (void __user *)buf;
835 	ssize_t count;
836 
837 	if (len < 4)
838 		return -EINVAL;
839 
840 	if (get_user(wbox_data, udata))
841 		return -EFAULT;
842 
843 	count = spu_acquire(ctx);
844 	if (count)
845 		goto out;
846 
847 	/*
848 	 * make sure we can at least write one element, by waiting
849 	 * in case of !O_NONBLOCK
850 	 */
851 	count = 0;
852 	if (file->f_flags & O_NONBLOCK) {
853 		if (!spu_wbox_write(ctx, wbox_data)) {
854 			count = -EAGAIN;
855 			goto out_unlock;
856 		}
857 	} else {
858 		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
859 		if (count)
860 			goto out;
861 	}
862 
863 
864 	/* write as much as possible */
865 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
866 		int ret;
867 		ret = get_user(wbox_data, udata);
868 		if (ret)
869 			break;
870 
871 		ret = spu_wbox_write(ctx, wbox_data);
872 		if (ret == 0)
873 			break;
874 	}
875 
876 out_unlock:
877 	spu_release(ctx);
878 out:
879 	return count;
880 }
881 
882 static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
883 {
884 	struct spu_context *ctx = file->private_data;
885 	__poll_t mask;
886 
887 	poll_wait(file, &ctx->wbox_wq, wait);
888 
889 	/*
890 	 * For now keep this uninterruptible and also ignore the rule
891 	 * that poll should not sleep.  Will be fixed later.
892 	 */
893 	mutex_lock(&ctx->state_mutex);
894 	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
895 	spu_release(ctx);
896 
897 	return mask;
898 }
899 
900 static const struct file_operations spufs_wbox_fops = {
901 	.open	= spufs_pipe_open,
902 	.write	= spufs_wbox_write,
903 	.poll	= spufs_wbox_poll,
904 	.llseek = no_llseek,
905 };
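/*
 * Usage sketch (illustrative, userspace): writes to "wbox" are also in
 * 4-byte units; without O_NONBLOCK the first word waits for mailbox
 * space, and poll() reports writability when at least one slot is free.
 *
 *	uint32_t word = 0xdeadbeef;		// arbitrary payload
 *	if (write(fd, &word, sizeof(word)) < 0 && errno == EAGAIN)
 *		;	// O_NONBLOCK and the mailbox is full
 */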
906 
907 static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
908 			size_t len, loff_t *pos)
909 {
910 	struct spu_context *ctx = file->private_data;
911 	ssize_t ret;
912 	u32 wbox_stat;
913 
914 	if (len < 4)
915 		return -EINVAL;
916 
917 	ret = spu_acquire(ctx);
918 	if (ret)
919 		return ret;
920 	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
921 	spu_release(ctx);
922 
923 	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
924 		return -EFAULT;
925 
926 	return 4;
927 }
928 
929 static const struct file_operations spufs_wbox_stat_fops = {
930 	.open	= spufs_pipe_open,
931 	.read	= spufs_wbox_stat_read,
932 	.llseek = no_llseek,
933 };
934 
935 static int spufs_signal1_open(struct inode *inode, struct file *file)
936 {
937 	struct spufs_inode_info *i = SPUFS_I(inode);
938 	struct spu_context *ctx = i->i_ctx;
939 
940 	mutex_lock(&ctx->mapping_lock);
941 	file->private_data = ctx;
942 	if (!i->i_openers++)
943 		ctx->signal1 = inode->i_mapping;
944 	mutex_unlock(&ctx->mapping_lock);
945 	return nonseekable_open(inode, file);
946 }
947 
948 static int
949 spufs_signal1_release(struct inode *inode, struct file *file)
950 {
951 	struct spufs_inode_info *i = SPUFS_I(inode);
952 	struct spu_context *ctx = i->i_ctx;
953 
954 	mutex_lock(&ctx->mapping_lock);
955 	if (!--i->i_openers)
956 		ctx->signal1 = NULL;
957 	mutex_unlock(&ctx->mapping_lock);
958 	return 0;
959 }
960 
961 static ssize_t spufs_signal1_dump(struct spu_context *ctx,
962 		struct coredump_params *cprm)
963 {
964 	if (!ctx->csa.spu_chnlcnt_RW[3])
965 		return 0;
966 	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3],
967 			       sizeof(ctx->csa.spu_chnldata_RW[3]));
968 }
969 
970 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
971 			size_t len)
972 {
973 	if (len < sizeof(ctx->csa.spu_chnldata_RW[3]))
974 		return -EINVAL;
975 	if (!ctx->csa.spu_chnlcnt_RW[3])
976 		return 0;
977 	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3],
978 			 sizeof(ctx->csa.spu_chnldata_RW[3])))
979 		return -EFAULT;
980 	return sizeof(ctx->csa.spu_chnldata_RW[3]);
981 }
982 
983 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
984 			size_t len, loff_t *pos)
985 {
986 	int ret;
987 	struct spu_context *ctx = file->private_data;
988 
989 	ret = spu_acquire_saved(ctx);
990 	if (ret)
991 		return ret;
992 	ret = __spufs_signal1_read(ctx, buf, len);
993 	spu_release_saved(ctx);
994 
995 	return ret;
996 }
997 
998 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
999 			size_t len, loff_t *pos)
1000 {
1001 	struct spu_context *ctx;
1002 	ssize_t ret;
1003 	u32 data;
1004 
1005 	ctx = file->private_data;
1006 
1007 	if (len < 4)
1008 		return -EINVAL;
1009 
1010 	if (copy_from_user(&data, buf, 4))
1011 		return -EFAULT;
1012 
1013 	ret = spu_acquire(ctx);
1014 	if (ret)
1015 		return ret;
1016 	ctx->ops->signal1_write(ctx, data);
1017 	spu_release(ctx);
1018 
1019 	return 4;
1020 }
1021 
1022 static vm_fault_t
1023 spufs_signal1_mmap_fault(struct vm_fault *vmf)
1024 {
1025 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1026 	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1027 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1028 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1029 	 * signal 1 and 2 area
1030 	 */
1031 	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1032 #else
1033 #error unsupported page size
1034 #endif
1035 }
1036 
1037 static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
1038 	.fault = spufs_signal1_mmap_fault,
1039 };
1040 
1041 static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
1042 {
1043 	if (!(vma->vm_flags & VM_SHARED))
1044 		return -EINVAL;
1045 
1046 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
1047 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1048 
1049 	vma->vm_ops = &spufs_signal1_mmap_vmops;
1050 	return 0;
1051 }
1052 
1053 static const struct file_operations spufs_signal1_fops = {
1054 	.open = spufs_signal1_open,
1055 	.release = spufs_signal1_release,
1056 	.read = spufs_signal1_read,
1057 	.write = spufs_signal1_write,
1058 	.mmap = spufs_signal1_mmap,
1059 	.llseek = no_llseek,
1060 };
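/*
 * Usage sketch (illustrative, userspace): writing four bytes to
 * "signal1" delivers a word to SPU signal notification register 1 via
 * signal1_write(); whether it overwrites or ORs into a pending value
 * depends on the mode set through the signal1_type attribute defined
 * further down in this file.
 *
 *	uint32_t sig = 1;
 *	write(fd, &sig, sizeof(sig));	// fd: an open signal1 file
 */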
1061 
1062 static const struct file_operations spufs_signal1_nosched_fops = {
1063 	.open = spufs_signal1_open,
1064 	.release = spufs_signal1_release,
1065 	.write = spufs_signal1_write,
1066 	.mmap = spufs_signal1_mmap,
1067 	.llseek = no_llseek,
1068 };
1069 
1070 static int spufs_signal2_open(struct inode *inode, struct file *file)
1071 {
1072 	struct spufs_inode_info *i = SPUFS_I(inode);
1073 	struct spu_context *ctx = i->i_ctx;
1074 
1075 	mutex_lock(&ctx->mapping_lock);
1076 	file->private_data = ctx;
1077 	if (!i->i_openers++)
1078 		ctx->signal2 = inode->i_mapping;
1079 	mutex_unlock(&ctx->mapping_lock);
1080 	return nonseekable_open(inode, file);
1081 }
1082 
1083 static int
1084 spufs_signal2_release(struct inode *inode, struct file *file)
1085 {
1086 	struct spufs_inode_info *i = SPUFS_I(inode);
1087 	struct spu_context *ctx = i->i_ctx;
1088 
1089 	mutex_lock(&ctx->mapping_lock);
1090 	if (!--i->i_openers)
1091 		ctx->signal2 = NULL;
1092 	mutex_unlock(&ctx->mapping_lock);
1093 	return 0;
1094 }
1095 
1096 static ssize_t spufs_signal2_dump(struct spu_context *ctx,
1097 		struct coredump_params *cprm)
1098 {
1099 	if (!ctx->csa.spu_chnlcnt_RW[4])
1100 		return 0;
1101 	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4],
1102 			       sizeof(ctx->csa.spu_chnldata_RW[4]));
1103 }
1104 
1105 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1106 			size_t len)
1107 {
1108 	if (len < sizeof(ctx->csa.spu_chnldata_RW[4]))
1109 		return -EINVAL;
1110 	if (!ctx->csa.spu_chnlcnt_RW[4])
1111 		return 0;
1112 	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4],
1113 			 sizeof(ctx->csa.spu_chnldata_RW[4])))
1114 		return -EFAULT;
1115 	return sizeof(ctx->csa.spu_chnldata_RW[4]);
1116 }
1117 
1118 static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
1119 			size_t len, loff_t *pos)
1120 {
1121 	struct spu_context *ctx = file->private_data;
1122 	int ret;
1123 
1124 	ret = spu_acquire_saved(ctx);
1125 	if (ret)
1126 		return ret;
1127 	ret = __spufs_signal2_read(ctx, buf, len);
1128 	spu_release_saved(ctx);
1129 
1130 	return ret;
1131 }
1132 
1133 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1134 			size_t len, loff_t *pos)
1135 {
1136 	struct spu_context *ctx;
1137 	ssize_t ret;
1138 	u32 data;
1139 
1140 	ctx = file->private_data;
1141 
1142 	if (len < 4)
1143 		return -EINVAL;
1144 
1145 	if (copy_from_user(&data, buf, 4))
1146 		return -EFAULT;
1147 
1148 	ret = spu_acquire(ctx);
1149 	if (ret)
1150 		return ret;
1151 	ctx->ops->signal2_write(ctx, data);
1152 	spu_release(ctx);
1153 
1154 	return 4;
1155 }
1156 
1157 #if SPUFS_MMAP_4K
1158 static vm_fault_t
1159 spufs_signal2_mmap_fault(struct vm_fault *vmf)
1160 {
1161 #if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1162 	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1163 #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1164 	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1165 	 * signal 1 and 2 area
1166 	 */
1167 	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1168 #else
1169 #error unsupported page size
1170 #endif
1171 }
1172 
1173 static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
1174 	.fault = spufs_signal2_mmap_fault,
1175 };
1176 
1177 static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
1178 {
1179 	if (!(vma->vm_flags & VM_SHARED))
1180 		return -EINVAL;
1181 
1182 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
1183 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1184 
1185 	vma->vm_ops = &spufs_signal2_mmap_vmops;
1186 	return 0;
1187 }
1188 #else /* SPUFS_MMAP_4K */
1189 #define spufs_signal2_mmap NULL
1190 #endif /* !SPUFS_MMAP_4K */
1191 
1192 static const struct file_operations spufs_signal2_fops = {
1193 	.open = spufs_signal2_open,
1194 	.release = spufs_signal2_release,
1195 	.read = spufs_signal2_read,
1196 	.write = spufs_signal2_write,
1197 	.mmap = spufs_signal2_mmap,
1198 	.llseek = no_llseek,
1199 };
1200 
1201 static const struct file_operations spufs_signal2_nosched_fops = {
1202 	.open = spufs_signal2_open,
1203 	.release = spufs_signal2_release,
1204 	.write = spufs_signal2_write,
1205 	.mmap = spufs_signal2_mmap,
1206 	.llseek = no_llseek,
1207 };
1208 
1209 /*
1210  * This is a wrapper around DEFINE_SPUFS_SIMPLE_ATTRIBUTE which does the
1211  * work of acquiring (or not) the SPU context before calling through
1212  * to the actual get routine. The set routine is called directly.
1213  */
1214 #define SPU_ATTR_NOACQUIRE	0
1215 #define SPU_ATTR_ACQUIRE	1
1216 #define SPU_ATTR_ACQUIRE_SAVED	2
1217 
1218 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
1219 static int __##__get(void *data, u64 *val)				\
1220 {									\
1221 	struct spu_context *ctx = data;					\
1222 	int ret = 0;							\
1223 									\
1224 	if (__acquire == SPU_ATTR_ACQUIRE) {				\
1225 		ret = spu_acquire(ctx);					\
1226 		if (ret)						\
1227 			return ret;					\
1228 		*val = __get(ctx);					\
1229 		spu_release(ctx);					\
1230 	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
1231 		ret = spu_acquire_saved(ctx);				\
1232 		if (ret)						\
1233 			return ret;					\
1234 		*val = __get(ctx);					\
1235 		spu_release_saved(ctx);					\
1236 	} else								\
1237 		*val = __get(ctx);					\
1238 									\
1239 	return 0;							\
1240 }									\
1241 DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
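/*
 * Illustrative expansion (hypothetical names): for
 *
 *	DEFINE_SPUFS_ATTRIBUTE(spufs_foo_ops, spufs_foo_get, spufs_foo_set,
 *			       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
 *
 * the generated __spufs_foo_get() wrapper reduces to
 *
 *	ret = spu_acquire_saved(ctx);
 *	if (ret)
 *		return ret;
 *	*val = spufs_foo_get(ctx);
 *	spu_release_saved(ctx);
 *
 * Because __acquire is a compile-time constant, the compiler discards
 * the unused branches, leaving only the locking variant asked for.
 */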
1242 
1243 static int spufs_signal1_type_set(void *data, u64 val)
1244 {
1245 	struct spu_context *ctx = data;
1246 	int ret;
1247 
1248 	ret = spu_acquire(ctx);
1249 	if (ret)
1250 		return ret;
1251 	ctx->ops->signal1_type_set(ctx, val);
1252 	spu_release(ctx);
1253 
1254 	return 0;
1255 }
1256 
1257 static u64 spufs_signal1_type_get(struct spu_context *ctx)
1258 {
1259 	return ctx->ops->signal1_type_get(ctx);
1260 }
1261 DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
1262 		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1263 
1264 
1265 static int spufs_signal2_type_set(void *data, u64 val)
1266 {
1267 	struct spu_context *ctx = data;
1268 	int ret;
1269 
1270 	ret = spu_acquire(ctx);
1271 	if (ret)
1272 		return ret;
1273 	ctx->ops->signal2_type_set(ctx, val);
1274 	spu_release(ctx);
1275 
1276 	return 0;
1277 }
1278 
1279 static u64 spufs_signal2_type_get(struct spu_context *ctx)
1280 {
1281 	return ctx->ops->signal2_type_get(ctx);
1282 }
1283 DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1284 		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1285 
1286 #if SPUFS_MMAP_4K
1287 static vm_fault_t
1288 spufs_mss_mmap_fault(struct vm_fault *vmf)
1289 {
1290 	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
1291 }
1292 
1293 static const struct vm_operations_struct spufs_mss_mmap_vmops = {
1294 	.fault = spufs_mss_mmap_fault,
1295 };
1296 
1297 /*
1298  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1299  */
1300 static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
1301 {
1302 	if (!(vma->vm_flags & VM_SHARED))
1303 		return -EINVAL;
1304 
1305 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
1306 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1307 
1308 	vma->vm_ops = &spufs_mss_mmap_vmops;
1309 	return 0;
1310 }
1311 #else /* SPUFS_MMAP_4K */
1312 #define spufs_mss_mmap NULL
1313 #endif /* !SPUFS_MMAP_4K */
1314 
1315 static int spufs_mss_open(struct inode *inode, struct file *file)
1316 {
1317 	struct spufs_inode_info *i = SPUFS_I(inode);
1318 	struct spu_context *ctx = i->i_ctx;
1319 
1320 	file->private_data = i->i_ctx;
1321 
1322 	mutex_lock(&ctx->mapping_lock);
1323 	if (!i->i_openers++)
1324 		ctx->mss = inode->i_mapping;
1325 	mutex_unlock(&ctx->mapping_lock);
1326 	return nonseekable_open(inode, file);
1327 }
1328 
1329 static int
1330 spufs_mss_release(struct inode *inode, struct file *file)
1331 {
1332 	struct spufs_inode_info *i = SPUFS_I(inode);
1333 	struct spu_context *ctx = i->i_ctx;
1334 
1335 	mutex_lock(&ctx->mapping_lock);
1336 	if (!--i->i_openers)
1337 		ctx->mss = NULL;
1338 	mutex_unlock(&ctx->mapping_lock);
1339 	return 0;
1340 }
1341 
1342 static const struct file_operations spufs_mss_fops = {
1343 	.open	 = spufs_mss_open,
1344 	.release = spufs_mss_release,
1345 	.mmap	 = spufs_mss_mmap,
1346 	.llseek  = no_llseek,
1347 };
1348 
1349 static vm_fault_t
1350 spufs_psmap_mmap_fault(struct vm_fault *vmf)
1351 {
1352 	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
1353 }
1354 
1355 static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
1356 	.fault = spufs_psmap_mmap_fault,
1357 };
1358 
1359 /*
1360  * mmap support for full problem state area [0x00000 - 0x1ffff].
1361  */
1362 static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
1363 {
1364 	if (!(vma->vm_flags & VM_SHARED))
1365 		return -EINVAL;
1366 
1367 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
1368 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1369 
1370 	vma->vm_ops = &spufs_psmap_mmap_vmops;
1371 	return 0;
1372 }
1373 
1374 static int spufs_psmap_open(struct inode *inode, struct file *file)
1375 {
1376 	struct spufs_inode_info *i = SPUFS_I(inode);
1377 	struct spu_context *ctx = i->i_ctx;
1378 
1379 	mutex_lock(&ctx->mapping_lock);
1380 	file->private_data = i->i_ctx;
1381 	if (!i->i_openers++)
1382 		ctx->psmap = inode->i_mapping;
1383 	mutex_unlock(&ctx->mapping_lock);
1384 	return nonseekable_open(inode, file);
1385 }
1386 
1387 static int
1388 spufs_psmap_release(struct inode *inode, struct file *file)
1389 {
1390 	struct spufs_inode_info *i = SPUFS_I(inode);
1391 	struct spu_context *ctx = i->i_ctx;
1392 
1393 	mutex_lock(&ctx->mapping_lock);
1394 	if (!--i->i_openers)
1395 		ctx->psmap = NULL;
1396 	mutex_unlock(&ctx->mapping_lock);
1397 	return 0;
1398 }
1399 
1400 static const struct file_operations spufs_psmap_fops = {
1401 	.open	 = spufs_psmap_open,
1402 	.release = spufs_psmap_release,
1403 	.mmap	 = spufs_psmap_mmap,
1404 	.llseek  = no_llseek,
1405 };
1406 
1407 
1408 #if SPUFS_MMAP_4K
1409 static vm_fault_t
1410 spufs_mfc_mmap_fault(struct vm_fault *vmf)
1411 {
1412 	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
1413 }
1414 
1415 static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
1416 	.fault = spufs_mfc_mmap_fault,
1417 };
1418 
1419 /*
1420  * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
1421  */
1422 static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
1423 {
1424 	if (!(vma->vm_flags & VM_SHARED))
1425 		return -EINVAL;
1426 
1427 	vm_flags_set(vma, VM_IO | VM_PFNMAP);
1428 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1429 
1430 	vma->vm_ops = &spufs_mfc_mmap_vmops;
1431 	return 0;
1432 }
1433 #else /* SPUFS_MMAP_4K */
1434 #define spufs_mfc_mmap NULL
1435 #endif /* !SPUFS_MMAP_4K */
1436 
1437 static int spufs_mfc_open(struct inode *inode, struct file *file)
1438 {
1439 	struct spufs_inode_info *i = SPUFS_I(inode);
1440 	struct spu_context *ctx = i->i_ctx;
1441 
1442 	/* we don't want to deal with DMA into other processes */
1443 	if (ctx->owner != current->mm)
1444 		return -EINVAL;
1445 
1446 	if (atomic_read(&inode->i_count) != 1)
1447 		return -EBUSY;
1448 
1449 	mutex_lock(&ctx->mapping_lock);
1450 	file->private_data = ctx;
1451 	if (!i->i_openers++)
1452 		ctx->mfc = inode->i_mapping;
1453 	mutex_unlock(&ctx->mapping_lock);
1454 	return nonseekable_open(inode, file);
1455 }
1456 
1457 static int
1458 spufs_mfc_release(struct inode *inode, struct file *file)
1459 {
1460 	struct spufs_inode_info *i = SPUFS_I(inode);
1461 	struct spu_context *ctx = i->i_ctx;
1462 
1463 	mutex_lock(&ctx->mapping_lock);
1464 	if (!--i->i_openers)
1465 		ctx->mfc = NULL;
1466 	mutex_unlock(&ctx->mapping_lock);
1467 	return 0;
1468 }
1469 
1470 /* interrupt-level mfc callback function. */
1471 void spufs_mfc_callback(struct spu *spu)
1472 {
1473 	struct spu_context *ctx = spu->ctx;
1474 
1475 	if (ctx)
1476 		wake_up_all(&ctx->mfc_wq);
1477 }
1478 
1479 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
1480 {
1481 	/* See if at least one tag group is complete */
1482 	/* FIXME we need locking around tagwait */
1483 	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
1484 	ctx->tagwait &= ~*status;
1485 	if (*status)
1486 		return 1;
1487 
1488 	/* enable interrupt waiting for any tag group,
1489 	   may silently fail if interrupts are already enabled */
1490 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1491 	return 0;
1492 }
1493 
1494 static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
1495 			size_t size, loff_t *pos)
1496 {
1497 	struct spu_context *ctx = file->private_data;
1498 	int ret = -EINVAL;
1499 	u32 status;
1500 
1501 	if (size != 4)
1502 		goto out;
1503 
1504 	ret = spu_acquire(ctx);
1505 	if (ret)
1506 		return ret;
1507 
1508 	ret = -EINVAL;
1509 	if (file->f_flags & O_NONBLOCK) {
1510 		status = ctx->ops->read_mfc_tagstatus(ctx);
1511 		if (!(status & ctx->tagwait))
1512 			ret = -EAGAIN;
1513 		else
1514 			/* XXX(hch): shouldn't we clear ret here? */
1515 			ctx->tagwait &= ~status;
1516 	} else {
1517 		ret = spufs_wait(ctx->mfc_wq,
1518 			   spufs_read_mfc_tagstatus(ctx, &status));
1519 		if (ret)
1520 			goto out;
1521 	}
1522 	spu_release(ctx);
1523 
1524 	ret = 4;
1525 	if (copy_to_user(buffer, &status, 4))
1526 		ret = -EFAULT;
1527 
1528 out:
1529 	return ret;
1530 }
1531 
1532 static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
1533 {
1534 	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
1535 		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
1536 
1537 	switch (cmd->cmd) {
1538 	case MFC_PUT_CMD:
1539 	case MFC_PUTF_CMD:
1540 	case MFC_PUTB_CMD:
1541 	case MFC_GET_CMD:
1542 	case MFC_GETF_CMD:
1543 	case MFC_GETB_CMD:
1544 		break;
1545 	default:
1546 		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
1547 		return -EIO;
1548 	}
1549 
1550 	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
1551 		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
1552 				cmd->ea, cmd->lsa);
1553 		return -EIO;
1554 	}
1555 
1556 	switch (cmd->size & 0xf) {
1557 	case 1:
1558 		break;
1559 	case 2:
1560 		if (cmd->lsa & 1)
1561 			goto error;
1562 		break;
1563 	case 4:
1564 		if (cmd->lsa & 3)
1565 			goto error;
1566 		break;
1567 	case 8:
1568 		if (cmd->lsa & 7)
1569 			goto error;
1570 		break;
1571 	case 0:
1572 		if (cmd->lsa & 15)
1573 			goto error;
1574 		break;
1575 	error:
1576 	default:
1577 		pr_debug("invalid DMA alignment %x for size %x\n",
1578 			cmd->lsa & 0xf, cmd->size);
1579 		return -EIO;
1580 	}
1581 
1582 	if (cmd->size > 16 * 1024) {
1583 		pr_debug("invalid DMA size %x\n", cmd->size);
1584 		return -EIO;
1585 	}
1586 
1587 	if (cmd->tag & 0xfff0) {
1588 		/* we reserve the higher tag numbers for kernel use */
1589 		pr_debug("invalid DMA tag\n");
1590 		return -EIO;
1591 	}
1592 
1593 	if (cmd->class) {
1594 		/* not supported in this version */
1595 		pr_debug("invalid DMA class\n");
1596 		return -EIO;
1597 	}
1598 
1599 	return 0;
1600 }
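/*
 * Illustrative example of a command that passes the checks above: lsa
 * and ea agree in their low four bits, the size is a whole number of
 * 16-byte quadwords below the 16kB limit, the tag stays in the 0..15
 * range left to userspace, and the class field is zero.  Field values
 * are made up for the example.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa	= 0x1000,	// local store address
 *		.ea	= 0x10001000,	// effective address, same alignment
 *		.size	= 4096,		// multiple of 16, <= 16kB
 *		.tag	= 5,		// tags with bits above 0xf are
 *					// reserved for kernel use
 *		.class	= 0,		// only class 0 is supported
 *		.cmd	= MFC_GET_CMD,	// DMA from ea into local store
 *	};
 */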
1601 
1602 static int spu_send_mfc_command(struct spu_context *ctx,
1603 				struct mfc_dma_command cmd,
1604 				int *error)
1605 {
1606 	*error = ctx->ops->send_mfc_command(ctx, &cmd);
1607 	if (*error == -EAGAIN) {
1608 		/* wait for any tag group to complete
1609 		   so we have space for the new command */
1610 		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
1611 		/* try again, because the queue may have
1612 		   drained in the meantime */
1613 		*error = ctx->ops->send_mfc_command(ctx, &cmd);
1614 		if (*error == -EAGAIN)
1615 			return 0;
1616 	}
1617 	return 1;
1618 }
1619 
1620 static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
1621 			size_t size, loff_t *pos)
1622 {
1623 	struct spu_context *ctx = file->private_data;
1624 	struct mfc_dma_command cmd;
1625 	int ret = -EINVAL;
1626 
1627 	if (size != sizeof cmd)
1628 		goto out;
1629 
1630 	ret = -EFAULT;
1631 	if (copy_from_user(&cmd, buffer, sizeof cmd))
1632 		goto out;
1633 
1634 	ret = spufs_check_valid_dma(&cmd);
1635 	if (ret)
1636 		goto out;
1637 
1638 	ret = spu_acquire(ctx);
1639 	if (ret)
1640 		goto out;
1641 
1642 	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
1643 	if (ret)
1644 		goto out;
1645 
1646 	if (file->f_flags & O_NONBLOCK) {
1647 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
1648 	} else {
1649 		int status;
1650 		ret = spufs_wait(ctx->mfc_wq,
1651 				 spu_send_mfc_command(ctx, cmd, &status));
1652 		if (ret)
1653 			goto out;
1654 		if (status)
1655 			ret = status;
1656 	}
1657 
1658 	if (ret)
1659 		goto out_unlock;
1660 
1661 	ctx->tagwait |= 1 << cmd.tag;
1662 	ret = size;
1663 
1664 out_unlock:
1665 	spu_release(ctx);
1666 out:
1667 	return ret;
1668 }
1669 
1670 static __poll_t spufs_mfc_poll(struct file *file, poll_table *wait)
1671 {
1672 	struct spu_context *ctx = file->private_data;
1673 	u32 free_elements, tagstatus;
1674 	__poll_t mask;
1675 
1676 	poll_wait(file, &ctx->mfc_wq, wait);
1677 
1678 	/*
1679 	 * For now keep this uninterruptible and also ignore the rule
1680 	 * that poll should not sleep.  Will be fixed later.
1681 	 */
1682 	mutex_lock(&ctx->state_mutex);
1683 	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
1684 	free_elements = ctx->ops->get_mfc_free_elements(ctx);
1685 	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
1686 	spu_release(ctx);
1687 
1688 	mask = 0;
1689 	if (free_elements & 0xffff)
1690 		mask |= EPOLLOUT | EPOLLWRNORM;
1691 	if (tagstatus & ctx->tagwait)
1692 		mask |= EPOLLIN | EPOLLRDNORM;
1693 
1694 	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
1695 		free_elements, tagstatus, ctx->tagwait);
1696 
1697 	return mask;
1698 }
1699 
1700 static int spufs_mfc_flush(struct file *file, fl_owner_t id)
1701 {
1702 	struct spu_context *ctx = file->private_data;
1703 	int ret;
1704 
1705 	ret = spu_acquire(ctx);
1706 	if (ret)
1707 		return ret;
1708 
1709 	spu_release(ctx);
1710 
1711 	return 0;
1712 }
1713 
1714 static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1715 {
1716 	struct inode *inode = file_inode(file);
1717 	int err = file_write_and_wait_range(file, start, end);
1718 	if (!err) {
1719 		inode_lock(inode);
1720 		err = spufs_mfc_flush(file, NULL);
1721 		inode_unlock(inode);
1722 	}
1723 	return err;
1724 }
1725 
1726 static const struct file_operations spufs_mfc_fops = {
1727 	.open	 = spufs_mfc_open,
1728 	.release = spufs_mfc_release,
1729 	.read	 = spufs_mfc_read,
1730 	.write	 = spufs_mfc_write,
1731 	.poll	 = spufs_mfc_poll,
1732 	.flush	 = spufs_mfc_flush,
1733 	.fsync	 = spufs_mfc_fsync,
1734 	.mmap	 = spufs_mfc_mmap,
1735 	.llseek  = no_llseek,
1736 };
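/*
 * Usage sketch (illustrative, userspace): a complete DMA round trip
 * through the "mfc" file is a write() of one mfc_dma_command followed
 * by a 4-byte read() that waits until a tag group in ctx->tagwait
 * completes and returns the finished tag mask.
 *
 *	write(fd, &cmd, sizeof(cmd));	// queue the DMA, see example above
 *	uint32_t done;
 *	read(fd, &done, sizeof(done));	// blocks unless O_NONBLOCK
 *	// done has bit (1 << cmd.tag) set once that tag group finished
 */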
1737 
1738 static int spufs_npc_set(void *data, u64 val)
1739 {
1740 	struct spu_context *ctx = data;
1741 	int ret;
1742 
1743 	ret = spu_acquire(ctx);
1744 	if (ret)
1745 		return ret;
1746 	ctx->ops->npc_write(ctx, val);
1747 	spu_release(ctx);
1748 
1749 	return 0;
1750 }
1751 
1752 static u64 spufs_npc_get(struct spu_context *ctx)
1753 {
1754 	return ctx->ops->npc_read(ctx);
1755 }
1756 DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
1757 		       "0x%llx\n", SPU_ATTR_ACQUIRE);
1758 
1759 static int spufs_decr_set(void *data, u64 val)
1760 {
1761 	struct spu_context *ctx = data;
1762 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1763 	int ret;
1764 
1765 	ret = spu_acquire_saved(ctx);
1766 	if (ret)
1767 		return ret;
1768 	lscsa->decr.slot[0] = (u32) val;
1769 	spu_release_saved(ctx);
1770 
1771 	return 0;
1772 }
1773 
1774 static u64 spufs_decr_get(struct spu_context *ctx)
1775 {
1776 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1777 	return lscsa->decr.slot[0];
1778 }
1779 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
1780 		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1781 
1782 static int spufs_decr_status_set(void *data, u64 val)
1783 {
1784 	struct spu_context *ctx = data;
1785 	int ret;
1786 
1787 	ret = spu_acquire_saved(ctx);
1788 	if (ret)
1789 		return ret;
1790 	if (val)
1791 		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
1792 	else
1793 		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1794 	spu_release_saved(ctx);
1795 
1796 	return 0;
1797 }
1798 
1799 static u64 spufs_decr_status_get(struct spu_context *ctx)
1800 {
1801 	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
1802 		return SPU_DECR_STATUS_RUNNING;
1803 	else
1804 		return 0;
1805 }
1806 DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
1807 		       spufs_decr_status_set, "0x%llx\n",
1808 		       SPU_ATTR_ACQUIRE_SAVED);
1809 
1810 static int spufs_event_mask_set(void *data, u64 val)
1811 {
1812 	struct spu_context *ctx = data;
1813 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1814 	int ret;
1815 
1816 	ret = spu_acquire_saved(ctx);
1817 	if (ret)
1818 		return ret;
1819 	lscsa->event_mask.slot[0] = (u32) val;
1820 	spu_release_saved(ctx);
1821 
1822 	return 0;
1823 }
1824 
1825 static u64 spufs_event_mask_get(struct spu_context *ctx)
1826 {
1827 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1828 	return lscsa->event_mask.slot[0];
1829 }
1830 
1831 DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
1832 		       spufs_event_mask_set, "0x%llx\n",
1833 		       SPU_ATTR_ACQUIRE_SAVED);
1834 
1835 static u64 spufs_event_status_get(struct spu_context *ctx)
1836 {
1837 	struct spu_state *state = &ctx->csa;
1838 	u64 stat;
1839 	stat = state->spu_chnlcnt_RW[0];
1840 	if (stat)
1841 		return state->spu_chnldata_RW[0];
1842 	return 0;
1843 }
1844 DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
1845 		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1846 
1847 static int spufs_srr0_set(void *data, u64 val)
1848 {
1849 	struct spu_context *ctx = data;
1850 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1851 	int ret;
1852 
1853 	ret = spu_acquire_saved(ctx);
1854 	if (ret)
1855 		return ret;
1856 	lscsa->srr0.slot[0] = (u32) val;
1857 	spu_release_saved(ctx);
1858 
1859 	return 0;
1860 }
1861 
1862 static u64 spufs_srr0_get(struct spu_context *ctx)
1863 {
1864 	struct spu_lscsa *lscsa = ctx->csa.lscsa;
1865 	return lscsa->srr0.slot[0];
1866 }
1867 DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
1868 		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1869 
1870 static u64 spufs_id_get(struct spu_context *ctx)
1871 {
1872 	u64 num;
1873 
1874 	if (ctx->state == SPU_STATE_RUNNABLE)
1875 		num = ctx->spu->number;
1876 	else
1877 		num = (unsigned int)-1;
1878 
1879 	return num;
1880 }
1881 DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
1882 		       SPU_ATTR_ACQUIRE);
1883 
1884 static u64 spufs_object_id_get(struct spu_context *ctx)
1885 {
1886 	/* FIXME: Should there really be no locking here? */
1887 	return ctx->object_id;
1888 }
1889 
1890 static int spufs_object_id_set(void *data, u64 id)
1891 {
1892 	struct spu_context *ctx = data;
1893 	ctx->object_id = id;
1894 
1895 	return 0;
1896 }
1897 
1898 DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
1899 		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
1900 
1901 static u64 spufs_lslr_get(struct spu_context *ctx)
1902 {
1903 	return ctx->csa.priv2.spu_lslr_RW;
1904 }
1905 DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
1906 		       SPU_ATTR_ACQUIRE_SAVED);
1907 
1908 static int spufs_info_open(struct inode *inode, struct file *file)
1909 {
1910 	struct spufs_inode_info *i = SPUFS_I(inode);
1911 	struct spu_context *ctx = i->i_ctx;
1912 	file->private_data = ctx;
1913 	return 0;
1914 }
1915 
1916 static int spufs_caps_show(struct seq_file *s, void *private)
1917 {
1918 	struct spu_context *ctx = s->private;
1919 
1920 	if (!(ctx->flags & SPU_CREATE_NOSCHED))
1921 		seq_puts(s, "sched\n");
1922 	if (!(ctx->flags & SPU_CREATE_ISOLATE))
1923 		seq_puts(s, "step\n");
1924 	return 0;
1925 }
1926 
1927 static int spufs_caps_open(struct inode *inode, struct file *file)
1928 {
1929 	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
1930 }
1931 
1932 static const struct file_operations spufs_caps_fops = {
1933 	.open		= spufs_caps_open,
1934 	.read		= seq_read,
1935 	.llseek		= seq_lseek,
1936 	.release	= single_release,
1937 };
1938 
1939 static ssize_t spufs_mbox_info_dump(struct spu_context *ctx,
1940 		struct coredump_params *cprm)
1941 {
1942 	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
1943 		return 0;
1944 	return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R,
1945 			       sizeof(ctx->csa.prob.pu_mb_R));
1946 }
1947 
1948 static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
1949 				   size_t len, loff_t *pos)
1950 {
1951 	struct spu_context *ctx = file->private_data;
1952 	u32 stat, data;
1953 	int ret;
1954 
1955 	ret = spu_acquire_saved(ctx);
1956 	if (ret)
1957 		return ret;
1958 	spin_lock(&ctx->csa.register_lock);
1959 	stat = ctx->csa.prob.mb_stat_R;
1960 	data = ctx->csa.prob.pu_mb_R;
1961 	spin_unlock(&ctx->csa.register_lock);
1962 	spu_release_saved(ctx);
1963 
1964 	/* EOF if there's no entry in the mbox */
1965 	if (!(stat & 0x0000ff))
1966 		return 0;
1967 
1968 	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
1969 }
1970 
1971 static const struct file_operations spufs_mbox_info_fops = {
1972 	.open = spufs_info_open,
1973 	.read = spufs_mbox_info_read,
1974 	.llseek  = generic_file_llseek,
1975 };
1976 
1977 static ssize_t spufs_ibox_info_dump(struct spu_context *ctx,
1978 		struct coredump_params *cprm)
1979 {
1980 	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
1981 		return 0;
1982 	return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R,
1983 			       sizeof(ctx->csa.priv2.puint_mb_R));
1984 }
1985 
1986 static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
1987 				   size_t len, loff_t *pos)
1988 {
1989 	struct spu_context *ctx = file->private_data;
1990 	u32 stat, data;
1991 	int ret;
1992 
1993 	ret = spu_acquire_saved(ctx);
1994 	if (ret)
1995 		return ret;
1996 	spin_lock(&ctx->csa.register_lock);
1997 	stat = ctx->csa.prob.mb_stat_R;
1998 	data = ctx->csa.priv2.puint_mb_R;
1999 	spin_unlock(&ctx->csa.register_lock);
2000 	spu_release_saved(ctx);
2001 
2002 	/* EOF if there's no entry in the ibox */
2003 	if (!(stat & 0xff0000))
2004 		return 0;
2005 
2006 	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
2007 }
2008 
2009 static const struct file_operations spufs_ibox_info_fops = {
2010 	.open = spufs_info_open,
2011 	.read = spufs_ibox_info_read,
2012 	.llseek  = generic_file_llseek,
2013 };
2014 
2015 static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
2016 {
2017 	return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
2018 }
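/*
 * Worked example: bits 8..15 of mb_stat_R hold the number of free wbox
 * slots, and the queue is four entries deep (which the "4 - free"
 * arithmetic above assumes).  A status of 0x0200 means two slots free,
 * so two entries are occupied and the count is 2 * sizeof(u32) == 8
 * bytes of dump data.
 */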
2019 
2020 static ssize_t spufs_wbox_info_dump(struct spu_context *ctx,
2021 		struct coredump_params *cprm)
2022 {
2023 	return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data,
2024 			spufs_wbox_info_cnt(ctx));
2025 }
2026 
2027 static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
2028 				   size_t len, loff_t *pos)
2029 {
2030 	struct spu_context *ctx = file->private_data;
2031 	u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
2032 	int ret, count;
2033 
2034 	ret = spu_acquire_saved(ctx);
2035 	if (ret)
2036 		return ret;
2037 	spin_lock(&ctx->csa.register_lock);
2038 	count = spufs_wbox_info_cnt(ctx);
2039 	memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
2040 	spin_unlock(&ctx->csa.register_lock);
2041 	spu_release_saved(ctx);
2042 
2043 	return simple_read_from_buffer(buf, len, pos, &data,
2044 				count * sizeof(u32));
2045 }
2046 
2047 static const struct file_operations spufs_wbox_info_fops = {
2048 	.open = spufs_info_open,
2049 	.read = spufs_wbox_info_read,
2050 	.llseek  = generic_file_llseek,
2051 };
2052 
2053 static void spufs_get_dma_info(struct spu_context *ctx,
2054 		struct spu_dma_info *info)
2055 {
2056 	int i;
2057 
2058 	info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
2059 	info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
2060 	info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
2061 	info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
2062 	info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
2063 	for (i = 0; i < 16; i++) {
2064 		struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
2065 		struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
2066 
2067 		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
2068 		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
2069 		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
2070 		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
2071 	}
2072 }
2073 
2074 static ssize_t spufs_dma_info_dump(struct spu_context *ctx,
2075 		struct coredump_params *cprm)
2076 {
2077 	struct spu_dma_info info;
2078 
2079 	spufs_get_dma_info(ctx, &info);
2080 	return spufs_dump_emit(cprm, &info, sizeof(info));
2081 }
2082 
2083 static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
2084 			      size_t len, loff_t *pos)
2085 {
2086 	struct spu_context *ctx = file->private_data;
2087 	struct spu_dma_info info;
2088 	int ret;
2089 
2090 	ret = spu_acquire_saved(ctx);
2091 	if (ret)
2092 		return ret;
2093 	spin_lock(&ctx->csa.register_lock);
2094 	spufs_get_dma_info(ctx, &info);
2095 	spin_unlock(&ctx->csa.register_lock);
2096 	spu_release_saved(ctx);
2097 
2098 	return simple_read_from_buffer(buf, len, pos, &info,
2099 				sizeof(info));
2100 }
2101 
2102 static const struct file_operations spufs_dma_info_fops = {
2103 	.open = spufs_info_open,
2104 	.read = spufs_dma_info_read,
2105 	.llseek = no_llseek,
2106 };
2107 
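/*
 * Same idea as spufs_get_dma_info(), but for the proxy (PPE-initiated)
 * DMA state: the query registers live in the problem-state area, and
 * the proxy command queue has eight entries rather than sixteen.
 */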
2108 static void spufs_get_proxydma_info(struct spu_context *ctx,
2109 		struct spu_proxydma_info *info)
2110 {
2111 	int i;
2112 
2113 	info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
2114 	info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
2115 	info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
2116 
2117 	for (i = 0; i < 8; i++) {
2118 		struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
2119 		struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
2120 
2121 		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
2122 		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
2123 		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
2124 		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
2125 	}
2126 }
2127 
2128 static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx,
2129 		struct coredump_params *cprm)
2130 {
2131 	struct spu_proxydma_info info;
2132 
2133 	spufs_get_proxydma_info(ctx, &info);
2134 	return spufs_dump_emit(cprm, &info, sizeof(info));
2135 }
2136 
2137 static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
2138 				   size_t len, loff_t *pos)
2139 {
2140 	struct spu_context *ctx = file->private_data;
2141 	struct spu_proxydma_info info;
2142 	int ret;
2143 
2144 	if (len < sizeof(info))
2145 		return -EINVAL;
2146 
2147 	ret = spu_acquire_saved(ctx);
2148 	if (ret)
2149 		return ret;
2150 	spin_lock(&ctx->csa.register_lock);
2151 	spufs_get_proxydma_info(ctx, &info);
2152 	spin_unlock(&ctx->csa.register_lock);
2153 	spu_release_saved(ctx);
2154 
2155 	return simple_read_from_buffer(buf, len, pos, &info,
2156 				sizeof(info));
2157 }
2158 
2159 static const struct file_operations spufs_proxydma_info_fops = {
2160 	.open = spufs_info_open,
2161 	.read = spufs_proxydma_info_read,
2162 	.llseek = no_llseek,
2163 };
2164 
2165 static int spufs_show_tid(struct seq_file *s, void *private)
2166 {
2167 	struct spu_context *ctx = s->private;
2168 
2169 	seq_printf(s, "%d\n", ctx->tid);
2170 	return 0;
2171 }
2172 
2173 static int spufs_tid_open(struct inode *inode, struct file *file)
2174 {
2175 	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
2176 }
2177 
2178 static const struct file_operations spufs_tid_fops = {
2179 	.open		= spufs_tid_open,
2180 	.read		= seq_read,
2181 	.llseek		= seq_lseek,
2182 	.release	= single_release,
2183 };
2184 
2185 static const char *ctx_state_names[] = {
2186 	"user", "system", "iowait", "loaded"
2187 };
2188 
2189 static unsigned long long spufs_acct_time(struct spu_context *ctx,
2190 		enum spu_utilization_state state)
2191 {
2192 	unsigned long long time = ctx->stats.times[state];
2193 
	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well-defined
	 * state transitions.  If the context is lazily loaded, however, its
	 * utilization statistics are not updated that way, because the
	 * controlling thread is not tightly coupled with the execution of
	 * the spu context.  In that case, calculate and apply the time
	 * delta since the last recorded state of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state)
		time += ktime_get_ns() - ctx->stats.tstamp;
2206 
2207 	return time / NSEC_PER_MSEC;
2208 }
2209 
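/*
 * spufs_slb_flts() and spufs_class2_intrs() below use the same
 * base-plus-delta scheme: while the context owns a physical SPU, the
 * per-SPU counters keep advancing, so the context's share is its saved
 * count plus however much the SPU counter has grown past the recorded
 * *_base value.
 */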
2210 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
2211 {
2212 	unsigned long long slb_flts = ctx->stats.slb_flt;
2213 
2214 	if (ctx->state == SPU_STATE_RUNNABLE) {
2215 		slb_flts += (ctx->spu->stats.slb_flt -
2216 			     ctx->stats.slb_flt_base);
2217 	}
2218 
2219 	return slb_flts;
2220 }
2221 
2222 static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
2223 {
2224 	unsigned long long class2_intrs = ctx->stats.class2_intr;
2225 
2226 	if (ctx->state == SPU_STATE_RUNNABLE) {
2227 		class2_intrs += (ctx->spu->stats.class2_intr -
2228 				 ctx->stats.class2_intr_base);
2229 	}
2230 
2231 	return class2_intrs;
2232 }
2233 
2235 static int spufs_show_stat(struct seq_file *s, void *private)
2236 {
2237 	struct spu_context *ctx = s->private;
2238 	int ret;
2239 
2240 	ret = spu_acquire(ctx);
2241 	if (ret)
2242 		return ret;
2243 
2244 	seq_printf(s, "%s %llu %llu %llu %llu "
2245 		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
2246 		ctx_state_names[ctx->stats.util_state],
2247 		spufs_acct_time(ctx, SPU_UTIL_USER),
2248 		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
2249 		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
2250 		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
2251 		ctx->stats.vol_ctx_switch,
2252 		ctx->stats.invol_ctx_switch,
2253 		spufs_slb_flts(ctx),
2254 		ctx->stats.hash_flt,
2255 		ctx->stats.min_flt,
2256 		ctx->stats.maj_flt,
2257 		spufs_class2_intrs(ctx),
2258 		ctx->stats.libassist);
2259 	spu_release(ctx);
2260 	return 0;
2261 }
2262 
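/*
 * An illustrative line (made-up values) from the "stat" file, in the
 * order printed above: utilization state, the four per-state times in
 * milliseconds, voluntary and involuntary context switches, SLB
 * faults, hash faults, minor and major faults, class 2 interrupts and
 * libassist calls:
 *
 *	user 421 12 305 0 17 3 2 0 154 1 5 9
 */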
2263 static int spufs_stat_open(struct inode *inode, struct file *file)
2264 {
2265 	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
2266 }
2267 
2268 static const struct file_operations spufs_stat_fops = {
2269 	.open		= spufs_stat_open,
2270 	.read		= seq_read,
2271 	.llseek		= seq_lseek,
2272 	.release	= single_release,
2273 };
2274 
2275 static inline int spufs_switch_log_used(struct spu_context *ctx)
2276 {
2277 	return (ctx->switch_log->head - ctx->switch_log->tail) %
2278 		SWITCH_LOG_BUFSIZE;
2279 }
2280 
2281 static inline int spufs_switch_log_avail(struct spu_context *ctx)
2282 {
2283 	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
2284 }
2285 
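/*
 * head and tail index a ring of SWITCH_LOG_BUFSIZE entries, with
 * head == tail meaning empty.  spu_switch_log_notify() only appends
 * while spufs_switch_log_avail() > 1, keeping one slot free so that a
 * full ring is never mistaken for an empty one.
 */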
2286 static int spufs_switch_log_open(struct inode *inode, struct file *file)
2287 {
2288 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2289 	int rc;
2290 
2291 	rc = spu_acquire(ctx);
2292 	if (rc)
2293 		return rc;
2294 
2295 	if (ctx->switch_log) {
2296 		rc = -EBUSY;
2297 		goto out;
2298 	}
2299 
2300 	ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log,
2301 				  SWITCH_LOG_BUFSIZE), GFP_KERNEL);
2302 
2303 	if (!ctx->switch_log) {
2304 		rc = -ENOMEM;
2305 		goto out;
2306 	}
2307 
2308 	ctx->switch_log->head = ctx->switch_log->tail = 0;
2309 	init_waitqueue_head(&ctx->switch_log->wait);
2310 	rc = 0;
2311 
2312 out:
2313 	spu_release(ctx);
2314 	return rc;
2315 }
2316 
2317 static int spufs_switch_log_release(struct inode *inode, struct file *file)
2318 {
2319 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2320 	int rc;
2321 
2322 	rc = spu_acquire(ctx);
2323 	if (rc)
2324 		return rc;
2325 
2326 	kfree(ctx->switch_log);
2327 	ctx->switch_log = NULL;
2328 	spu_release(ctx);
2329 
2330 	return 0;
2331 }
2332 
2333 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
2334 {
2335 	struct switch_log_entry *p;
2336 
2337 	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
2338 
2339 	return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",
2340 			(unsigned long long) p->tstamp.tv_sec,
2341 			(unsigned int) p->tstamp.tv_nsec,
2342 			p->spu_id,
2343 			(unsigned int) p->type,
2344 			(unsigned int) p->val,
2345 			(unsigned long long) p->timebase);
2346 }
2347 
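/*
 * switch_log_sprint() renders each record as one text line: timestamp
 * as seconds.nanoseconds, SPU number (-1 if not loaded), event type,
 * event value, and the timebase, e.g. (illustrative values):
 *
 *	1253.000046783 1 3 7 40541441517
 */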
2348 static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2349 			     size_t len, loff_t *ppos)
2350 {
2351 	struct inode *inode = file_inode(file);
2352 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2353 	int error = 0, cnt = 0;
2354 
2355 	if (!buf)
2356 		return -EINVAL;
2357 
2358 	error = spu_acquire(ctx);
2359 	if (error)
2360 		return error;
2361 
2362 	while (cnt < len) {
2363 		char tbuf[128];
2364 		int width;
2365 
2366 		if (spufs_switch_log_used(ctx) == 0) {
2367 			if (cnt > 0) {
2368 				/* If there's data ready to go, we can
2369 				 * just return straight away */
2370 				break;
2371 
2372 			} else if (file->f_flags & O_NONBLOCK) {
2373 				error = -EAGAIN;
2374 				break;
2375 
2376 			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're inside read(),
				 * the file cannot be released out from under
				 * us, so ctx->switch_log stays stable.
				 */
2382 				error = spufs_wait(ctx->switch_log->wait,
2383 						spufs_switch_log_used(ctx) > 0);
2384 
2385 				/* On error, spufs_wait returns without the
2386 				 * state mutex held */
2387 				if (error)
2388 					return error;
2389 
2390 				/* We may have had entries read from underneath
2391 				 * us while we dropped the mutex in spufs_wait,
2392 				 * so re-check */
2393 				if (spufs_switch_log_used(ctx) == 0)
2394 					continue;
2395 			}
2396 		}
2397 
2398 		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record doesn't fit in the space that is
			 * still available, return the partial buffer
			 * accumulated so far. */
			break;

		/* copy_to_user() returns the number of bytes it could not
		 * copy, not an errno, so map any failure to -EFAULT. */
		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
2412 	}
2413 
2414 	spu_release(ctx);
2415 
2416 	return cnt == 0 ? error : cnt;
2417 }
2418 
2419 static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
2420 {
2421 	struct inode *inode = file_inode(file);
2422 	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2423 	__poll_t mask = 0;
2424 	int rc;
2425 
2426 	poll_wait(file, &ctx->switch_log->wait, wait);
2427 
	rc = spu_acquire(ctx);
	if (rc)
		/* poll methods return __poll_t, not a negative errno */
		return EPOLLERR;
2431 
2432 	if (spufs_switch_log_used(ctx) > 0)
2433 		mask |= EPOLLIN;
2434 
2435 	spu_release(ctx);
2436 
2437 	return mask;
2438 }
2439 
2440 static const struct file_operations spufs_switch_log_fops = {
2441 	.open		= spufs_switch_log_open,
2442 	.read		= spufs_switch_log_read,
2443 	.poll		= spufs_switch_log_poll,
2444 	.release	= spufs_switch_log_release,
2445 	.llseek		= no_llseek,
2446 };
2447 
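/*
 * A minimal userspace sketch (hypothetical context path) of a
 * switch_log consumer.  read() returns only whole records in the
 * format shown above, blocking until at least one is available unless
 * O_NONBLOCK is set:
 *
 *	int fd = open("/spu/myctx/switch_log", O_RDONLY);
 *	char rec[256];
 *	ssize_t n;
 *
 *	while ((n = read(fd, rec, sizeof(rec) - 1)) > 0) {
 *		rec[n] = '\0';
 *		fputs(rec, stdout);
 *	}
 *	close(fd);
 */
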
/**
 * spu_switch_log_notify - log a context switch event to a switch log reader
 * @spu:  SPU the context is (or was) running on, or NULL if not loaded
 * @ctx:  context whose switch log receives the event
 * @type: event type, as recorded in the log entry
 * @val:  event value, as recorded in the log entry
 *
 * Must be called with ctx->state_mutex held.
 */
2453 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2454 		u32 type, u32 val)
2455 {
2456 	if (!ctx->switch_log)
2457 		return;
2458 
2459 	if (spufs_switch_log_avail(ctx) > 1) {
2460 		struct switch_log_entry *p;
2461 
2462 		p = ctx->switch_log->log + ctx->switch_log->head;
2463 		ktime_get_ts64(&p->tstamp);
2464 		p->timebase = get_tb();
2465 		p->spu_id = spu ? spu->number : -1;
2466 		p->type = type;
2467 		p->val = val;
2468 
2469 		ctx->switch_log->head =
2470 			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2471 	}
2472 
2473 	wake_up(&ctx->switch_log->wait);
2474 }
2475 
2476 static int spufs_show_ctx(struct seq_file *s, void *private)
2477 {
2478 	struct spu_context *ctx = s->private;
2479 	u64 mfc_control_RW;
2480 
2481 	mutex_lock(&ctx->state_mutex);
2482 	if (ctx->spu) {
2483 		struct spu *spu = ctx->spu;
2484 		struct spu_priv2 __iomem *priv2 = spu->priv2;
2485 
2486 		spin_lock_irq(&spu->register_lock);
2487 		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
2488 		spin_unlock_irq(&spu->register_lock);
2489 	} else {
2490 		struct spu_state *csa = &ctx->csa;
2491 
2492 		mfc_control_RW = csa->priv2.mfc_control_RW;
2493 	}
2494 
2495 	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2496 		" %c %llx %llx %llx %llx %x %x\n",
2497 		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
2498 		ctx->flags,
2499 		ctx->sched_flags,
2500 		ctx->prio,
2501 		ctx->time_slice,
2502 		ctx->spu ? ctx->spu->number : -1,
2503 		!list_empty(&ctx->rq) ? 'q' : ' ',
2504 		ctx->csa.class_0_pending,
2505 		ctx->csa.class_0_dar,
2506 		ctx->csa.class_1_dsisr,
2507 		mfc_control_RW,
2508 		ctx->ops->runcntl_read(ctx),
2509 		ctx->ops->status_read(ctx));
2510 
2511 	mutex_unlock(&ctx->state_mutex);
2512 
2513 	return 0;
2514 }
2515 
2516 static int spufs_ctx_open(struct inode *inode, struct file *file)
2517 {
2518 	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2519 }
2520 
2521 static const struct file_operations spufs_ctx_fops = {
2522 	.open           = spufs_ctx_open,
2523 	.read           = seq_read,
2524 	.llseek         = seq_lseek,
2525 	.release        = single_release,
2526 };
2527 
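/*
 * Directory contents tables.  Each entry names a file, its
 * file_operations, the access mode, and optionally a size that spufs
 * uses as the file's i_size (a missing size is taken here to mean the
 * file has no fixed size).
 */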
2528 const struct spufs_tree_descr spufs_dir_contents[] = {
2529 	{ "capabilities", &spufs_caps_fops, 0444, },
2530 	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
2531 	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
2532 	{ "mbox", &spufs_mbox_fops, 0444, },
2533 	{ "ibox", &spufs_ibox_fops, 0444, },
2534 	{ "wbox", &spufs_wbox_fops, 0222, },
2535 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2536 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2537 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2538 	{ "signal1", &spufs_signal1_fops, 0666, },
2539 	{ "signal2", &spufs_signal2_fops, 0666, },
2540 	{ "signal1_type", &spufs_signal1_type, 0666, },
2541 	{ "signal2_type", &spufs_signal2_type, 0666, },
2542 	{ "cntl", &spufs_cntl_fops,  0666, },
2543 	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
2544 	{ "lslr", &spufs_lslr_ops, 0444, },
2545 	{ "mfc", &spufs_mfc_fops, 0666, },
2546 	{ "mss", &spufs_mss_fops, 0666, },
2547 	{ "npc", &spufs_npc_ops, 0666, },
2548 	{ "srr0", &spufs_srr0_ops, 0666, },
2549 	{ "decr", &spufs_decr_ops, 0666, },
2550 	{ "decr_status", &spufs_decr_status_ops, 0666, },
2551 	{ "event_mask", &spufs_event_mask_ops, 0666, },
2552 	{ "event_status", &spufs_event_status_ops, 0444, },
2553 	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2554 	{ "phys-id", &spufs_id_ops, 0666, },
2555 	{ "object-id", &spufs_object_id_ops, 0666, },
2556 	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
2557 	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
2558 	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
2559 	{ "dma_info", &spufs_dma_info_fops, 0444,
2560 		sizeof(struct spu_dma_info), },
2561 	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
2562 		sizeof(struct spu_proxydma_info)},
2563 	{ "tid", &spufs_tid_fops, 0444, },
2564 	{ "stat", &spufs_stat_fops, 0444, },
2565 	{ "switch_log", &spufs_switch_log_fops, 0444 },
2566 	{},
2567 };
2568 
2569 const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
2570 	{ "capabilities", &spufs_caps_fops, 0444, },
2571 	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
2572 	{ "mbox", &spufs_mbox_fops, 0444, },
2573 	{ "ibox", &spufs_ibox_fops, 0444, },
2574 	{ "wbox", &spufs_wbox_fops, 0222, },
2575 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2576 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2577 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2578 	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
2579 	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
2580 	{ "signal1_type", &spufs_signal1_type, 0666, },
2581 	{ "signal2_type", &spufs_signal2_type, 0666, },
2582 	{ "mss", &spufs_mss_fops, 0666, },
2583 	{ "mfc", &spufs_mfc_fops, 0666, },
2584 	{ "cntl", &spufs_cntl_fops,  0666, },
2585 	{ "npc", &spufs_npc_ops, 0666, },
2586 	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2587 	{ "phys-id", &spufs_id_ops, 0666, },
2588 	{ "object-id", &spufs_object_id_ops, 0666, },
2589 	{ "tid", &spufs_tid_fops, 0444, },
2590 	{ "stat", &spufs_stat_fops, 0444, },
2591 	{},
2592 };
2593 
2594 const struct spufs_tree_descr spufs_dir_debug_contents[] = {
2595 	{ ".ctx", &spufs_ctx_fops, 0444, },
2596 	{},
2597 };
2598 
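/*
 * Per-file readers for SPU coredump notes.  An entry either dumps raw
 * state through its dump callback or a single u64 attribute through
 * its get callback; the final field is the exact number of bytes
 * emitted.  The 19-byte entries match a u64 formatted as "0x%.16llx\n"
 * (2 + 16 + 1 characters); the formatting itself is assumed to live in
 * the coredump code.
 */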
2599 const struct spufs_coredump_reader spufs_coredump_read[] = {
2600 	{ "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])},
2601 	{ "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) },
2602 	{ "lslr", NULL, spufs_lslr_get, 19 },
2603 	{ "decr", NULL, spufs_decr_get, 19 },
2604 	{ "decr_status", NULL, spufs_decr_status_get, 19 },
2605 	{ "mem", spufs_mem_dump, NULL, LS_SIZE, },
2606 	{ "signal1", spufs_signal1_dump, NULL, sizeof(u32) },
2607 	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
2608 	{ "signal2", spufs_signal2_dump, NULL, sizeof(u32) },
2609 	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
2610 	{ "event_mask", NULL, spufs_event_mask_get, 19 },
2611 	{ "event_status", NULL, spufs_event_status_get, 19 },
2612 	{ "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) },
2613 	{ "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) },
2614 	{ "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)},
2615 	{ "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)},
2616 	{ "proxydma_info", spufs_proxydma_info_dump,
2617 			   NULL, sizeof(struct spu_proxydma_info)},
2618 	{ "object-id", NULL, spufs_object_id_get, 19 },
2619 	{ "npc", NULL, spufs_npc_get, 19 },
2620 	{ NULL },
2621 };
2622