/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
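/*
 * Note on the write path below: the input is truncated to the size of
 * set_buf, parsed with simple_strtol(), and the full input length is
 * claimed on success so callers do not retry the unparsed remainder.
 */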
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};


static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long pfn, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}
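/*
 * The fault handler above inserts the PFN itself and always answers
 * VM_FAULT_NOPAGE: a saved context maps the cached local-store backup
 * held in the CSA, while a loaded context maps the SPE's local store
 * uncached (write-combining), so the mapping tracks context state.
 */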
static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};

static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}
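/*
 * spufs_ps_fault() always returns VM_FAULT_NOPAGE: either the PFN was
 * inserted directly above, or the faulting thread slept until the
 * context became runnable and will simply take the fault again.
 */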
#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}
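/*
 * Illustrative sketch (assumed userspace, not part of this file): the
 * "regs" file exposes the 128 16-byte vector GPRs as one flat binary
 * image, so a debugger could fetch them with a single read:
 *
 *	uint8_t gprs[128 * 16];
 *	pread(regs_fd, gprs, sizeof(gprs), 0);
 */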
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
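/*
 * Both "regs" and "fpcr" go through spu_acquire_saved(), which forces
 * the context into the saved state first: the register image is only
 * coherent in the context save area, never while the SPE is running.
 */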
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
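/*
 * Illustrative sketch (assumed userspace): a reader can avoid busy
 * waiting on the interrupt mailbox by combining poll(2) with a 4-byte
 * read, relying on the wakeup issued from spufs_ibox_callback():
 *
 *	struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
 *	uint32_t val;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ibox_fd, &val, 4);
 */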
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq,
				   spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
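/*
 * Note: spufs_wait() is expected to return with the context already
 * released when it fails, which is why the blocking paths in
 * spufs_ibox_read() above and spufs_wbox_write() below jump straight
 * to "out" rather than "out_unlock" on error.
 */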
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq,
				   spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
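/*
 * The three *_stat files above decode one mailbox status word: the
 * low byte is the mbox entry count, the next byte the wbox free-entry
 * count, and the third byte the ibox entry count, matching the shifts
 * used in the readers.
 */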
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
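/*
 * The *_nosched file_operations omit .read: reading a signal file goes
 * through spu_acquire_saved(), which would have to schedule the
 * context out, and that is exactly what SPU_CREATE_NOSCHED contexts
 * forbid.
 */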
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);


static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
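/*
 * For a hypothetical attribute "foo", the invocation
 * DEFINE_SPUFS_ATTRIBUTE(spufs_foo, spufs_foo_get, spufs_foo_set,
 * "0x%llx\n", SPU_ATTR_ACQUIRE) expands to a __spufs_foo_get() that
 * brackets spufs_foo_get(ctx) with spu_acquire()/spu_release(), and
 * then to the spufs_foo file_operations generated by the
 * simple-attribute macro above.
 */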
#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};

static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
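/*
 * Illustrative sketch (assumed userspace): mapping the full problem
 * state area through "psmap" gives direct, uncached register access:
 *
 *	void *ps = mmap(NULL, 0x20000, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, psmap_fd, 0);
 */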
#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if at least one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
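/*
 * Illustrative userspace flow (assumed) for the "mfc" file, matching
 * the write/read pair implemented below: queue a DMA by writing one
 * struct mfc_dma_command, then read a 4-byte tag-group status word.
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa = ls_offset,	.ea = (uint64_t)buf,
 *		.size = 16384,		.tag = 1,
 *		.cmd = MFC_GET_CMD,
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	uint32_t done;
 *	read(mfc_fd, &done, 4);    (blocks unless O_NONBLOCK)
 */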
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
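/*
 * Poll on the mfc file reports POLLOUT while the command queue has
 * free slots and POLLIN once one of the tag groups in ctx->tagwait
 * has completed, mirroring the fasync mask set in spufs_mfc_callback().
 */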
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (!err) {
		inode_lock(inode);
		err = spufs_mfc_flush(file, NULL);
		inode_unlock(inode);
	}
	return err;
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
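/*
 * The attribute pairs above and below back the small text files of a
 * context directory ("npc", "decr", "decr_status", "event_mask",
 * "srr0", ...); each read formats a single u64 using the format
 * string passed to DEFINE_SPUFS_ATTRIBUTE.
 */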
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
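/*
 * The remaining *_info readers follow the same pattern as the mbox
 * variant above: force the context into the saved state, then copy a
 * snapshot of the relevant CSA registers out under csa.register_lock.
 */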
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
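/*
 * proxydma_info below mirrors dma_info, but reports the eight-entry
 * proxy (PU-side) MFC command queue instead of the sixteen-entry
 * SPU-side queue copied above.
 */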
static void spufs_get_proxydma_info(struct spu_context *ctx,
		struct spu_proxydma_info *info)
{
	int i;

	info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
		struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}
}

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;

	if (len < sizeof(info))
		return -EINVAL;

	spufs_get_proxydma_info(ctx, &info);

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_proxydma_info info;
	int ret;

	if (len < sizeof(info))
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	/* Fill the snapshot under the lock, copy it out afterwards. */
	spin_lock(&ctx->csa.register_lock);
	spufs_get_proxydma_info(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * therefore calculate and apply here the time delta since the last
	 * recorded state of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state)
		time += ktime_get_ns() - ctx->stats.tstamp;

	return time / NSEC_PER_MSEC;
}
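/*
 * Worked example (illustrative numbers): suppose a context has accrued
 * 40,000,000 ns in SPU_UTIL_USER, is currently loaded in that same state,
 * and its last recorded timestamp is 2,500,000 ns in the past.  Then
 *
 *	time = 40,000,000 + 2,500,000 = 42,500,000 ns
 *	spufs_acct_time() returns 42,500,000 / NSEC_PER_MSEC = 42 ms
 *
 * i.e. the in-progress interval is credited to the current state before
 * the nanosecond count is scaled down to the milliseconds reported by
 * the "stat" file.
 */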
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}

static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}
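/*
 * The switch_log head/tail bookkeeping above is a classic single-reader
 * ring buffer: both indices only ever advance modulo SWITCH_LOG_BUFSIZE,
 * so "used" is the head/tail distance and "avail" its complement.  One
 * slot is effectively sacrificed to distinguish full from empty, which is
 * why spu_switch_log_notify() below only logs while spufs_switch_log_avail()
 * is greater than 1.  A sketch of the arithmetic with a hypothetical
 * 8-entry buffer (the unsigned wrap-around works out because the buffer
 * size is a power of two):
 *
 *	head = 2, tail = 7:	used  = (2 - 7) % 8 = 3   (entries 7, 0, 1)
 *				avail = 8 - 3       = 5
 *	head == tail:		used  = 0                 (buffer empty)
 *	avail == 1:		buffer is effectively full
 */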
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len - cnt)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record doesn't fit in the space remaining,
			 * return the partial buffer accumulated so far */
			break;

		/* copy_to_user() returns the number of uncopied bytes,
		 * not an errno, so map a partial copy to -EFAULT */
		if (copy_to_user(buf + cnt, tbuf, width)) {
			error = -EFAULT;
			break;
		}
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}

static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};
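/*
 * A minimal userspace sketch of consuming the switch log, assuming a
 * hypothetical context directory /spu/my_ctx; each record read is one
 * "seconds.nanoseconds spu_id type val timebase" line in the format
 * produced by switch_log_sprint() above:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		ssize_t n;
 *		struct pollfd pfd = {
 *			.fd = open("/spu/my_ctx/switch_log", O_RDONLY),
 *			.events = POLLIN,
 *		};
 *
 *		if (pfd.fd < 0)
 *			return 1;
 *		// Wait for entries, then read and echo them.
 *		while (poll(&pfd, 1, -1) > 0 &&
 *		       (n = read(pfd.fd, line, sizeof(line) - 1)) > 0) {
 *			line[n] = '\0';
 *			fputs(line, stdout);
 *		}
 *		close(pfd.fd);
 *		return 0;
 *	}
 */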
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}

static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open		= spufs_ctx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
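/*
 * For reference, an illustrative (made-up) line from the debug ".ctx"
 * file, matching the seq_printf() format above; 'S' means the context is
 * saved ('R' running), spu(-1) means no physical SPU is assigned, and a
 * 'q' in place of the blank would flag a context sitting on the run queue:
 *
 *	S flgs(0) sflgs(0) pri(120) ts(100) spu(-1)   0 0 0 0 0 0
 */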
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info), },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem", &spufs_mem_fops, 0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

/*
 * The "19" sizes below are the length of the text produced by the
 * "0x%llx\n" attribute format: "0x" plus up to sixteen hex digits plus
 * the trailing newline.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};