1 /* 2 * Handle unaligned accesses by emulation. 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Copyright (C) 2014 Imagination Technologies Ltd. 11 * 12 * This file contains exception handler for address error exception with the 13 * special capability to execute faulting instructions in software. The 14 * handler does not try to handle the case when the program counter points 15 * to an address not aligned to a word boundary. 16 * 17 * Putting data to unaligned addresses is a bad practice even on Intel where 18 * only the performance is affected. Much worse is that such code is non- 19 * portable. Due to several programs that die on MIPS due to alignment 20 * problems I decided to implement this handler anyway though I originally 21 * didn't intend to do this at all for user code. 22 * 23 * For now I enable fixing of address errors by default to make life easier. 24 * I however intend to disable this somewhen in the future when the alignment 25 * problems with user programs have been fixed. For programmers this is the 26 * right way to go. 27 * 28 * Fixing address errors is a per process option. The option is inherited 29 * across fork(2) and execve(2) calls. If you really want to use the 30 * option in your user programs - I discourage the use of the software 31 * emulation strongly - use the following code in your userland stuff: 32 * 33 * #include <sys/sysmips.h> 34 * 35 * ... 36 * sysmips(MIPS_FIXADE, x); 37 * ... 38 * 39 * The argument x is 0 for disabling software emulation, enabled otherwise. 40 * 41 * Below a little program to play around with this feature. 
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08lx\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for(i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in the practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *	 exception for the R6000.
 *	 A store crossing a page boundary might be executed only partially.
 *	 Undo the partial store in this case.
 */
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/unaligned-emul.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>

#include "access-helper.h"

/* What to do when an unaligned access is emulated (debugfs-selectable). */
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;	/* count of emulated accesses */
static u32 unaligned_action;		/* one of UNALIGNED_ACTION_* */
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

/*
 * emulate_load_store_insn() - emulate one classic (32-bit encoded) MIPS
 * unaligned load/store instruction.
 * @regs: register state at the time of the address error exception
 * @addr: the unaligned data address that caused the fault
 * @pc:   address of the faulting instruction word
 *
 * Decodes the instruction at @pc and performs the access through the
 * byte-wise helper macros from <asm/unaligned-emul.h>.  On success the
 * EPC is advanced via compute_return_epc() (which also resolves branch
 * delay slots) and, for loads, the target GPR is written.  If any
 * sub-access faults, the original EPC and $31 are restored before the
 * signal is raised so the instruction appears never to have run.
 *
 * Sends SIGBUS for accesses that cannot legitimately be unaligned
 * (ll/sc, lwl/lwr family, byte ops), SIGILL for undecodable words.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;
	bool user = user_mode(regs);

	/* Saved so the fault path can roll back a partially-taken branch. */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_inst32(&insn.word, pc, user);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
#ifdef CONFIG_MACH_INGENIC
	/* Ingenic MXU SIMD extension: indexed loads (LXW/LXH/LXHU). */
	case spec2_op:
		if (insn.mxu_lx_format.func != mxu_lx_op)
			goto sigbus; /* other MXU instructions we don't care */

		switch (insn.mxu_lx_format.op) {
		case mxu_lxw_op:
			if (user && !access_ok(addr, 4))
				goto sigbus;
			LoadW(addr, value, res);
			if (res)
				goto fault;
			compute_return_epc(regs);
			regs->regs[insn.mxu_lx_format.rd] = value;
			break;
		case mxu_lxh_op:
			if (user && !access_ok(addr, 2))
				goto sigbus;
			LoadHW(addr, value, res);
			if (res)
				goto fault;
			compute_return_epc(regs);
			regs->regs[insn.dsp_format.rd] = value;
			break;
		case mxu_lxhu_op:
			if (user && !access_ok(addr, 2))
				goto sigbus;
			LoadHWU(addr, value, res);
			if (res)
				goto fault;
			compute_return_epc(regs);
			regs->regs[insn.dsp_format.rd] = value;
			break;
		case mxu_lxb_op:
		case mxu_lxbu_op:
			/* Byte accesses can never be unaligned. */
			goto sigbus;
		default:
			goto sigill;
		}
		break;
#endif
	case spec3_op:
		if (insn.dsp_format.func == lx_op) {
			/* DSP ASE indexed loads LWX/LHX. */
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (user && !access_ok(addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (user && !access_ok(addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
#ifdef CONFIG_EVA
		else {
			/*
			 * we can land here only from kernel accessing user
			 * memory, so we need to "switch" the address limit to
			 * user space, so that address check can work properly.
			 */
			switch (insn.spec3_format.func) {
			case lhe_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lwe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				LoadWE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lhue_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHWUE(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case she_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreHWE(addr, value, res);
				if (res)
					goto fault;
				break;
			case swe_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreWE(addr, value, res);
				if (res)
					goto fault;
				break;
			default:
				goto sigill;
			}
		}
#endif
		break;
	case lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWE(addr, value, res);
		else
			LoadHW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadWE(addr, value, res);
		else
			LoadW(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA) && user)
			LoadHWUE(addr, value, res);
		else
			LoadHWU(addr, value, res);

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreHWE(addr, value, res);
		else
			StoreHW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sw_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA) && user)
			StoreWE(addr, value, res);
		else
			StoreW(addr, value, res);

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

#ifdef CONFIG_MIPS_FP_SUPPORT

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		/* FP loads/stores are punted to the full FPU emulator. */
		void __user *fault_addr = NULL;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption. If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}

/* Recode table from 16-bit register notation to 32-bit GPR.
*/ 609 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; 610 611 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ 612 static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; 613 614 static void emulate_load_store_microMIPS(struct pt_regs *regs, 615 void __user *addr) 616 { 617 unsigned long value; 618 unsigned int res; 619 int i; 620 unsigned int reg = 0, rvar; 621 unsigned long orig31; 622 u16 __user *pc16; 623 u16 halfword; 624 unsigned int word; 625 unsigned long origpc, contpc; 626 union mips_instruction insn; 627 struct mm_decoded_insn mminsn; 628 bool user = user_mode(regs); 629 630 origpc = regs->cp0_epc; 631 orig31 = regs->regs[31]; 632 633 mminsn.micro_mips_mode = 1; 634 635 /* 636 * This load never faults. 637 */ 638 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); 639 __get_user(halfword, pc16); 640 pc16++; 641 contpc = regs->cp0_epc + 2; 642 word = ((unsigned int)halfword << 16); 643 mminsn.pc_inc = 2; 644 645 if (!mm_insn_16bit(halfword)) { 646 __get_user(halfword, pc16); 647 pc16++; 648 contpc = regs->cp0_epc + 4; 649 mminsn.pc_inc = 4; 650 word |= halfword; 651 } 652 mminsn.insn = word; 653 654 if (get_user(halfword, pc16)) 655 goto fault; 656 mminsn.next_pc_inc = 2; 657 word = ((unsigned int)halfword << 16); 658 659 if (!mm_insn_16bit(halfword)) { 660 pc16++; 661 if (get_user(halfword, pc16)) 662 goto fault; 663 mminsn.next_pc_inc = 4; 664 word |= halfword; 665 } 666 mminsn.next_insn = word; 667 668 insn = (union mips_instruction)(mminsn.insn); 669 if (mm_isBranchInstr(regs, mminsn, &contpc)) 670 insn = (union mips_instruction)(mminsn.next_insn); 671 672 /* Parse instruction to find what to do */ 673 674 switch (insn.mm_i_format.opcode) { 675 676 case mm_pool32a_op: 677 switch (insn.mm_x_format.func) { 678 case mm_lwxs_op: 679 reg = insn.mm_x_format.rd; 680 goto loadW; 681 } 682 683 goto sigbus; 684 685 case mm_pool32b_op: 686 switch (insn.mm_m_format.func) { 687 case mm_lwp_func: 688 reg = 
insn.mm_m_format.rd; 689 if (reg == 31) 690 goto sigbus; 691 692 if (user && !access_ok(addr, 8)) 693 goto sigbus; 694 695 LoadW(addr, value, res); 696 if (res) 697 goto fault; 698 regs->regs[reg] = value; 699 addr += 4; 700 LoadW(addr, value, res); 701 if (res) 702 goto fault; 703 regs->regs[reg + 1] = value; 704 goto success; 705 706 case mm_swp_func: 707 reg = insn.mm_m_format.rd; 708 if (reg == 31) 709 goto sigbus; 710 711 if (user && !access_ok(addr, 8)) 712 goto sigbus; 713 714 value = regs->regs[reg]; 715 StoreW(addr, value, res); 716 if (res) 717 goto fault; 718 addr += 4; 719 value = regs->regs[reg + 1]; 720 StoreW(addr, value, res); 721 if (res) 722 goto fault; 723 goto success; 724 725 case mm_ldp_func: 726 #ifdef CONFIG_64BIT 727 reg = insn.mm_m_format.rd; 728 if (reg == 31) 729 goto sigbus; 730 731 if (user && !access_ok(addr, 16)) 732 goto sigbus; 733 734 LoadDW(addr, value, res); 735 if (res) 736 goto fault; 737 regs->regs[reg] = value; 738 addr += 8; 739 LoadDW(addr, value, res); 740 if (res) 741 goto fault; 742 regs->regs[reg + 1] = value; 743 goto success; 744 #endif /* CONFIG_64BIT */ 745 746 goto sigill; 747 748 case mm_sdp_func: 749 #ifdef CONFIG_64BIT 750 reg = insn.mm_m_format.rd; 751 if (reg == 31) 752 goto sigbus; 753 754 if (user && !access_ok(addr, 16)) 755 goto sigbus; 756 757 value = regs->regs[reg]; 758 StoreDW(addr, value, res); 759 if (res) 760 goto fault; 761 addr += 8; 762 value = regs->regs[reg + 1]; 763 StoreDW(addr, value, res); 764 if (res) 765 goto fault; 766 goto success; 767 #endif /* CONFIG_64BIT */ 768 769 goto sigill; 770 771 case mm_lwm32_func: 772 reg = insn.mm_m_format.rd; 773 rvar = reg & 0xf; 774 if ((rvar > 9) || !reg) 775 goto sigill; 776 if (reg & 0x10) { 777 if (user && !access_ok(addr, 4 * (rvar + 1))) 778 goto sigbus; 779 } else { 780 if (user && !access_ok(addr, 4 * rvar)) 781 goto sigbus; 782 } 783 if (rvar == 9) 784 rvar = 8; 785 for (i = 16; rvar; rvar--, i++) { 786 LoadW(addr, value, res); 787 if (res) 788 
goto fault; 789 addr += 4; 790 regs->regs[i] = value; 791 } 792 if ((reg & 0xf) == 9) { 793 LoadW(addr, value, res); 794 if (res) 795 goto fault; 796 addr += 4; 797 regs->regs[30] = value; 798 } 799 if (reg & 0x10) { 800 LoadW(addr, value, res); 801 if (res) 802 goto fault; 803 regs->regs[31] = value; 804 } 805 goto success; 806 807 case mm_swm32_func: 808 reg = insn.mm_m_format.rd; 809 rvar = reg & 0xf; 810 if ((rvar > 9) || !reg) 811 goto sigill; 812 if (reg & 0x10) { 813 if (user && !access_ok(addr, 4 * (rvar + 1))) 814 goto sigbus; 815 } else { 816 if (user && !access_ok(addr, 4 * rvar)) 817 goto sigbus; 818 } 819 if (rvar == 9) 820 rvar = 8; 821 for (i = 16; rvar; rvar--, i++) { 822 value = regs->regs[i]; 823 StoreW(addr, value, res); 824 if (res) 825 goto fault; 826 addr += 4; 827 } 828 if ((reg & 0xf) == 9) { 829 value = regs->regs[30]; 830 StoreW(addr, value, res); 831 if (res) 832 goto fault; 833 addr += 4; 834 } 835 if (reg & 0x10) { 836 value = regs->regs[31]; 837 StoreW(addr, value, res); 838 if (res) 839 goto fault; 840 } 841 goto success; 842 843 case mm_ldm_func: 844 #ifdef CONFIG_64BIT 845 reg = insn.mm_m_format.rd; 846 rvar = reg & 0xf; 847 if ((rvar > 9) || !reg) 848 goto sigill; 849 if (reg & 0x10) { 850 if (user && !access_ok(addr, 8 * (rvar + 1))) 851 goto sigbus; 852 } else { 853 if (user && !access_ok(addr, 8 * rvar)) 854 goto sigbus; 855 } 856 if (rvar == 9) 857 rvar = 8; 858 859 for (i = 16; rvar; rvar--, i++) { 860 LoadDW(addr, value, res); 861 if (res) 862 goto fault; 863 addr += 4; 864 regs->regs[i] = value; 865 } 866 if ((reg & 0xf) == 9) { 867 LoadDW(addr, value, res); 868 if (res) 869 goto fault; 870 addr += 8; 871 regs->regs[30] = value; 872 } 873 if (reg & 0x10) { 874 LoadDW(addr, value, res); 875 if (res) 876 goto fault; 877 regs->regs[31] = value; 878 } 879 goto success; 880 #endif /* CONFIG_64BIT */ 881 882 goto sigill; 883 884 case mm_sdm_func: 885 #ifdef CONFIG_64BIT 886 reg = insn.mm_m_format.rd; 887 rvar = reg & 0xf; 888 if 
((rvar > 9) || !reg) 889 goto sigill; 890 if (reg & 0x10) { 891 if (user && !access_ok(addr, 8 * (rvar + 1))) 892 goto sigbus; 893 } else { 894 if (user && !access_ok(addr, 8 * rvar)) 895 goto sigbus; 896 } 897 if (rvar == 9) 898 rvar = 8; 899 900 for (i = 16; rvar; rvar--, i++) { 901 value = regs->regs[i]; 902 StoreDW(addr, value, res); 903 if (res) 904 goto fault; 905 addr += 8; 906 } 907 if ((reg & 0xf) == 9) { 908 value = regs->regs[30]; 909 StoreDW(addr, value, res); 910 if (res) 911 goto fault; 912 addr += 8; 913 } 914 if (reg & 0x10) { 915 value = regs->regs[31]; 916 StoreDW(addr, value, res); 917 if (res) 918 goto fault; 919 } 920 goto success; 921 #endif /* CONFIG_64BIT */ 922 923 goto sigill; 924 925 /* LWC2, SWC2, LDC2, SDC2 are not serviced */ 926 } 927 928 goto sigbus; 929 930 case mm_pool32c_op: 931 switch (insn.mm_m_format.func) { 932 case mm_lwu_func: 933 reg = insn.mm_m_format.rd; 934 goto loadWU; 935 } 936 937 /* LL,SC,LLD,SCD are not serviced */ 938 goto sigbus; 939 940 #ifdef CONFIG_MIPS_FP_SUPPORT 941 case mm_pool32f_op: 942 switch (insn.mm_x_format.func) { 943 case mm_lwxc1_func: 944 case mm_swxc1_func: 945 case mm_ldxc1_func: 946 case mm_sdxc1_func: 947 goto fpu_emul; 948 } 949 950 goto sigbus; 951 952 case mm_ldc132_op: 953 case mm_sdc132_op: 954 case mm_lwc132_op: 955 case mm_swc132_op: { 956 void __user *fault_addr = NULL; 957 958 fpu_emul: 959 /* roll back jump/branch */ 960 regs->cp0_epc = origpc; 961 regs->regs[31] = orig31; 962 963 die_if_kernel("Unaligned FP access in kernel code", regs); 964 BUG_ON(!used_math()); 965 BUG_ON(!is_fpu_owner()); 966 967 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 968 &fault_addr); 969 own_fpu(1); /* restore FPU state */ 970 971 /* If something went wrong, signal */ 972 process_fpemu_return(res, fault_addr, 0); 973 974 if (res == 0) 975 goto success; 976 return; 977 } 978 #endif /* CONFIG_MIPS_FP_SUPPORT */ 979 980 case mm_lh32_op: 981 reg = insn.mm_i_format.rt; 982 goto loadHW; 983 984 case 
mm_lhu32_op: 985 reg = insn.mm_i_format.rt; 986 goto loadHWU; 987 988 case mm_lw32_op: 989 reg = insn.mm_i_format.rt; 990 goto loadW; 991 992 case mm_sh32_op: 993 reg = insn.mm_i_format.rt; 994 goto storeHW; 995 996 case mm_sw32_op: 997 reg = insn.mm_i_format.rt; 998 goto storeW; 999 1000 case mm_ld32_op: 1001 reg = insn.mm_i_format.rt; 1002 goto loadDW; 1003 1004 case mm_sd32_op: 1005 reg = insn.mm_i_format.rt; 1006 goto storeDW; 1007 1008 case mm_pool16c_op: 1009 switch (insn.mm16_m_format.func) { 1010 case mm_lwm16_op: 1011 reg = insn.mm16_m_format.rlist; 1012 rvar = reg + 1; 1013 if (user && !access_ok(addr, 4 * rvar)) 1014 goto sigbus; 1015 1016 for (i = 16; rvar; rvar--, i++) { 1017 LoadW(addr, value, res); 1018 if (res) 1019 goto fault; 1020 addr += 4; 1021 regs->regs[i] = value; 1022 } 1023 LoadW(addr, value, res); 1024 if (res) 1025 goto fault; 1026 regs->regs[31] = value; 1027 1028 goto success; 1029 1030 case mm_swm16_op: 1031 reg = insn.mm16_m_format.rlist; 1032 rvar = reg + 1; 1033 if (user && !access_ok(addr, 4 * rvar)) 1034 goto sigbus; 1035 1036 for (i = 16; rvar; rvar--, i++) { 1037 value = regs->regs[i]; 1038 StoreW(addr, value, res); 1039 if (res) 1040 goto fault; 1041 addr += 4; 1042 } 1043 value = regs->regs[31]; 1044 StoreW(addr, value, res); 1045 if (res) 1046 goto fault; 1047 1048 goto success; 1049 1050 } 1051 1052 goto sigbus; 1053 1054 case mm_lhu16_op: 1055 reg = reg16to32[insn.mm16_rb_format.rt]; 1056 goto loadHWU; 1057 1058 case mm_lw16_op: 1059 reg = reg16to32[insn.mm16_rb_format.rt]; 1060 goto loadW; 1061 1062 case mm_sh16_op: 1063 reg = reg16to32st[insn.mm16_rb_format.rt]; 1064 goto storeHW; 1065 1066 case mm_sw16_op: 1067 reg = reg16to32st[insn.mm16_rb_format.rt]; 1068 goto storeW; 1069 1070 case mm_lwsp16_op: 1071 reg = insn.mm16_r5_format.rt; 1072 goto loadW; 1073 1074 case mm_swsp16_op: 1075 reg = insn.mm16_r5_format.rt; 1076 goto storeW; 1077 1078 case mm_lwgp16_op: 1079 reg = reg16to32[insn.mm16_r3_format.rt]; 1080 goto loadW; 
1081 1082 default: 1083 goto sigill; 1084 } 1085 1086 loadHW: 1087 if (user && !access_ok(addr, 2)) 1088 goto sigbus; 1089 1090 LoadHW(addr, value, res); 1091 if (res) 1092 goto fault; 1093 regs->regs[reg] = value; 1094 goto success; 1095 1096 loadHWU: 1097 if (user && !access_ok(addr, 2)) 1098 goto sigbus; 1099 1100 LoadHWU(addr, value, res); 1101 if (res) 1102 goto fault; 1103 regs->regs[reg] = value; 1104 goto success; 1105 1106 loadW: 1107 if (user && !access_ok(addr, 4)) 1108 goto sigbus; 1109 1110 LoadW(addr, value, res); 1111 if (res) 1112 goto fault; 1113 regs->regs[reg] = value; 1114 goto success; 1115 1116 loadWU: 1117 #ifdef CONFIG_64BIT 1118 /* 1119 * A 32-bit kernel might be running on a 64-bit processor. But 1120 * if we're on a 32-bit processor and an i-cache incoherency 1121 * or race makes us see a 64-bit instruction here the sdl/sdr 1122 * would blow up, so for now we don't handle unaligned 64-bit 1123 * instructions on 32-bit kernels. 1124 */ 1125 if (user && !access_ok(addr, 4)) 1126 goto sigbus; 1127 1128 LoadWU(addr, value, res); 1129 if (res) 1130 goto fault; 1131 regs->regs[reg] = value; 1132 goto success; 1133 #endif /* CONFIG_64BIT */ 1134 1135 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1136 goto sigill; 1137 1138 loadDW: 1139 #ifdef CONFIG_64BIT 1140 /* 1141 * A 32-bit kernel might be running on a 64-bit processor. But 1142 * if we're on a 32-bit processor and an i-cache incoherency 1143 * or race makes us see a 64-bit instruction here the sdl/sdr 1144 * would blow up, so for now we don't handle unaligned 64-bit 1145 * instructions on 32-bit kernels. 
1146 */ 1147 if (user && !access_ok(addr, 8)) 1148 goto sigbus; 1149 1150 LoadDW(addr, value, res); 1151 if (res) 1152 goto fault; 1153 regs->regs[reg] = value; 1154 goto success; 1155 #endif /* CONFIG_64BIT */ 1156 1157 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1158 goto sigill; 1159 1160 storeHW: 1161 if (user && !access_ok(addr, 2)) 1162 goto sigbus; 1163 1164 value = regs->regs[reg]; 1165 StoreHW(addr, value, res); 1166 if (res) 1167 goto fault; 1168 goto success; 1169 1170 storeW: 1171 if (user && !access_ok(addr, 4)) 1172 goto sigbus; 1173 1174 value = regs->regs[reg]; 1175 StoreW(addr, value, res); 1176 if (res) 1177 goto fault; 1178 goto success; 1179 1180 storeDW: 1181 #ifdef CONFIG_64BIT 1182 /* 1183 * A 32-bit kernel might be running on a 64-bit processor. But 1184 * if we're on a 32-bit processor and an i-cache incoherency 1185 * or race makes us see a 64-bit instruction here the sdl/sdr 1186 * would blow up, so for now we don't handle unaligned 64-bit 1187 * instructions on 32-bit kernels. 1188 */ 1189 if (user && !access_ok(addr, 8)) 1190 goto sigbus; 1191 1192 value = regs->regs[reg]; 1193 StoreDW(addr, value, res); 1194 if (res) 1195 goto fault; 1196 goto success; 1197 #endif /* CONFIG_64BIT */ 1198 1199 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1200 goto sigill; 1201 1202 success: 1203 regs->cp0_epc = contpc; /* advance or branch */ 1204 1205 #ifdef CONFIG_DEBUG_FS 1206 unaligned_instructions++; 1207 #endif 1208 return; 1209 1210 fault: 1211 /* roll back jump/branch */ 1212 regs->cp0_epc = origpc; 1213 regs->regs[31] = orig31; 1214 /* Did we have an exception handler installed? 
*/ 1215 if (fixup_exception(regs)) 1216 return; 1217 1218 die_if_kernel("Unhandled kernel unaligned access", regs); 1219 force_sig(SIGSEGV); 1220 1221 return; 1222 1223 sigbus: 1224 die_if_kernel("Unhandled kernel unaligned access", regs); 1225 force_sig(SIGBUS); 1226 1227 return; 1228 1229 sigill: 1230 die_if_kernel 1231 ("Unhandled kernel unaligned access or invalid instruction", regs); 1232 force_sig(SIGILL); 1233 } 1234 1235 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) 1236 { 1237 unsigned long value; 1238 unsigned int res; 1239 int reg; 1240 unsigned long orig31; 1241 u16 __user *pc16; 1242 unsigned long origpc; 1243 union mips16e_instruction mips16inst, oldinst; 1244 unsigned int opcode; 1245 int extended = 0; 1246 bool user = user_mode(regs); 1247 1248 origpc = regs->cp0_epc; 1249 orig31 = regs->regs[31]; 1250 pc16 = (unsigned short __user *)msk_isa16_mode(origpc); 1251 /* 1252 * This load never faults. 1253 */ 1254 __get_user(mips16inst.full, pc16); 1255 oldinst = mips16inst; 1256 1257 /* skip EXTEND instruction */ 1258 if (mips16inst.ri.opcode == MIPS16e_extend_op) { 1259 extended = 1; 1260 pc16++; 1261 __get_user(mips16inst.full, pc16); 1262 } else if (delay_slot(regs)) { 1263 /* skip jump instructions */ 1264 /* JAL/JALX are 32 bits but have OPCODE in first short int */ 1265 if (mips16inst.ri.opcode == MIPS16e_jal_op) 1266 pc16++; 1267 pc16++; 1268 if (get_user(mips16inst.full, pc16)) 1269 goto sigbus; 1270 } 1271 1272 opcode = mips16inst.ri.opcode; 1273 switch (opcode) { 1274 case MIPS16e_i64_op: /* I64 or RI64 instruction */ 1275 switch (mips16inst.i64.func) { /* I64/RI64 func field check */ 1276 case MIPS16e_ldpc_func: 1277 case MIPS16e_ldsp_func: 1278 reg = reg16to32[mips16inst.ri64.ry]; 1279 goto loadDW; 1280 1281 case MIPS16e_sdsp_func: 1282 reg = reg16to32[mips16inst.ri64.ry]; 1283 goto writeDW; 1284 1285 case MIPS16e_sdrasp_func: 1286 reg = 29; /* GPRSP */ 1287 goto writeDW; 1288 } 1289 1290 goto sigbus; 1291 
1292 case MIPS16e_swsp_op: 1293 reg = reg16to32[mips16inst.ri.rx]; 1294 if (extended && cpu_has_mips16e2) 1295 switch (mips16inst.ri.imm >> 5) { 1296 case 0: /* SWSP */ 1297 case 1: /* SWGP */ 1298 break; 1299 case 2: /* SHGP */ 1300 opcode = MIPS16e_sh_op; 1301 break; 1302 default: 1303 goto sigbus; 1304 } 1305 break; 1306 1307 case MIPS16e_lwpc_op: 1308 reg = reg16to32[mips16inst.ri.rx]; 1309 break; 1310 1311 case MIPS16e_lwsp_op: 1312 reg = reg16to32[mips16inst.ri.rx]; 1313 if (extended && cpu_has_mips16e2) 1314 switch (mips16inst.ri.imm >> 5) { 1315 case 0: /* LWSP */ 1316 case 1: /* LWGP */ 1317 break; 1318 case 2: /* LHGP */ 1319 opcode = MIPS16e_lh_op; 1320 break; 1321 case 4: /* LHUGP */ 1322 opcode = MIPS16e_lhu_op; 1323 break; 1324 default: 1325 goto sigbus; 1326 } 1327 break; 1328 1329 case MIPS16e_i8_op: 1330 if (mips16inst.i8.func != MIPS16e_swrasp_func) 1331 goto sigbus; 1332 reg = 29; /* GPRSP */ 1333 break; 1334 1335 default: 1336 reg = reg16to32[mips16inst.rri.ry]; 1337 break; 1338 } 1339 1340 switch (opcode) { 1341 1342 case MIPS16e_lb_op: 1343 case MIPS16e_lbu_op: 1344 case MIPS16e_sb_op: 1345 goto sigbus; 1346 1347 case MIPS16e_lh_op: 1348 if (user && !access_ok(addr, 2)) 1349 goto sigbus; 1350 1351 LoadHW(addr, value, res); 1352 if (res) 1353 goto fault; 1354 MIPS16e_compute_return_epc(regs, &oldinst); 1355 regs->regs[reg] = value; 1356 break; 1357 1358 case MIPS16e_lhu_op: 1359 if (user && !access_ok(addr, 2)) 1360 goto sigbus; 1361 1362 LoadHWU(addr, value, res); 1363 if (res) 1364 goto fault; 1365 MIPS16e_compute_return_epc(regs, &oldinst); 1366 regs->regs[reg] = value; 1367 break; 1368 1369 case MIPS16e_lw_op: 1370 case MIPS16e_lwpc_op: 1371 case MIPS16e_lwsp_op: 1372 if (user && !access_ok(addr, 4)) 1373 goto sigbus; 1374 1375 LoadW(addr, value, res); 1376 if (res) 1377 goto fault; 1378 MIPS16e_compute_return_epc(regs, &oldinst); 1379 regs->regs[reg] = value; 1380 break; 1381 1382 case MIPS16e_lwu_op: 1383 #ifdef CONFIG_64BIT 1384 /* 1385 
* A 32-bit kernel might be running on a 64-bit processor. But 1386 * if we're on a 32-bit processor and an i-cache incoherency 1387 * or race makes us see a 64-bit instruction here the sdl/sdr 1388 * would blow up, so for now we don't handle unaligned 64-bit 1389 * instructions on 32-bit kernels. 1390 */ 1391 if (user && !access_ok(addr, 4)) 1392 goto sigbus; 1393 1394 LoadWU(addr, value, res); 1395 if (res) 1396 goto fault; 1397 MIPS16e_compute_return_epc(regs, &oldinst); 1398 regs->regs[reg] = value; 1399 break; 1400 #endif /* CONFIG_64BIT */ 1401 1402 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1403 goto sigill; 1404 1405 case MIPS16e_ld_op: 1406 loadDW: 1407 #ifdef CONFIG_64BIT 1408 /* 1409 * A 32-bit kernel might be running on a 64-bit processor. But 1410 * if we're on a 32-bit processor and an i-cache incoherency 1411 * or race makes us see a 64-bit instruction here the sdl/sdr 1412 * would blow up, so for now we don't handle unaligned 64-bit 1413 * instructions on 32-bit kernels. 
1414 */ 1415 if (user && !access_ok(addr, 8)) 1416 goto sigbus; 1417 1418 LoadDW(addr, value, res); 1419 if (res) 1420 goto fault; 1421 MIPS16e_compute_return_epc(regs, &oldinst); 1422 regs->regs[reg] = value; 1423 break; 1424 #endif /* CONFIG_64BIT */ 1425 1426 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1427 goto sigill; 1428 1429 case MIPS16e_sh_op: 1430 if (user && !access_ok(addr, 2)) 1431 goto sigbus; 1432 1433 MIPS16e_compute_return_epc(regs, &oldinst); 1434 value = regs->regs[reg]; 1435 StoreHW(addr, value, res); 1436 if (res) 1437 goto fault; 1438 break; 1439 1440 case MIPS16e_sw_op: 1441 case MIPS16e_swsp_op: 1442 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ 1443 if (user && !access_ok(addr, 4)) 1444 goto sigbus; 1445 1446 MIPS16e_compute_return_epc(regs, &oldinst); 1447 value = regs->regs[reg]; 1448 StoreW(addr, value, res); 1449 if (res) 1450 goto fault; 1451 break; 1452 1453 case MIPS16e_sd_op: 1454 writeDW: 1455 #ifdef CONFIG_64BIT 1456 /* 1457 * A 32-bit kernel might be running on a 64-bit processor. But 1458 * if we're on a 32-bit processor and an i-cache incoherency 1459 * or race makes us see a 64-bit instruction here the sdl/sdr 1460 * would blow up, so for now we don't handle unaligned 64-bit 1461 * instructions on 32-bit kernels. 1462 */ 1463 if (user && !access_ok(addr, 8)) 1464 goto sigbus; 1465 1466 MIPS16e_compute_return_epc(regs, &oldinst); 1467 value = regs->regs[reg]; 1468 StoreDW(addr, value, res); 1469 if (res) 1470 goto fault; 1471 break; 1472 #endif /* CONFIG_64BIT */ 1473 1474 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1475 goto sigill; 1476 1477 default: 1478 /* 1479 * Pheeee... We encountered an yet unknown instruction or 1480 * cache coherence problem. Die sucker, die ... 
1481 */ 1482 goto sigill; 1483 } 1484 1485 #ifdef CONFIG_DEBUG_FS 1486 unaligned_instructions++; 1487 #endif 1488 1489 return; 1490 1491 fault: 1492 /* roll back jump/branch */ 1493 regs->cp0_epc = origpc; 1494 regs->regs[31] = orig31; 1495 /* Did we have an exception handler installed? */ 1496 if (fixup_exception(regs)) 1497 return; 1498 1499 die_if_kernel("Unhandled kernel unaligned access", regs); 1500 force_sig(SIGSEGV); 1501 1502 return; 1503 1504 sigbus: 1505 die_if_kernel("Unhandled kernel unaligned access", regs); 1506 force_sig(SIGBUS); 1507 1508 return; 1509 1510 sigill: 1511 die_if_kernel 1512 ("Unhandled kernel unaligned access or invalid instruction", regs); 1513 force_sig(SIGILL); 1514 } 1515 1516 asmlinkage void do_ade(struct pt_regs *regs) 1517 { 1518 enum ctx_state prev_state; 1519 unsigned int *pc; 1520 1521 prev_state = exception_enter(); 1522 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1523 1, regs, regs->cp0_badvaddr); 1524 1525 #ifdef CONFIG_64BIT 1526 /* 1527 * check, if we are hitting space between CPU implemented maximum 1528 * virtual user address and 64bit maximum virtual user address 1529 * and do exception handling to get EFAULTs for get_user/put_user 1530 */ 1531 if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) && 1532 (regs->cp0_badvaddr < XKSSEG)) { 1533 if (fixup_exception(regs)) { 1534 current->thread.cp0_baduaddr = regs->cp0_badvaddr; 1535 return; 1536 } 1537 goto sigbus; 1538 } 1539 #endif 1540 1541 /* 1542 * Did we catch a fault trying to load an instruction? 1543 */ 1544 if (regs->cp0_badvaddr == regs->cp0_epc) 1545 goto sigbus; 1546 1547 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) 1548 goto sigbus; 1549 if (unaligned_action == UNALIGNED_ACTION_SIGNAL) 1550 goto sigbus; 1551 1552 /* 1553 * Do branch emulation only if we didn't forward the exception. 1554 * This is all so but ugly ... 1555 */ 1556 1557 /* 1558 * Are we running in microMIPS mode? 
1559 */ 1560 if (get_isa16_mode(regs->cp0_epc)) { 1561 /* 1562 * Did we catch a fault trying to load an instruction in 1563 * 16-bit mode? 1564 */ 1565 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) 1566 goto sigbus; 1567 if (unaligned_action == UNALIGNED_ACTION_SHOW) 1568 show_registers(regs); 1569 1570 if (cpu_has_mmips) { 1571 emulate_load_store_microMIPS(regs, 1572 (void __user *)regs->cp0_badvaddr); 1573 return; 1574 } 1575 1576 if (cpu_has_mips16) { 1577 emulate_load_store_MIPS16e(regs, 1578 (void __user *)regs->cp0_badvaddr); 1579 return; 1580 } 1581 1582 goto sigbus; 1583 } 1584 1585 if (unaligned_action == UNALIGNED_ACTION_SHOW) 1586 show_registers(regs); 1587 pc = (unsigned int *)exception_epc(regs); 1588 1589 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); 1590 1591 return; 1592 1593 sigbus: 1594 die_if_kernel("Kernel unaligned instruction access", regs); 1595 force_sig(SIGBUS); 1596 1597 /* 1598 * XXX On return from the signal handler we should advance the epc 1599 */ 1600 exception_exit(prev_state); 1601 } 1602 1603 #ifdef CONFIG_DEBUG_FS 1604 static int __init debugfs_unaligned(void) 1605 { 1606 debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir, 1607 &unaligned_instructions); 1608 debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, 1609 mips_debugfs_dir, &unaligned_action); 1610 return 0; 1611 } 1612 arch_initcall(debugfs_unaligned); 1613 #endif 1614