/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
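
	/* In rough C-like pseudocode, virt_map amounts to (a sketch of
	 * the intended effect, not the exact nullification semantics):
	 *
	 *	psw.I = 0;			// mask interrupts
	 *	sr4 = sr5 = sr6 = sr7 = 0;	// kernel space ids
	 *	if (old_sr7 != 0)
	 *		sr3 = old_sr7;		// remember user space id
	 *	ipsw  = KERNEL_PSW;
	 *	iiaoq = the local label 4: above;
	 *	rfir();				// resume there, virtually mapped
	 */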
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *		Already using a kernel stack, so call the
	 *		get_stack_use_r30 macro to push a pt_regs structure
	 *		on the stack, and store registers there.
	 *	else
	 *		Need to set up a kernel stack, so call the
	 *		get_stack_use_cr30 macro to set up a pointer
	 *		to the pt_regs structure contained within the
	 *		task pointer pointed to by cr30. Set the stack
	 *		pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer to
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl	%cr30, %r1
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r30, PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	STREG	%r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */
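
	/* For example, "def 2" in the fault vectors further below
	 * expands to (a sketch):
	 *
	 *	b	intr_save	// branch to the common handler
	 *	ldi	2, %r8		// trap code in shadowed %r8
	 *	.align	32		// each vector slot is 32 bytes
	 */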
	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm
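
	/* In C terms, get_pgd is roughly (a sketch):
	 *
	 *	reg = (spc == 0) ? __pa(swapper_pg_dir)	// kernel fault
	 *			 : mfctl(cr25);		// user pgd
	 *
	 * The or,COND(=) nullifies the mfctl when spc is zero, so the
	 * swapper_pg_dir address just loaded is kept.
	 */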
	/*
		space_check(spc,tmp,fault)

		spc   - The space we saw the fault with.
		tmp   - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy	\pmd,%r9
	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_ptep	ptep,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptep)
	.endm
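
	/* That is, roughly (a sketch):
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 *
	 * The and,COND(<>) nullifies the store when the bit is already
	 * set, which is what keeps the cache line clean.
	 */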
	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
	.endm

	/* bit shift needed to convert a PFN (based on the kernel's
	 * PAGE_SIZE) to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
		64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
		(63-58)+PAGE_ADD_SHIFT,\pte
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA 2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm
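
	/* The privilege handling in both macros above is, in outline
	 * (a sketch):
	 *
	 *	if (pte & _PAGE_USER)
	 *		prot |= readable at PL 3;	// depi 7,11,3
	 *	if (pte & _PAGE_GATEWAY)
	 *		prot &= clear PL2;		// promote to kernel
	 */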
	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB aligned 16MB region used to do
	 * clear and copy of user pages at addresses congruent with the
	 * user virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	depi	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u	\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi	(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
#ifdef CONFIG_64BIT
	depd,z	\prot,8,7,\prot
#else
	depw,z	\prot,8,7,\prot
#endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or	%r26,%r0,\pte
	.endm
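
	/* The entry check in do_alias is, in effect (a sketch):
	 *
	 *	if (spc != 0)
	 *		goto fault;	// only the kernel maps tmpalias
	 *	if ((va & ~0x7fffff) != TMPALIAS_MAP_START)
	 *		goto fault;	// va's upper bits must match
	 */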
	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.text

	.align	PAGE_SIZE

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align	PAGE_SIZE
ENTRY(end_fault_vector)

	.import	handle_interruption,code
	.import	do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_ to properly set up the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.import do_fork
ENTRY(__kernel_thread)
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)	/* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26	/* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26	/* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
ENDPROC(__kernel_thread)
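
	/* The BL above is, in effect (a sketch; see do_fork's
	 * prototype of this era for the full argument list):
	 *
	 *	do_fork(CLONE_VM | CLONE_UNTRACED | flags,
	 *		1,	// stack_start == 1 marks a kernel thread
	 *		regs,	// the pt_regs area built on our stack
	 *		...);
	 */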
	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26
ENDPROC(ret_from_kernel_thread)

	.import	sys_execve, code
ENTRY(__execve)
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return	/* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop
ENDPROC(__execve)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC(_switch_to)
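
	/* The classic kernel stack switch, roughly (a sketch):
	 *
	 *	prev's saved KPC = &_switch_to_ret;	// TASK_PT_KPC
	 *	prev's saved KSP = sp;			// TASK_PT_KSP
	 *	sp   = next's saved KSP;
	 *	cr30 = next's thread_info;
	 *	jump to next's saved KPC;	// usually _switch_to_ret,
	 *					// which returns prev in %r28
	 */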
	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page).  Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm	PSW_SM_I, %r0

intr_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy	%r16,%r29
	ldo	PT_FR31(%r29),%r1
	rest_fp	%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm	PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */
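
	/* Shape of the return path above (a sketch):
	 *
	 *	intr_return:		enable interrupts;
	 *	intr_check_resched:	if (TIF_NEED_RESCHED)
	 *					goto intr_do_resched;
	 *	intr_check_sig:		if (signal work pending &&
	 *				    returning to user space)
	 *					do_notify_resume(regs, 0);
	 *	intr_restore:		restore registers; rfi;
	 */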
	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c.
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n	6,%r26,skip_save_ior


	mfctl	%cr20, %r16	/* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%cr21, %r17	/* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi	0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */
	/* adjust isr/ior. */
	extrd,u	%r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd	%r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi	0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot

	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop
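
	/* Every miss handler in this file follows the same basic
	 * pattern; in outline (a sketch):
	 *
	 *	space_adjust/get_pgd/space_check   // locate the page table
	 *	L2_ptep or L3_ptep                 // walk it, else fault
	 *	update_ptep                        // set _PAGE_ACCESSED
	 *	make_insert_tlb(_11)               // pte/prot to TLB format
	 *	insert (idtlbt, or idtlba+idtlbp)  // then rfir to restart
	 */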
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot

	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl	%cr19,%r9	/* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi	0x280,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n	%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
	BL	get_register,%r25
	extrw,u	%r9,15,5,%r8		/* Get index register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy	%r1,%r24
	BL	get_register,%r25
	extrw,u	%r9,10,5,%r8		/* Get base register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	add,l	%r1,%r24,%r1		/* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8		/* Set PSW_N */
	mtctl	%r8,%ipsw

	rfir
	nop
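
	/* In outline, the emulation above does (a sketch):
	 *
	 *	iir = mfctl(cr19);
	 *	if (insn is fdc/fdce/pdc or "fic,4f") {
	 *		if (m bit set)
	 *			base += index;	// emulate base modification
	 *		nullify insn; rfir;
	 *	} else
	 *		check for a probe insn (below);
	 */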
	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi	0x80,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw] */
	BL	get_register,%r25	/* Find the target register */
	extrw,u	%r9,31,5,%r8		/* Get target register */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	copy	%r0,%r1			/* Write zero to target register */
	b	nadtlb_nullify		/* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt		pte,prot

	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt		pte,prot

	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt		pte,prot

	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt		pte,prot

	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0
dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,COND(=)	0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nounlock_20w
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nounlock_11
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt		pte,prot

#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,spc,dbit_nounlock_20
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	6,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */
	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3    status/exception, not preserved
	   %fr4  - %fr7    arguments
	   %fr8  - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

ENTRY(sys_fork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv	%r0(%r2)
	STREG	%r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28
ENDPROC(child_return)


ENTRY(sys_clone_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)


ENTRY(sys_vfork_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)


	.macro	execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would new thread need old
	 * thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	\execve,%r2
	copy	%r1,%arg0

	ldo	-FRAME_SIZE(%r30),%r30
	LDREG	-RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo	-1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy	%r2,%r19

error_\execve:
	bv	%r0(%r19)
	nop
	.endm

	.import sys_execve
ENTRY(sys_execve_wrapper)
	execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)

#ifdef CONFIG_64BIT
	.import sys32_execve
ENTRY(sys32_execve_wrapper)
	execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
#endif

ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28	/* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys_sigaltstack_wrapper)

#ifdef CONFIG_64BIT
ENTRY(sys32_sigaltstack_wrapper)
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif

ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	-PER_HPUX(%r19), %r19
	cmpib,COND(<>),n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG	%r22,TASK_PT_GR22(%r1)
	STREG	%r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	(_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		/* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			/* Restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */
	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER
	.import ftrace_function_trampoline,code
ENTRY(_mcount)
	copy	%r3, %arg2
	b	ftrace_function_trampoline
	nop
ENDPROC(_mcount)

ENTRY(return_to_handler)
	load32	return_trampoline, %rp
	copy	%ret0, %arg0
	copy	%ret1, %arg1
	b	ftrace_return_to_handler
	nop
return_trampoline:
	copy	%ret0, %rp
	copy	%r23, %ret0
	copy	%r24, %ret1

.globl ftrace_stub
ftrace_stub:
	bv	%r0(%rp)
	nop
ENDPROC(return_to_handler)
#endif	/* CONFIG_FUNCTION_TRACER */
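
	/* get_register and set_register below are branch tables:
	 * "blr %r8,%r0" jumps into the table indexed by the register
	 * number in %r8, two instructions per entry. In effect
	 * (a sketch):
	 *
	 *	r1 = GR[r8];	// get_register, -1 for shadowed regs
	 *	GR[r8] = r1;	// set_register
	 */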
get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1


set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31