/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>
#include <asm/spinlock_types.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

/*
 * We need seven instructions after a TLB insert for it to take effect.
 * The PA8800/PA8900 processors are an exception and need 12 instructions.
 * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
 */
#ifdef CONFIG_64BIT
#define NUM_PIPELINE_INSNS	12
#else
#define NUM_PIPELINE_INSNS	7
#endif

	/* Insert num nops */
	.macro	insert_nops num
	.rept \num
	nop
	.endr
	.endm

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro	get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *		Already using a kernel stack, so call the
	 *		get_stack_use_r30 macro to push a pt_regs structure
	 *		on the stack, and store registers there.
	 *	else
	 *		Need to set up a kernel stack, so call the
	 *		get_stack_use_cr30 macro to set up a pointer
	 *		to the pt_regs structure contained within the
	 *		task pointer pointed to by cr30. Load the stack
	 *		pointer from the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 */
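	/*
	 * A rough C-style sketch of the decision described above
	 * (illustrative only; the helper names here are not real
	 * symbols):
	 *
	 *	if (sr7 == 0) {			// already in the kernel
	 *		regs = (struct pt_regs *)r30;	// push on stack
	 *		r30 += PT_SZ_ALGN;
	 *	} else {			// coming from user space
	 *		regs = TASK_REGS(cr30);	// inside the task struct
	 *		r30 = task_stack(cr30) + PT_SZ_ALGN;
	 *	}
	 *	r29 = regs;			// survives the rfir
	 */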
	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl	%cr30, %r1
	tophys	%r1,%r9		/* task_struct */
	LDREG	TASK_STACK(%r9),%r30
	ldo	PT_SZ_ALGN(%r30),%r30
	mtsp	%r0,%sr7	/* clear sr7 after kernel stack was set! */
	mtsp	%r16,%sr3
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm
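	/*
	 * For illustration, a vector entry such as "dtlb_20 15" (see the
	 * fault vectors further down) assembles to roughly:
	 *
	 *	mfctl	%isr, %r24	; faulting space id -> spc
	 *	b	dtlb_miss_20w	; (dtlb_miss_20 on 32-bit kernels)
	 *	mfctl	%ior, %r8	; faulting offset -> va, executed
	 *				; in the branch delay slot
	 *	.align	32
	 */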
#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru_safe	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	extru_safe	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy	%r0,\pte
#endif
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru_safe	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm

	/* Look up PTE in a 3-Level scheme. */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy	%r0,\pte
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s	\index(\pgd),\pgd
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	shld	\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm
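	/*
	 * C-style sketch of the table walk above (illustrative only;
	 * the helper names are not real functions, and all the shift
	 * and mask constants are resolved at assembly time):
	 *
	 *	pgd = base[(va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)];
	 *	if (!(pgd & _PxD_PRESENT))	goto fault;
	 *	pmd = pgd_page(pgd)[(va >> PMD_SHIFT) & (PTRS_PER_PMD - 1)];
	 *	if (!(pmd & _PxD_PRESENT))	goto fault;
	 *	ptep = &pmd_page(pmd)[(va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
	 */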
	/* Acquire page_table_lock and check page is present. */
	.macro	ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\tmp1,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm

	/* Release page_table_lock if the fault was for user space.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. Note stw may not be
	   executed, so we provide one extra nop when CONFIG_TLB_PTLOCK
	   is defined. */
	.macro	ptl_unlock	spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ldi		__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
	or,COND(=)	%r0,\spc,%r0
	stw,ma		\tmp2,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
	insert_nops	NUM_PIPELINE_INSNS - 4
#else
	insert_nops	NUM_PIPELINE_INSNS - 1
#endif
	.endm
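	/*
	 * Rough C equivalent of the lock/unlock pair above (illustrative;
	 * LDCW is the PA-RISC atomic load-and-clear word, and a zero in
	 * the lock word means "locked"):
	 *
	 *	if (spc != 0) {				// user space fault
	 *		while ((old = ldcw(ptl)) == 0)	// spin while locked
	 *			;
	 *		pte = *ptp;
	 *		if (!(pte & _PAGE_PRESENT)) {
	 *			*ptl = old;		// undo the lock
	 *			goto fault;
	 *		}
	 *	} else {
	 *		pte = *ptp;			// kernel: lock-free
	 *		if (!(pte & _PAGE_PRESENT))
	 *			goto fault;
	 *	}
	 */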
	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
	#define PFN_START_BIT	(63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,PFN_START_BIT,PFN_START_BIT+1,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,PFN_START_BIT,PFN_START_BIT+1,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */

#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
	/* need to drop DMB bit, as it's used as SPECIAL flag */
	depi	0,_PAGE_SPECIAL_BIT,1,\pte
#endif

	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ).  While the kernel can't directly write
	 * user pages which have _PAGE_WRITE zero, it can read pages
	 * which have _PAGE_READ zero (PL <= PL1).  Thus, the kernel
	 * exception fault handler doesn't trigger when reading pages
	 * that aren't user read accessible */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot

	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
	/* need to drop DMB bit, as it's used as SPECIAL flag */
	depi	0,_PAGE_SPECIAL_BIT,1,\pte
#endif
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
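	/*
	 * Rough illustration of the f_extend effect (assuming 4k pages):
	 * a physical page address whose top nibble is 0xf, i.e. one in
	 * the 0xfXXXXXXX I/O range, has those f's sign-extended through
	 * the upper word so the PA2.0 TLB sees an I/O space address;
	 * for any other address the second extract is nullified and the
	 * pte is left unchanged.
	 */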
	/* The alias region is comprised of a pair of 4 MB regions
	 * aligned to 8 MB. It is used to clear/copy/flush user pages
	 * using kernel virtual addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry is
	 * needed---as for clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
	copy	\va,\tmp1
	depi_safe	0,31,TMPALIAS_SIZE_BITS+1,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u	\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z	\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z	\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
	extrw,u,=	\va,31-TMPALIAS_SIZE_BITS,1,%r0
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte

	/* convert phys addr in \pte (from r23 or r26) to tlb insert format */
	SHRREG		\pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
	depi_safe	_PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code
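	/*
	 * Layout note (illustrative): each entry in the vectors above is
	 * padded to 32 bytes by ".align 32", so the processor enters the
	 * handler for interruption number N at fault_vector_xx + N * 32,
	 * with only the shadowed registers safe to use until one of the
	 * get_stack macros has run.
	 */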
	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	mfctl	%cr30,%r1	/* task_struct */
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
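	/*
	 * _switch_to in rough C terms (an illustrative sketch; "ksp" and
	 * "kpc" stand for the values kept at TASK_PT_KSP/TASK_PT_KPC):
	 *
	 *	prev->ksp = sp;			// current kernel stack
	 *	prev->kpc = &_switch_to_ret;	// where prev resumes
	 *	sp   = next->ksp;
	 *	cr30 = next;			// "current" task pointer
	 *	goto *next->kpc;	// lands in _switch_to_ret, which
	 *				// returns prev to the C caller (%r28)
	 */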
	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16		/* task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm	PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2
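	/*
	 * The intr_return/intr_check_sig flow above, sketched as an
	 * illustrative C-style loop (the real code re-reads the thread
	 * flags on every pass):
	 *
	 *	for (;;) {
	 *		if (ti_flags & _TIF_NEED_RESCHED)
	 *			schedule();		   // then re-check
	 *		else if ((ti_flags & other_work) &&
	 *			 !on_gateway_page(regs))
	 *			do_notify_resume(regs, 0); // signals etc.
	 *		else
	 *			break;		// intr_restore, then rfi
	 *	}
	 */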
	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	ldw	TI_PRE_COUNT(%r1), %r19
	cmpib,<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)
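	/*
	 * Note on the tail of intr_extint above (illustrative): %r2 is
	 * preloaded with the address of intr_return before branching, so
	 * when the C handler do_cpu_irq_mask() returns it "returns"
	 * straight into the common interruption exit path rather than
	 * coming back here.
	 */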
	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl	%isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi		0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if defined(CONFIG_64BIT)
skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 GB on ILP64,
	 * we need to adjust iasq/iaoq here in the same way we adjusted
	 * isr/ior above.
	 */
	bb,COND(>=),n	%r8,PSW_W_BIT,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq0/iaoq0 */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)

	LDREG		PT_IASQ1(%r29), %r16
	LDREG		PT_IAOQ1(%r29), %r17
	/* adjust iasq1/iaoq1 */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ1(%r29)
	STREG	%r17, PT_IAOQ1(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock	spc,t0,t1
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop
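	/*
	 * All of the data TLB miss handlers here share one shape;
	 * roughly, in C-like pseudocode (illustrative only):
	 *
	 *	pte = walk_page_table(va);	// L2_ptep / L3_ptep
	 *	if (!pte_present(pte))
	 *		goto check_alias_or_fault; // tmpalias or slow path
	 *	ptl_lock();
	 *	pte |= _PAGE_ACCESSED;		// update_accessed
	 *	*ptp = pte;
	 *	tlb_insert(pte, prot);	// idtlbt (2.0), idtlba+idtlbp (1.1)
	 *	ptl_unlock();
	 *	rfir();		// restore shadowed regs, retry the access
	 */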
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. The kernel no longer faults doing flushes.
	 * Use of lpa and probe instructions is rare. Given the issue
	 * with shadow registers, we defer everything to the "slow" path.
	 */
	b,n	nadtlb_fault

#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop
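	/*
	 * Background note: the "gateway page" mentioned above carries the
	 * syscall entry code (including the light-weight syscall paths).
	 * It lives at a kernel address yet must be executable from user
	 * space, which is why the instruction-side miss handlers accept
	 * user faults on a kernel-space address.
	 */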
naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock	spc,t0,t1
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock	spc,t0,t1
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	insert_nops	NUM_PIPELINE_INSNS - 1
	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock	spc,t0,t1
	rfir
	nop
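	/*
	 * Rough C sketch of the dbit_trap_* handlers (illustrative):
	 * the hardware does not write the dirty bit back into the page
	 * table itself, so the first store to a clean page traps and we
	 * set it in software:
	 *
	 *	ptl_lock();			// faults out if !present
	 *	pte = *ptp | _PAGE_DIRTY | _PAGE_ACCESSED;
	 *	*ptp = pte;			// update_dirty
	 *	tlb_insert(pte, prot);		// re-insert, D bit now set
	 *	ptl_unlock();
	 */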
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock	spc,t0,t1
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8  - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)
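	/*
	 * For illustration, "fork_like clone" above expands to roughly:
	 *
	 *	ENTRY_CFI(sys_clone_wrapper)
	 *		mfctl	%cr30,%r1		; current task
	 *		ldo	TASK_REGS(%r1),%r1
	 *		reg_save %r1		; r3-r18 into pt_regs, so
	 *					; the child can resume from them
	 *		mfctl	%cr27, %r28
	 *		ldil	L%sys_clone, %r31
	 *		be	R%sys_clone(%sr4,%r31)	; tail-call into C
	 *		STREG	%r28, PT_CR27(%r1)	; delay slot: save cr27
	 *	ENDPROC_CFI(sys_clone_wrapper)
	 */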
ENTRY_CFI(sys_rt_sigreturn_wrapper)
	mfctl	%cr30,%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl	%cr30, %r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	mfctl	%cr30,%r19
	LDREG	TASK_TI_FLAGS(%r19),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 *        consistent with all the relevant state of the process
	 *        before the syscall.  We need to verify this.
	 */
	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	mfctl	%cr30,%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig
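	/*
	 * Sketch of the exit-work loop above in C terms (illustrative):
	 *
	 *	while (ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED)) {
	 *		reg_save(regs);			// r3-r18 for sigcontext
	 *		do_notify_resume(regs, 1);	// in_syscall = 1
	 *		reg_restore(regs);
	 *	}
	 */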
syscall_restore:
	mfctl	%cr30,%r1

	/* Are we being ptraced? */
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			   /* Restore user sp */
	mfsp	%sr3,%r1			   /* Get user space id */
	mtsp	%r1,%sr7			   /* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	PRIV_USER,31,2,%r31	/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			   /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

#ifdef CONFIG_64BIT
	extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0
	depi	-1,4,1,%r20			   /* W bit */
#endif
	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)
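	/*
	 * Aside on the W-bit handling above (illustrative): wide-mode
	 * user programs enter the kernel with bit 0 of %sp set as a
	 * marker, which is safe because real stacks are much more
	 * strictly aligned:
	 *
	 *	w = sp & 1;		// stashed at syscall entry
	 *	if (!w)
	 *		psw &= ~PSW_W;	// return in narrow mode
	 *	sp ^= w;		// strip the marker bit again
	 */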
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	PRIV_USER,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)
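	/*
	 * Illustrative note: with CONFIG_FUNCTION_TRACER the compiler
	 * plants an _mcount() call near every function prologue; the stub
	 * above forwards it to ftrace_function_trampoline with %arg2 set
	 * to the caller's original %sp and %arg3 = 0 (no pt_regs).
	 */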
#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif
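	/*
	 * Note (illustrative): both trampolines above finish with
	 * "ldo -4(%r1), %r1; bv,n (%r1)", backing the saved return point
	 * up by one 4-byte instruction so that execution resumes at the
	 * start of the traced function proper once the tracer has run.
	 */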
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
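	/*
	 * Usage sketch for call_on_stack (illustrative, not a quote of a
	 * real call site):
	 *
	 *	call_on_stack(param, (void *)func, new_stack_top);
	 *
	 * i.e. %arg0 carries the parameter, %arg1 the C function pointer
	 * (a function descriptor on 64-bit, possibly a PLABEL on 32-bit)
	 * and %arg2 the stack to run it on, as the IRQ stack code does.
	 */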
ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1.  This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value.  So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)    /* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)    /* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)    /* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)    /* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)    /* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)    /* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)    /* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)    /* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)    /* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)    /* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)    /* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)    /* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)    /* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)    /* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)    /* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)    /* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)    /* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)    /* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)    /* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)    /* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)    /* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)    /* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)    /* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)    /* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)    /* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)    /* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)    /* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)    /* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)    /* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)    /* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)    /* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)    /* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)    /* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)    /* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)    /* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)    /* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)    /* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)    /* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)    /* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)    /* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)    /* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)    /* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)    /* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)    /* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)    /* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)    /* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)    /* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)    /* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)    /* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)    /* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)    /* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)    /* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)    /* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)    /* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)
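	/*
	 * Illustrative note on the two jump tables above: "blr %r8,%r0"
	 * branches %r8 * 8 bytes past its delay-slot nop, i.e. to the
	 * %r8-th two-instruction (bv + copy/ldi) pair, so every general
	 * register gets exactly one slot and no compare-and-branch chain
	 * is needed.
	 */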