1/* SPDX-License-Identifier: GPL-2.0-or-later */ 2/* 3 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 4 * 5 * kernel entry points (interruptions, system call wrappers) 6 * Copyright (C) 1999,2000 Philipp Rumpf 7 * Copyright (C) 1999 SuSE GmbH Nuernberg 8 * Copyright (C) 2000 Hewlett-Packard (John Marvin) 9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) 10 */ 11 12#include <asm/asm-offsets.h> 13 14/* we have the following possibilities to act on an interruption: 15 * - handle in assembly and use shadowed registers only 16 * - save registers to kernel stack and handle in assembly or C */ 17 18 19#include <asm/psw.h> 20#include <asm/cache.h> /* for L1_CACHE_SHIFT */ 21#include <asm/assembly.h> /* for LDREG/STREG defines */ 22#include <asm/signal.h> 23#include <asm/unistd.h> 24#include <asm/ldcw.h> 25#include <asm/traps.h> 26#include <asm/thread_info.h> 27#include <asm/alternative.h> 28 29#include <linux/linkage.h> 30#include <linux/pgtable.h> 31 32#ifdef CONFIG_64BIT 33 .level 2.0w 34#else 35 .level 2.0 36#endif 37 38 .import pa_tlb_lock,data 39 .macro load_pa_tlb_lock reg 40 mfctl %cr25,\reg 41 addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg 42 .endm 43 44 /* space_to_prot macro creates a prot id from a space id */ 45 46#if (SPACEID_SHIFT) == 0 47 .macro space_to_prot spc prot 48 depd,z \spc,62,31,\prot 49 .endm 50#else 51 .macro space_to_prot spc prot 52 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot 53 .endm 54#endif 55 56 /* Switch to virtual mapping, trashing only %r1 */ 57 .macro virt_map 58 /* pcxt_ssm_bug */ 59 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ 60 mtsp %r0, %sr4 61 mtsp %r0, %sr5 62 mtsp %r0, %sr6 63 tovirt_r1 %r29 64 load32 KERNEL_PSW, %r1 65 66 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ 67 mtctl %r0, %cr17 /* Clear IIASQ tail */ 68 mtctl %r0, %cr17 /* Clear IIASQ head */ 69 mtctl %r1, %ipsw 70 load32 4f, %r1 71 mtctl %r1, %cr18 /* Set IIAOQ tail */ 72 ldo 4(%r1), %r1 73 mtctl %r1, %cr18 /* Set IIAOQ head 
*/ 74 rfir 75 nop 764: 77 .endm 78 79 /* 80 * The "get_stack" macros are responsible for determining the 81 * kernel stack value. 82 * 83 * If sr7 == 0 84 * Already using a kernel stack, so call the 85 * get_stack_use_r30 macro to push a pt_regs structure 86 * on the stack, and store registers there. 87 * else 88 * Need to set up a kernel stack, so call the 89 * get_stack_use_cr30 macro to set up a pointer 90 * to the pt_regs structure contained within the 91 * task pointer pointed to by cr30. Set the stack 92 * pointer to point to the end of the task structure. 93 * 94 * Note that we use shadowed registers for temps until 95 * we can save %r26 and %r29. %r26 is used to preserve 96 * %r8 (a shadowed register) which temporarily contained 97 * either the fault type ("code") or the eirr. We need 98 * to use a non-shadowed register to carry the value over 99 * the rfir in virt_map. We use %r26 since this value winds 100 * up being passed as the argument to either do_cpu_irq_mask 101 * or handle_interruption. %r29 is used to hold a pointer 102 * the register save area, and once again, it needs to 103 * be a non-shadowed register so that it survives the rfir. 104 * 105 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame. 
106 */ 107 108 .macro get_stack_use_cr30 109 110 /* we save the registers in the task struct */ 111 112 copy %r30, %r17 113 mfctl %cr30, %r1 114 ldo THREAD_SZ_ALGN(%r1), %r30 115 mtsp %r0,%sr7 116 mtsp %r16,%sr3 117 tophys %r1,%r9 118 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */ 119 tophys %r1,%r9 120 ldo TASK_REGS(%r9),%r9 121 STREG %r17,PT_GR30(%r9) 122 STREG %r29,PT_GR29(%r9) 123 STREG %r26,PT_GR26(%r9) 124 STREG %r16,PT_SR7(%r9) 125 copy %r9,%r29 126 .endm 127 128 .macro get_stack_use_r30 129 130 /* we put a struct pt_regs on the stack and save the registers there */ 131 132 tophys %r30,%r9 133 copy %r30,%r1 134 ldo PT_SZ_ALGN(%r30),%r30 135 STREG %r1,PT_GR30(%r9) 136 STREG %r29,PT_GR29(%r9) 137 STREG %r26,PT_GR26(%r9) 138 STREG %r16,PT_SR7(%r9) 139 copy %r9,%r29 140 .endm 141 142 .macro rest_stack 143 LDREG PT_GR1(%r29), %r1 144 LDREG PT_GR30(%r29),%r30 145 LDREG PT_GR29(%r29),%r29 146 .endm 147 148 /* default interruption handler 149 * (calls traps.c:handle_interruption) */ 150 .macro def code 151 b intr_save 152 ldi \code, %r8 153 .align 32 154 .endm 155 156 /* Interrupt interruption handler 157 * (calls irq.c:do_cpu_irq_mask) */ 158 .macro extint code 159 b intr_extint 160 mfsp %sr7,%r16 161 .align 32 162 .endm 163 164 .import os_hpmc, code 165 166 /* HPMC handler */ 167 .macro hpmc code 168 nop /* must be a NOP, will be patched later */ 169 load32 PA(os_hpmc), %r3 170 bv,n 0(%r3) 171 nop 172 .word 0 /* checksum (will be patched) */ 173 .word 0 /* address of handler */ 174 .word 0 /* length of handler */ 175 .endm 176 177 /* 178 * Performance Note: Instructions will be moved up into 179 * this part of the code later on, once we are sure 180 * that the tlb miss handlers are close to final form. 
181 */ 182 183 /* Register definitions for tlb miss handler macros */ 184 185 va = r8 /* virtual address for which the trap occurred */ 186 spc = r24 /* space for which the trap occurred */ 187 188#ifndef CONFIG_64BIT 189 190 /* 191 * itlb miss interruption handler (parisc 1.1 - 32 bit) 192 */ 193 194 .macro itlb_11 code 195 196 mfctl %pcsq, spc 197 b itlb_miss_11 198 mfctl %pcoq, va 199 200 .align 32 201 .endm 202#endif 203 204 /* 205 * itlb miss interruption handler (parisc 2.0) 206 */ 207 208 .macro itlb_20 code 209 mfctl %pcsq, spc 210#ifdef CONFIG_64BIT 211 b itlb_miss_20w 212#else 213 b itlb_miss_20 214#endif 215 mfctl %pcoq, va 216 217 .align 32 218 .endm 219 220#ifndef CONFIG_64BIT 221 /* 222 * naitlb miss interruption handler (parisc 1.1 - 32 bit) 223 */ 224 225 .macro naitlb_11 code 226 227 mfctl %isr,spc 228 b naitlb_miss_11 229 mfctl %ior,va 230 231 .align 32 232 .endm 233#endif 234 235 /* 236 * naitlb miss interruption handler (parisc 2.0) 237 */ 238 239 .macro naitlb_20 code 240 241 mfctl %isr,spc 242#ifdef CONFIG_64BIT 243 b naitlb_miss_20w 244#else 245 b naitlb_miss_20 246#endif 247 mfctl %ior,va 248 249 .align 32 250 .endm 251 252#ifndef CONFIG_64BIT 253 /* 254 * dtlb miss interruption handler (parisc 1.1 - 32 bit) 255 */ 256 257 .macro dtlb_11 code 258 259 mfctl %isr, spc 260 b dtlb_miss_11 261 mfctl %ior, va 262 263 .align 32 264 .endm 265#endif 266 267 /* 268 * dtlb miss interruption handler (parisc 2.0) 269 */ 270 271 .macro dtlb_20 code 272 273 mfctl %isr, spc 274#ifdef CONFIG_64BIT 275 b dtlb_miss_20w 276#else 277 b dtlb_miss_20 278#endif 279 mfctl %ior, va 280 281 .align 32 282 .endm 283 284#ifndef CONFIG_64BIT 285 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */ 286 287 .macro nadtlb_11 code 288 289 mfctl %isr,spc 290 b nadtlb_miss_11 291 mfctl %ior,va 292 293 .align 32 294 .endm 295#endif 296 297 /* nadtlb miss interruption handler (parisc 2.0) */ 298 299 .macro nadtlb_20 code 300 301 mfctl %isr,spc 302#ifdef CONFIG_64BIT 303 b 
nadtlb_miss_20w 304#else 305 b nadtlb_miss_20 306#endif 307 mfctl %ior,va 308 309 .align 32 310 .endm 311 312#ifndef CONFIG_64BIT 313 /* 314 * dirty bit trap interruption handler (parisc 1.1 - 32 bit) 315 */ 316 317 .macro dbit_11 code 318 319 mfctl %isr,spc 320 b dbit_trap_11 321 mfctl %ior,va 322 323 .align 32 324 .endm 325#endif 326 327 /* 328 * dirty bit trap interruption handler (parisc 2.0) 329 */ 330 331 .macro dbit_20 code 332 333 mfctl %isr,spc 334#ifdef CONFIG_64BIT 335 b dbit_trap_20w 336#else 337 b dbit_trap_20 338#endif 339 mfctl %ior,va 340 341 .align 32 342 .endm 343 344 /* In LP64, the space contains part of the upper 32 bits of the 345 * fault. We have to extract this and place it in the va, 346 * zeroing the corresponding bits in the space register */ 347 .macro space_adjust spc,va,tmp 348#ifdef CONFIG_64BIT 349 extrd,u \spc,63,SPACEID_SHIFT,\tmp 350 depd %r0,63,SPACEID_SHIFT,\spc 351 depd \tmp,31,SPACEID_SHIFT,\va 352#endif 353 .endm 354 355 .import swapper_pg_dir,code 356 357 /* Get the pgd. For faults on space zero (kernel space), this 358 * is simply swapper_pg_dir. For user space faults, the 359 * pgd is stored in %cr25 */ 360 .macro get_pgd spc,reg 361 ldil L%PA(swapper_pg_dir),\reg 362 ldo R%PA(swapper_pg_dir)(\reg),\reg 363 or,COND(=) %r0,\spc,%r0 364 mfctl %cr25,\reg 365 .endm 366 367 /* 368 space_check(spc,tmp,fault) 369 370 spc - The space we saw the fault with. 371 tmp - The place to store the current space. 372 fault - Function to call on failure. 
373 374 Only allow faults on different spaces from the 375 currently active one if we're the kernel 376 377 */ 378 .macro space_check spc,tmp,fault 379 mfsp %sr7,\tmp 380 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */ 381 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page 382 * as kernel, so defeat the space 383 * check if it is */ 384 copy \spc,\tmp 385 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */ 386 cmpb,COND(<>),n \tmp,\spc,\fault 387 .endm 388 389 /* Look up a PTE in a 2-Level scheme (faulting at each 390 * level if the entry isn't present 391 * 392 * NOTE: we use ldw even for LP64, since the short pointers 393 * can address up to 1TB 394 */ 395 .macro L2_ptep pmd,pte,index,va,fault 396#if CONFIG_PGTABLE_LEVELS == 3 397 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index 398#else 399# if defined(CONFIG_64BIT) 400 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 401 #else 402 # if PAGE_SIZE > 4096 403 extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index 404 # else 405 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 406 # endif 407# endif 408#endif 409 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 410 copy %r0,\pte 411 ldw,s \index(\pmd),\pmd 412 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault 413 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ 414 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd 415 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 416 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 417 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ 418 .endm 419 420 /* Look up PTE in a 3-Level scheme. 421 * 422 * Here we implement a Hybrid L2/L3 scheme: we allocate the 423 * first pmd adjacent to the pgd. This means that we can 424 * subtract a constant offset to get to it. The pmd and pgd 425 * sizes are arranged so that a single pmd covers 4GB (giving 426 * a full LP64 process access to 8TB) so our lookups are 427 * effectively L2 for the first 4GB of the kernel (i.e. 
for 428 * all ILP32 processes and all the kernel for machines with 429 * under 4GB of memory) */ 430 .macro L3_ptep pgd,pte,index,va,fault 431#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ 432 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 433 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 434 ldw,s \index(\pgd),\pgd 435 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 436 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault 437 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 438 shld \pgd,PxD_VALUE_SHIFT,\index 439 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 440 copy \index,\pgd 441 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 442 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd 443#endif 444 L2_ptep \pgd,\pte,\index,\va,\fault 445 .endm 446 447 /* Acquire pa_tlb_lock lock and check page is present. */ 448 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault 449#ifdef CONFIG_SMP 45098: cmpib,COND(=),n 0,\spc,2f 451 load_pa_tlb_lock \tmp 4521: LDCW 0(\tmp),\tmp1 453 cmpib,COND(=) 0,\tmp1,1b 454 nop 455 LDREG 0(\ptp),\pte 456 bb,<,n \pte,_PAGE_PRESENT_BIT,3f 457 b \fault 458 stw \spc,0(\tmp) 45999: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 460#endif 4612: LDREG 0(\ptp),\pte 462 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 4633: 464 .endm 465 466 /* Release pa_tlb_lock lock without reloading lock address. 467 Note that the values in the register spc are limited to 468 NR_SPACE_IDS (262144). Thus, the stw instruction always 469 stores a nonzero value even when register spc is 64 bits. 470 We use an ordered store to ensure all prior accesses are 471 performed prior to releasing the lock. */ 472 .macro tlb_unlock0 spc,tmp 473#ifdef CONFIG_SMP 47498: or,COND(=) %r0,\spc,%r0 475 stw,ma \spc,0(\tmp) 47699: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 477#endif 478 .endm 479 480 /* Release pa_tlb_lock lock. 
*/ 481 .macro tlb_unlock1 spc,tmp 482#ifdef CONFIG_SMP 48398: load_pa_tlb_lock \tmp 48499: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) 485 tlb_unlock0 \spc,\tmp 486#endif 487 .endm 488 489 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and 490 * don't needlessly dirty the cache line if it was already set */ 491 .macro update_accessed ptp,pte,tmp,tmp1 492 ldi _PAGE_ACCESSED,\tmp1 493 or \tmp1,\pte,\tmp 494 and,COND(<>) \tmp1,\pte,%r0 495 STREG \tmp,0(\ptp) 496 .endm 497 498 /* Set the dirty bit (and accessed bit). No need to be 499 * clever, this is only used from the dirty fault */ 500 .macro update_dirty ptp,pte,tmp 501 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp 502 or \tmp,\pte,\pte 503 STREG \pte,0(\ptp) 504 .endm 505 506 /* We have (depending on the page size): 507 * - 38 to 52-bit Physical Page Number 508 * - 12 to 26-bit page offset 509 */ 510 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 511 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ 512 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 513 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) 514 515 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 516 .macro convert_for_tlb_insert20 pte,tmp 517#ifdef CONFIG_HUGETLB_PAGE 518 copy \pte,\tmp 519 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 520 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 521 522 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 523 (63-58)+PAGE_ADD_SHIFT,\pte 524 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 525 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ 526 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte 527#else /* Huge pages disabled */ 528 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 529 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 530 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 531 (63-58)+PAGE_ADD_SHIFT,\pte 532#endif 533 .endm 534 535 /* Convert the pte and prot to tlb insertion values. 
How 536 * this happens is quite subtle, read below */ 537 .macro make_insert_tlb spc,pte,prot,tmp 538 space_to_prot \spc \prot /* create prot id from space */ 539 /* The following is the real subtlety. This is depositing 540 * T <-> _PAGE_REFTRAP 541 * D <-> _PAGE_DIRTY 542 * B <-> _PAGE_DMB (memory break) 543 * 544 * Then incredible subtlety: The access rights are 545 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE 546 * See 3-14 of the parisc 2.0 manual 547 * 548 * Finally, _PAGE_READ goes in the top bit of PL1 (so we 549 * trigger an access rights trap in user space if the user 550 * tries to read an unreadable page */ 551 depd \pte,8,7,\prot 552 553 /* PAGE_USER indicates the page can be read with user privileges, 554 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 555 * contains _PAGE_READ) */ 556 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 557 depdi 7,11,3,\prot 558 /* If we're a gateway page, drop PL2 back to zero for promotion 559 * to kernel privilege (so we can execute the page as kernel). 560 * Any privilege promotion page always denys read and write */ 561 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0 562 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */ 563 564 /* Enforce uncacheable pages. 565 * This should ONLY be use for MMIO on PA 2.0 machines. 566 * Memory/DMA is cache coherent on all PA2.0 machines we support 567 * (that means T-class is NOT supported) and the memory controllers 568 * on most of those machines only handles cache transactions. 
569 */ 570 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0 571 depdi 1,12,1,\prot 572 573 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 574 convert_for_tlb_insert20 \pte \tmp 575 .endm 576 577 /* Identical macro to make_insert_tlb above, except it 578 * makes the tlb entry for the differently formatted pa11 579 * insertion instructions */ 580 .macro make_insert_tlb_11 spc,pte,prot 581 zdep \spc,30,15,\prot 582 dep \pte,8,7,\prot 583 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0 584 depi 1,12,1,\prot 585 extru,= \pte,_PAGE_USER_BIT,1,%r0 586 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */ 587 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0 588 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */ 589 590 /* Get rid of prot bits and convert to page addr for iitlba */ 591 592 depi 0,31,ASM_PFN_PTE_SHIFT,\pte 593 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte 594 .endm 595 596 /* This is for ILP32 PA2.0 only. The TLB insertion needs 597 * to extend into I/O space if the address is 0xfXXXXXXX 598 * so we extend the f's into the top word of the pte in 599 * this case */ 600 .macro f_extend pte,tmp 601 extrd,s \pte,42,4,\tmp 602 addi,<> 1,\tmp,%r0 603 extrd,s \pte,63,25,\pte 604 .endm 605 606 /* The alias region is an 8MB aligned 16MB to do clear and 607 * copy user pages at addresses congruent with the user 608 * virtual address. 
609 * 610 * To use the alias page, you set %r26 up with the to TLB 611 * entry (identifying the physical page) and %r23 up with 612 * the from tlb entry (or nothing if only a to entry---for 613 * clear_user_page_asm) */ 614 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype 615 cmpib,COND(<>),n 0,\spc,\fault 616 ldil L%(TMPALIAS_MAP_START),\tmp 617#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) 618 /* on LP64, ldi will sign extend into the upper 32 bits, 619 * which is behaviour we don't want */ 620 depdi 0,31,32,\tmp 621#endif 622 copy \va,\tmp1 623 depi 0,31,23,\tmp1 624 cmpb,COND(<>),n \tmp,\tmp1,\fault 625 mfctl %cr19,\tmp /* iir */ 626 /* get the opcode (first six bits) into \tmp */ 627 extrw,u \tmp,5,6,\tmp 628 /* 629 * Only setting the T bit prevents data cache movein 630 * Setting access rights to zero prevents instruction cache movein 631 * 632 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go 633 * to type field and _PAGE_READ goes to top bit of PL1 634 */ 635 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot 636 /* 637 * so if the opcode is one (i.e. this is a memory management 638 * instruction) nullify the next load so \prot is only T. 639 * Otherwise this is a normal data operation 640 */ 641 cmpiclr,= 0x01,\tmp,%r0 642 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot 643.ifc \patype,20 644 depd,z \prot,8,7,\prot 645.else 646.ifc \patype,11 647 depw,z \prot,8,7,\prot 648.else 649 .error "undefined PA type to do_alias" 650.endif 651.endif 652 /* 653 * OK, it is in the temp alias region, check whether "from" or "to". 654 * Check "subtle" note in pacache.S re: r23/r26. 
655 */ 656#ifdef CONFIG_64BIT 657 extrd,u,*= \va,41,1,%r0 658#else 659 extrw,u,= \va,9,1,%r0 660#endif 661 or,COND(tr) %r23,%r0,\pte 662 or %r26,%r0,\pte 663 .endm 664 665 666 /* 667 * Fault_vectors are architecturally required to be aligned on a 2K 668 * boundary 669 */ 670 671 .section .text.hot 672 .align 2048 673 674ENTRY(fault_vector_20) 675 /* First vector is invalid (0) */ 676 .ascii "cows can fly" 677 .byte 0 678 .align 32 679 680 hpmc 1 681 def 2 682 def 3 683 extint 4 684 def 5 685 itlb_20 PARISC_ITLB_TRAP 686 def 7 687 def 8 688 def 9 689 def 10 690 def 11 691 def 12 692 def 13 693 def 14 694 dtlb_20 15 695 naitlb_20 16 696 nadtlb_20 17 697 def 18 698 def 19 699 dbit_20 20 700 def 21 701 def 22 702 def 23 703 def 24 704 def 25 705 def 26 706 def 27 707 def 28 708 def 29 709 def 30 710 def 31 711END(fault_vector_20) 712 713#ifndef CONFIG_64BIT 714 715 .align 2048 716 717ENTRY(fault_vector_11) 718 /* First vector is invalid (0) */ 719 .ascii "cows can fly" 720 .byte 0 721 .align 32 722 723 hpmc 1 724 def 2 725 def 3 726 extint 4 727 def 5 728 itlb_11 PARISC_ITLB_TRAP 729 def 7 730 def 8 731 def 9 732 def 10 733 def 11 734 def 12 735 def 13 736 def 14 737 dtlb_11 15 738 naitlb_11 16 739 nadtlb_11 17 740 def 18 741 def 19 742 dbit_11 20 743 def 21 744 def 22 745 def 23 746 def 24 747 def 25 748 def 26 749 def 27 750 def 28 751 def 29 752 def 30 753 def 31 754END(fault_vector_11) 755 756#endif 757 /* Fault vector is separately protected and *must* be on its own page */ 758 .align PAGE_SIZE 759 760 .import handle_interruption,code 761 .import do_cpu_irq_mask,code 762 763 /* 764 * Child Returns here 765 * 766 * copy_thread moved args into task save area. 
767 */ 768 769ENTRY(ret_from_kernel_thread) 770 /* Call schedule_tail first though */ 771 BL schedule_tail, %r2 772 nop 773 774 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 775 LDREG TASK_PT_GR25(%r1), %r26 776#ifdef CONFIG_64BIT 777 LDREG TASK_PT_GR27(%r1), %r27 778#endif 779 LDREG TASK_PT_GR26(%r1), %r1 780 ble 0(%sr7, %r1) 781 copy %r31, %r2 782 b finish_child_return 783 nop 784END(ret_from_kernel_thread) 785 786 787 /* 788 * struct task_struct *_switch_to(struct task_struct *prev, 789 * struct task_struct *next) 790 * 791 * switch kernel stacks and return prev */ 792ENTRY_CFI(_switch_to) 793 STREG %r2, -RP_OFFSET(%r30) 794 795 callee_save_float 796 callee_save 797 798 load32 _switch_to_ret, %r2 799 800 STREG %r2, TASK_PT_KPC(%r26) 801 LDREG TASK_PT_KPC(%r25), %r2 802 803 STREG %r30, TASK_PT_KSP(%r26) 804 LDREG TASK_PT_KSP(%r25), %r30 805 LDREG TASK_THREAD_INFO(%r25), %r25 806 bv %r0(%r2) 807 mtctl %r25,%cr30 808 809ENTRY(_switch_to_ret) 810 mtctl %r0, %cr0 /* Needed for single stepping */ 811 callee_rest 812 callee_rest_float 813 814 LDREG -RP_OFFSET(%r30), %r2 815 bv %r0(%r2) 816 copy %r26, %r28 817ENDPROC_CFI(_switch_to) 818 819 /* 820 * Common rfi return path for interruptions, kernel execve, and 821 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will 822 * return via this path if the signal was received when the process 823 * was running; if the process was blocked on a syscall then the 824 * normal syscall_exit path is used. All syscalls for traced 825 * proceses exit via intr_restore. 826 * 827 * XXX If any syscalls that change a processes space id ever exit 828 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and 829 * adjust IASQ[0..1]. 830 * 831 */ 832 833 .align PAGE_SIZE 834 835ENTRY_CFI(syscall_exit_rfi) 836 mfctl %cr30,%r16 837 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */ 838 ldo TASK_REGS(%r16),%r16 839 /* Force iaoq to userspace, as the user has had access to our current 840 * context via sigcontext. 
Also Filter the PSW for the same reason. 841 */ 842 LDREG PT_IAOQ0(%r16),%r19 843 depi 3,31,2,%r19 844 STREG %r19,PT_IAOQ0(%r16) 845 LDREG PT_IAOQ1(%r16),%r19 846 depi 3,31,2,%r19 847 STREG %r19,PT_IAOQ1(%r16) 848 LDREG PT_PSW(%r16),%r19 849 load32 USER_PSW_MASK,%r1 850#ifdef CONFIG_64BIT 851 load32 USER_PSW_HI_MASK,%r20 852 depd %r20,31,32,%r1 853#endif 854 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */ 855 load32 USER_PSW,%r1 856 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */ 857 STREG %r19,PT_PSW(%r16) 858 859 /* 860 * If we aren't being traced, we never saved space registers 861 * (we don't store them in the sigcontext), so set them 862 * to "proper" values now (otherwise we'll wind up restoring 863 * whatever was last stored in the task structure, which might 864 * be inconsistent if an interrupt occurred while on the gateway 865 * page). Note that we may be "trashing" values the user put in 866 * them, but we don't support the user changing them. 867 */ 868 869 STREG %r0,PT_SR2(%r16) 870 mfsp %sr3,%r19 871 STREG %r19,PT_SR0(%r16) 872 STREG %r19,PT_SR1(%r16) 873 STREG %r19,PT_SR3(%r16) 874 STREG %r19,PT_SR4(%r16) 875 STREG %r19,PT_SR5(%r16) 876 STREG %r19,PT_SR6(%r16) 877 STREG %r19,PT_SR7(%r16) 878 879ENTRY(intr_return) 880 /* check for reschedule */ 881 mfctl %cr30,%r1 882 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 883 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ 884 885 .import do_notify_resume,code 886intr_check_sig: 887 /* As above */ 888 mfctl %cr30,%r1 889 LDREG TI_FLAGS(%r1),%r19 890 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20 891 and,COND(<>) %r19, %r20, %r0 892 b,n intr_restore /* skip past if we've nothing to do */ 893 894 /* This check is critical to having LWS 895 * working. The IASQ is zero on the gateway 896 * page and we cannot deliver any signals until 897 * we get off the gateway page. 
898 * 899 * Only do signals if we are returning to user space 900 */ 901 LDREG PT_IASQ0(%r16), %r20 902 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ 903 LDREG PT_IASQ1(%r16), %r20 904 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */ 905 906 copy %r0, %r25 /* long in_syscall = 0 */ 907#ifdef CONFIG_64BIT 908 ldo -16(%r30),%r29 /* Reference param save area */ 909#endif 910 911 /* NOTE: We need to enable interrupts if we have to deliver 912 * signals. We used to do this earlier but it caused kernel 913 * stack overflows. */ 914 ssm PSW_SM_I, %r0 915 916 BL do_notify_resume,%r2 917 copy %r16, %r26 /* struct pt_regs *regs */ 918 919 b,n intr_check_sig 920 921intr_restore: 922 copy %r16,%r29 923 ldo PT_FR31(%r29),%r1 924 rest_fp %r1 925 rest_general %r29 926 927 /* inverse of virt_map */ 928 pcxt_ssm_bug 929 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */ 930 tophys_r1 %r29 931 932 /* Restore space id's and special cr's from PT_REGS 933 * structure pointed to by r29 934 */ 935 rest_specials %r29 936 937 /* IMPORTANT: rest_stack restores r29 last (we are using it)! 938 * It also restores r1 and r30. 939 */ 940 rest_stack 941 942 rfi 943 nop 944 945#ifndef CONFIG_PREEMPTION 946# define intr_do_preempt intr_restore 947#endif /* !CONFIG_PREEMPTION */ 948 949 .import schedule,code 950intr_do_resched: 951 /* Only call schedule on return to userspace. If we're returning 952 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise 953 * we jump back to intr_restore. 954 */ 955 LDREG PT_IASQ0(%r16), %r20 956 cmpib,COND(=) 0, %r20, intr_do_preempt 957 nop 958 LDREG PT_IASQ1(%r16), %r20 959 cmpib,COND(=) 0, %r20, intr_do_preempt 960 nop 961 962 /* NOTE: We need to enable interrupts if we schedule. We used 963 * to do this earlier but it caused kernel stack overflows. 
*/ 964 ssm PSW_SM_I, %r0 965 966#ifdef CONFIG_64BIT 967 ldo -16(%r30),%r29 /* Reference param save area */ 968#endif 969 970 ldil L%intr_check_sig, %r2 971#ifndef CONFIG_64BIT 972 b schedule 973#else 974 load32 schedule, %r20 975 bv %r0(%r20) 976#endif 977 ldo R%intr_check_sig(%r2), %r2 978 979 /* preempt the current task on returning to kernel 980 * mode from an interrupt, iff need_resched is set, 981 * and preempt_count is 0. otherwise, we continue on 982 * our merry way back to the current running task. 983 */ 984#ifdef CONFIG_PREEMPTION 985 .import preempt_schedule_irq,code 986intr_do_preempt: 987 rsm PSW_SM_I, %r0 /* disable interrupts */ 988 989 /* current_thread_info()->preempt_count */ 990 mfctl %cr30, %r1 991 LDREG TI_PRE_COUNT(%r1), %r19 992 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */ 993 nop /* prev insn branched backwards */ 994 995 /* check if we interrupted a critical path */ 996 LDREG PT_PSW(%r16), %r20 997 bb,<,n %r20, 31 - PSW_SM_I, intr_restore 998 nop 999 1000 /* ssm PSW_SM_I done later in intr_restore */ 1001#ifdef CONFIG_MLONGCALLS 1002 ldil L%intr_restore, %r2 1003 load32 preempt_schedule_irq, %r1 1004 bv %r0(%r1) 1005 ldo R%intr_restore(%r2), %r2 1006#else 1007 ldil L%intr_restore, %r1 1008 BL preempt_schedule_irq, %r2 1009 ldo R%intr_restore(%r1), %r2 1010#endif 1011#endif /* CONFIG_PREEMPTION */ 1012 1013 /* 1014 * External interrupts. 
1015 */ 1016 1017intr_extint: 1018 cmpib,COND(=),n 0,%r16,1f 1019 1020 get_stack_use_cr30 1021 b,n 2f 1022 10231: 1024 get_stack_use_r30 10252: 1026 save_specials %r29 1027 virt_map 1028 save_general %r29 1029 1030 ldo PT_FR0(%r29), %r24 1031 save_fp %r24 1032 1033 loadgp 1034 1035 copy %r29, %r26 /* arg0 is pt_regs */ 1036 copy %r29, %r16 /* save pt_regs */ 1037 1038 ldil L%intr_return, %r2 1039 1040#ifdef CONFIG_64BIT 1041 ldo -16(%r30),%r29 /* Reference param save area */ 1042#endif 1043 1044 b do_cpu_irq_mask 1045 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ 1046ENDPROC_CFI(syscall_exit_rfi) 1047 1048 1049 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ 1050 1051ENTRY_CFI(intr_save) /* for os_hpmc */ 1052 mfsp %sr7,%r16 1053 cmpib,COND(=),n 0,%r16,1f 1054 get_stack_use_cr30 1055 b 2f 1056 copy %r8,%r26 1057 10581: 1059 get_stack_use_r30 1060 copy %r8,%r26 1061 10622: 1063 save_specials %r29 1064 1065 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */ 1066 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior 1067 1068 1069 mfctl %isr, %r16 1070 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ 1071 mfctl %ior, %r17 1072 1073 1074#ifdef CONFIG_64BIT 1075 /* 1076 * If the interrupted code was running with W bit off (32 bit), 1077 * clear the b bits (bits 0 & 1) in the ior. 1078 * save_specials left ipsw value in r8 for us to test. 1079 */ 1080 extrd,u,*<> %r8,PSW_W_BIT,1,%r0 1081 depdi 0,1,2,%r17 1082 1083 /* adjust isr/ior: get high bits from isr and deposit in ior */ 1084 space_adjust %r16,%r17,%r1 1085#endif 1086 STREG %r16, PT_ISR(%r29) 1087 STREG %r17, PT_IOR(%r29) 1088 1089#if 0 && defined(CONFIG_64BIT) 1090 /* Revisit when we have 64-bit code above 4Gb */ 1091 b,n intr_save2 1092 1093skip_save_ior: 1094 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we 1095 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior 1096 * above. 
1097 */ 1098 extrd,u,* %r8,PSW_W_BIT,1,%r1 1099 cmpib,COND(=),n 1,%r1,intr_save2 1100 LDREG PT_IASQ0(%r29), %r16 1101 LDREG PT_IAOQ0(%r29), %r17 1102 /* adjust iasq/iaoq */ 1103 space_adjust %r16,%r17,%r1 1104 STREG %r16, PT_IASQ0(%r29) 1105 STREG %r17, PT_IAOQ0(%r29) 1106#else 1107skip_save_ior: 1108#endif 1109 1110intr_save2: 1111 virt_map 1112 save_general %r29 1113 1114 ldo PT_FR0(%r29), %r25 1115 save_fp %r25 1116 1117 loadgp 1118 1119 copy %r29, %r25 /* arg1 is pt_regs */ 1120#ifdef CONFIG_64BIT 1121 ldo -16(%r30),%r29 /* Reference param save area */ 1122#endif 1123 1124 ldil L%intr_check_sig, %r2 1125 copy %r25, %r16 /* save pt_regs */ 1126 1127 b handle_interruption 1128 ldo R%intr_check_sig(%r2), %r2 1129ENDPROC_CFI(intr_save) 1130 1131 1132 /* 1133 * Note for all tlb miss handlers: 1134 * 1135 * cr24 contains a pointer to the kernel address space 1136 * page directory. 1137 * 1138 * cr25 contains a pointer to the current user address 1139 * space page directory. 1140 * 1141 * sr3 will contain the space id of the user address space 1142 * of the current running thread while that thread is 1143 * running in the kernel. 1144 */ 1145 1146 /* 1147 * register number allocations. 
Note that these are all
	 * in the shadowed registers, so they survive the rfir used to
	 * return from the miss handler.
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

	/* 64-bit (wide-mode) data TLB miss: walk the 3-level page table
	 * (L3_ptep) and insert the translation with idtlbt, or branch to
	 * the alias/fault paths if no valid pte exists. */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

	/* Non-access data TLB miss (fdc/fic/pdc/probe...), wide mode.
	 * Falls back to nadtlb_emulate instead of faulting when no
	 * translation exists for an aliased address. */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

	/* PA 1.1 data TLB miss: 2-level walk, separate idtlba/idtlbp
	 * inserts, performed through %sr1 which is saved/restored. */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

	/* Non-access data TLB miss, PA 1.1. */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

	/* PA 2.0 narrow-mode data TLB miss: 2-level walk, f_extend to
	 * widen the pte before the combined idtlbt insert. */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

	/* Non-access data TLB miss, PA 2.0 narrow mode. */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl		%cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi		0x280,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n		%r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u		%r9,15,5,%r8	/* Get index register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault    /* have to use slow path */
	copy		%r1,%r24
	BL		get_register,%r25
	extrw,u		%r9,10,5,%r8	/* Get base register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l		%r1,%r24,%r1	/* doesn't affect c/b bits */

	/* Set the PSW N bit in the interrupted context so the faulting
	 * instruction is nullified on return. */
nadtlb_nullify:
	mfctl		%ipsw,%r8
	ldil		L%PSW_N,%r9
	or		%r8,%r9,%r8	/* Set PSW_N */
	mtctl		%r8,%ipsw

	rfir
	nop

	/*
	   When there is no translation for the probe address then we
	   must nullify the insn and return zero in the target register.
	   This will indicate to the calling code that it does not have
	   write/read privileges to this address.

	   This should technically work for prober and probew in PA 1.1,
	   and also probe,r and probe,w in PA 2.0

	   WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
	   THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	 */
nadtlb_probe_check:
	ldi		0x80,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL		get_register,%r25      /* Find the target register */
	extrw,u		%r9,31,5,%r8	/* Get target register */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy		%r0,%r1		/* Write zero to target register */
	b		nadtlb_nullify	/* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

	/* PA 1.1 instruction TLB miss: 2-level walk, iitlba/iitlbp
	 * inserts through a temporarily borrowed %sr1. */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop


	/* PA 2.0 narrow-mode instruction TLB miss. */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

	/* Dirty-bit trap, wide mode: mark the pte dirty (update_dirty)
	 * and re-insert the translation. Faults to dbit_fault when the
	 * pte is missing. */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

	/* Dirty-bit trap, PA 1.1. */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

	/* Dirty-bit trap, PA 2.0 narrow mode. */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

	/* Fault stubs: load a trap code into %r8 (in the branch delay
	 * slot) and enter the common intr_save path. */
kernel_bad_space:
	b		intr_save
	ldi		31,%r8	/* Use an unused code */

dbit_fault:
	b		intr_save
	ldi		20,%r8
itlb_fault:
	b		intr_save
	ldi		PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b		intr_save
	ldi		17,%r8

naitlb_fault:
	b		intr_save
	ldi		16,%r8

dtlb_fault:
	b		intr_save
	ldi		15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8  - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	/* Save the C-callee-saved GRs %r3-%r18 into a pt_regs area. */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	/* Restore %r3-%r18 from a pt_regs area (inverse of reg_save). */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	/* Emit a syscall wrapper for a fork-style call: save callee-saved
	 * regs and %cr27 into the task's pt_regs, then branch to the real
	 * sys_\name (the STREG executes in the branch delay slot). */
	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28		/* child gets return value 0 */
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2	   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			   /* Restore user sp */
	mfsp	%sr3,%r1			   /* Get user space id */
	mtsp	%r1,%sr7			   /* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			   /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	/* Save the caller-clobbered registers, call the trampoline with
	 * (self address, parent address, frame, NULL pt_regs), then
	 * restore and jump back to the start of the traced function. */
	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	/* Like ftrace_caller, but builds a full struct pt_regs on the
	 * stack and passes it to the trampoline so the tracer can
	 * inspect/modify all registers. */
	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1.  This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value.  So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * The blr indexes into the 2-instruction (bv + delay slot)
	 * dispatch table below; the caller's return address is in %r25.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)    /* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)    /* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)    /* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)    /* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)    /* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)    /* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)    /* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)    /* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)    /* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)    /* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)    /* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)    /* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)    /* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)    /* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)    /* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)    /* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)    /* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)    /* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)    /* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)    /* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)    /* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)    /* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)    /* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)    /* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * Same dispatch scheme as get_register: blr indexes into the
	 * bv/copy pair table; return address is in %r25.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)    /* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)    /* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)    /* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)    /* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)    /* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)    /* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)    /* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)    /* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)    /* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)    /* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)    /* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)    /* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)    /* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)    /* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)    /* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)    /* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)    /* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)    /* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)    /* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)    /* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)    /* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)    /* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)    /* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)    /* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)    /* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)    /* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)    /* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)    /* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)    /* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)    /* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)