/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#ifdef CONFIG_64BIT
#define CMPIB		cmpib,*
#define CMPB		cmpb,*
#define COND(x)		*x

	.level 2.0w
#else
#define CMPIB		cmpib,
#define CMPB		cmpb,
#define COND(x)		x

	.level 2.0
#endif

	.import		pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
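	/* space_to_prot, roughly, in C (an illustrative sketch, not
	 * built into the kernel):
	 *
	 *	unsigned long space_to_prot(unsigned long spc)
	 *	{
	 *	#if SPACEID_SHIFT == 0
	 *		return spc << 1;	// depd,z \spc,62,31,\prot
	 *	#else
	 *		return spc >> (SPACEID_SHIFT - 1);
	 *	#endif
	 *	}
	 *
	 * i.e. the protection id is the space id shifted so that it
	 * lines up with the PID field expected by the TLB inserts.
	 */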
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * For Faults:
	 *	If sr7 == 0
	 *	    Already using a kernel stack, so call the
	 *	    get_stack_use_r30 macro to push a pt_regs structure
	 *	    on the stack, and store registers there.
	 *	else
	 *	    Need to set up a kernel stack, so call the
	 *	    get_stack_use_cr30 macro to set up a pointer
	 *	    to the pt_regs structure contained within the
	 *	    task pointer pointed to by cr30. Set the stack
	 *	    pointer to point to the end of the task structure.
	 *
	 * For Interrupts:
	 *	If sr7 == 0
	 *	    Already using a kernel stack, check to see if r30
	 *	    is already pointing to the per processor interrupt
	 *	    stack. If it is, call the get_stack_use_r30 macro
	 *	    to push a pt_regs structure on the stack, and store
	 *	    registers there. Otherwise, call get_stack_use_cr31
	 *	    to get a pointer to the base of the interrupt stack
	 *	    and push a pt_regs structure on that stack.
	 *	else
	 *	    Need to set up a kernel stack, so call the
	 *	    get_stack_use_cr30 macro to set up a pointer
	 *	    to the pt_regs structure contained within the
	 *	    task pointer pointed to by cr30. Set the stack
	 *	    pointer to point to the end of the task structure.
	 *	    N.B.: We don't use the interrupt stack for the
	 *	    first interrupt from userland, because signals/
	 *	    resched's are processed when returning to userland,
	 *	    and we can sleep in those cases.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl	%cr30, %r1
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r30, PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	STREG	%r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm
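	/* The fault-path decision above, as pseudocode (illustrative
	 * only; field names are approximations):
	 *
	 *	if (sr7 == 0) {			// already in kernel space
	 *		regs = (struct pt_regs *)r30;	// get_stack_use_r30
	 *		r30 += PT_SZ_ALGN;
	 *	} else {			// came from user space
	 *		regs = &task->thread.regs;	// get_stack_use_cr30
	 *		r30 = (char *)thread_info + THREAD_SZ_ALGN;
	 *	}
	 */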
	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro	EXTR	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	extrd,u	\reg1,32+\start,\length,\reg2
#else
	extrw,u	\reg1,\start,\length,\reg2
#endif
	.endm
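	/* Note on bit numbering: PA-RISC counts bits from the most
	 * significant end, so "bit 0" is the MSB.  The 32-bit forms
	 * therefore map onto the 64-bit ones by adding 32 to the
	 * start position; e.g. (illustrative expansion):
	 *
	 *	EXTR	va,9,1,t0
	 *	  ->	extrw,u	va,9,1,t0	(32 bit)
	 *	  ->	extrd,u	va,41,1,t0	(64 bit, 32+9)
	 */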
	.macro	DEP	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	depd	\reg1,32+\start,\length,\reg2
#else
	depw	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEPI	val,start,length,reg
#ifdef CONFIG_64BIT
	depdi	\val,32+\start,\length,\reg
#else
	depwi	\val,\start,\length,\reg
#endif
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
	copy	\pmd,%r9
#ifdef CONFIG_64BIT
	shld	%r9,PxD_VALUE_SHIFT,\pmd
#else
	shlw	%r9,PxD_VALUE_SHIFT,\pmd
#endif
	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm
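	/* The walk above, roughly, in C (an illustrative sketch; helper
	 * and field names are approximations, not kernel API):
	 *
	 *	pmd = &pmd_base[pmd_index(va)];	// index by PMD/PGD va bits
	 *	if (!(*pmd & _PxD_PRESENT))
	 *		goto fault;
	 *	pte_base = (*pmd >> PxD_FLAG_SHIFT) << PxD_VALUE_SHIFT;
	 *	pte = pte_base[pte_index(va)];
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */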
	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_ptep	ptep,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptep)
	.endm

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
	.endm
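	/* update_ptep and update_dirty in C (illustrative only):
	 *
	 *	// update_ptep: store only if _PAGE_ACCESSED was clear,
	 *	// so a hot PTE's cache line is never needlessly dirtied
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 *
	 *	// update_dirty: unconditional, the dirty fault always
	 *	// has a bit to set anyway
	 *	*ptep = pte |= _PAGE_ACCESSED | _PAGE_DIRTY;
	 */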
	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru	\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm

	/* The alias region is an 8MB-aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	DEPI	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z	\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm
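	/* do_alias, roughly, in C (illustrative only; helper names are
	 * made up for the sketch):
	 *
	 *	if (spc != 0)				// kernel space only
	 *		goto fault;
	 *	if ((va & ~0x7fffffUL) != TMPALIAS_MAP_START)
	 *		goto fault;			// outside alias region
	 *	prot = tlb_prot(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ);
	 *	// one low-order region bit of va picks the half:
	 *	pte = in_from_half(va) ? r23 : r26;	// "from" or "to" page
	 */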
	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.export fault_vector_20

	.text

	.align 4096

fault_vector_20:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def		16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#ifndef CONFIG_64BIT

	.export fault_vector_11

	.align 2048

fault_vector_11:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def		16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#endif

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_ to properly set up the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.export __kernel_thread, code
	.import do_fork
__kernel_thread:
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)	/* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26	/* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26	/* will have kernel mappings.	*/
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop
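	/* In C, this wrapper amounts to (an illustrative sketch of the
	 * calling convention, not the actual kernel source):
	 *
	 *	pid_t __kernel_thread(int (*fn)(void *), void *arg,
	 *			      unsigned long flags)
	 *	{
	 *		struct pt_regs regs = { .gr26 = fn, .gr25 = arg };
	 *		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
	 *			       1, &regs, 0, NULL, NULL);
	 *	}
	 *
	 * The child never returns through here; it starts life in
	 * ret_from_kernel_thread below.
	 */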
	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

	.export	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26

	.import	sys_execve, code
	.export	__execve, code
__execve:
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return	/* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop

	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
	.export	_switch_to, code
_switch_to:
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
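	/* The context switch above, as pseudocode (illustrative only;
	 * field names are approximations):
	 *
	 *	prev->thread.kpc = &_switch_to_ret;	// where prev resumes
	 *	prev->thread.ksp = sp;			// prev's kernel stack
	 *	sp   = next->thread.ksp;		// adopt next's stack
	 *	cr30 = next->thread_info;
	 *	goto *next->thread.kpc;		// usually _switch_to_ret,
	 *	// which restores callee-saves and returns prev in %r28
	 */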
	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align 4096

	.export syscall_exit_rfi
syscall_exit_rfi:
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm	PSW_SM_I, %r0

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19
#ifdef CONFIG_SMP
	mfctl	%cr30,%r1
	ldw	TI_CPU(%r1),%r1	/* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef CONFIG_64BIT
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
#endif
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

intr_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_SIGPENDING */
	bb,<,n	%r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm		PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */
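	/* The IAOQ/PSW filtering above, in C (illustrative only; field
	 * names are approximations):
	 *
	 *	regs->iaoq[0] |= 3;	// depi 3,31,2: set the two low
	 *	regs->iaoq[1] |= 3;	// bits, i.e. privilege level 3
	 *	regs->psw = (regs->psw & USER_PSW_MASK) | USER_PSW;
	 */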
	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	.import do_signal,code
intr_do_signal:
	/*
	   This check is critical to having LWS
	   working. The IASQ is zero on the gateway
	   page and we cannot deliver any signals until
	   we get off the gateway page.

	   Only do signals if we are returning to user space
	*/
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0,%r20,intr_restore	/* backward */
	nop

	copy	%r0, %r24		/* unsigned long in_syscall */
	copy	%r16, %r25		/* struct pt_regs *regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	BL	do_signal,%r2
	copy	%r0, %r26		/* sigset_t *oldset = NULL */

	b	intr_check_sig
	nop
	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b,n 3f

1:
#if 0  /* Interrupt Stack support not working yet! */
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k) */
#ifdef CONFIG_64BIT
	depdi	0,63,15,%r17
#else
	depi	0,31,15,%r17
#endif
	CMPB=,n	%r1,%r17,2f
	get_stack_use_cr31
	b,n 3f
#endif
2:
	get_stack_use_r30

3:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

	.export	intr_save, code /* for os_hpmc */

intr_save:
	mfsp	%sr7,%r16
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c.
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	CMPIB=,n	6,%r26,skip_save_ior


	mfctl	%cr20, %r16	/* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%cr21, %r17	/* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi		0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u	%r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd	%r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi	0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */
	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */
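	/* Every TLB miss handler below follows the same shape; roughly,
	 * in C (illustrative only; helper names mirror the macros):
	 *
	 *	space_adjust(spc, va);		// 64 bit: move space bits
	 *	ptp = get_pgd(spc);		// swapper_pg_dir or %cr25
	 *	space_check(spc);		// wrong space -> full fault
	 *	pte = walk_page_table(ptp, va);	// L2_ptep / L3_ptep
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 *	set_accessed(ptep);		// update_ptep
	 *	insert_tlb(make_insert_tlb(spc, pte));
	 *	rfir();				// restart faulting insn
	 */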
#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z		7,7,3,prot
	depdi		1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u		pte,56,52,pte
	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n	0,spc,dtlb_fault	/* forward */
	ldil		L%(TMPALIAS_MAP_START),t0
	copy		va,t1
	depwi		0,31,23,t1
	cmpb,<>,n	t0,t1,dtlb_fault	/* forward */
	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z		prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=	va,9,1,r0
	or,tr		%r23,%r0,pte	/* If "from" use "from" page */
	or		%r26,%r0,pte	/* else "to", use "to" page  */

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi		7,7,3,prot
	depi		1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt		pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	bb,>=,n		pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z		7,7,3,prot
	depdi		1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u		pte,56,32,pte
	idtlbt		pte,prot

	rfir
	nop
#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl	%cr19,%r9	/* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi		0x280,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n		%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u		%r9,15,5,%r8		/* Get index register # */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy		%r1,%r24
	BL		get_register,%r25
	extrw,u		%r9,10,5,%r8		/* Get base register # */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL		set_register,%r25
	add,l		%r1,%r24,%r1		/* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8	/* Set PSW_N */
	mtctl	%r8,%ipsw

	rfir
	nop

	/*
	   When there is no translation for the probe address then we
	   must nullify the insn and return zero in the target register.
	   This will indicate to the calling code that it does not have
	   write/read privileges to this address.

	   This should technically work for prober and probew in PA 1.1,
	   and also probe,r and probe,w in PA 2.0

	   WARNING: USE ONLY NON-SHADOWED REGISTERS WITH PROBE INSN!
	   THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	 */
nadtlb_probe_check:
	ldi		0x80,%r16
	and		%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw] */
	BL		get_register,%r25	/* Find the target register */
	extrw,u		%r9,31,5,%r8		/* Get target register */
	CMPIB=,n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL		set_register,%r25
	copy		%r0,%r1			/* Write zero to target register */
	b		nadtlb_nullify		/* Nullify return insn */
	nop
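	/* The emulation above, as pseudocode (illustrative only;
	 * helper names are made up for the sketch):
	 *
	 *	insn = iir;				// %cr19
	 *	if (is_fdc_fdce_pdc_fic(insn)) {	// 0x280 class check
	 *		if (m_bit(insn))		// base modification?
	 *			base += index;		// emulate the update
	 *		nullify();			// set PSW_N, rfir
	 *	} else if (is_probe_rw(insn)) {		// 0x80 class check
	 *		target = 0;			// "no access" result
	 *		nullify();
	 *	} else {
	 *		goto nadtlb_fault;		// slow path
	 *	}
	 *
	 * get_register returns -1 for shadowed registers, which also
	 * forces the slow path.
	 */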
#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt		pte,prot

	rfir
	nop

#endif
#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt		pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_20w
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1	/* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_11
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=		0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt		pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n	0,spc,dbit_nounlock_20
	ldi		1,t1
	stw		t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	6,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8  - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm
	.export sys_fork_wrapper
	.export child_return
sys_fork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1		/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv	%r0(%r2)
	STREG	%r20,PT_GR20(%r1)

	/* Set the return value for the child */
child_return:
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28


	.export sys_clone_wrapper
sys_clone_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
	.export sys_vfork_wrapper
sys_vfork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2


	.macro	execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. Why would the new thread need the
	 * old thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	\execve,%r2
	copy	%r1,%arg0

	ldo	-FRAME_SIZE(%r30),%r30
	LDREG	-RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo	-1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy	%r2,%r19

error_\execve:
	bv	%r0(%r19)
	nop
	.endm

	.export sys_execve_wrapper
	.import sys_execve

sys_execve_wrapper:
	execve_wrapper sys_execve

#ifdef CONFIG_64BIT
	.export sys32_execve_wrapper
	.import sys32_execve

sys32_execve_wrapper:
	execve_wrapper sys32_execve
#endif

	.export sys_rt_sigreturn_wrapper
sys_rt_sigreturn_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28	/* reload original r28 for syscall_exit */
	.export sys_sigaltstack_wrapper
sys_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop

#ifdef CONFIG_64BIT
	.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
#endif

	.export sys_rt_sigsuspend_wrapper
sys_rt_sigsuspend_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r24
	reg_save %r24

	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	sys_rt_sigsuspend,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	sys_rt_sigsuspend,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_restore %r1

	bv	%r0(%r2)
	nop
	.export syscall_exit
syscall_exit:

	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX

/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	LDREG	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	-PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG	%r22,TASK_PT_GR22(%r1)
	STREG	%r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_bh:

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19

#ifdef CONFIG_SMP
	/* sched.h: int processor */
	/* %r26 is used as scratch register to index into irq_stat[] */
	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26	/* cpu # */

	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
#ifdef CONFIG_64BIT
	shld	%r26, 6, %r20
#else
	shlw	%r26, 5, %r20
#endif
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched	/* forward */

syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* get ti flags */
	bb,<,n	%r19, 31-TIF_SIGPENDING, syscall_do_signal	/* forward */

syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	LDREG	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi
	nop

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	rsm	PSW_SM_I, %r0
	LDREG	TASK_PT_GR30(%r1),%r30		/* restore user sp */
	mfsp	%sr3,%r1			/* Get user's space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is
	 * not the most efficient way of doing things, but it works.
	 */
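	/* The PSW rebuild below, in C (illustrative only; flag names
	 * are approximations, and PA bit numbering counts from the MSB):
	 *
	 *	psw = PSW_C | PSW_Q | PSW_D | PSW_I;	// ldi 0x0b + depi
	 *	if (ptrace_flags & PA_SINGLESTEP)
	 *		psw |= PSW_R;		// recovery counter trap
	 *	if (ptrace_flags & PA_BLOCKSTEP)
	 *		psw |= PSW_T;		// taken branch trap
	 */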
syscall_restore_rfi:
	ldo	-1(%r0),%r2		/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0		/*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2	/* Get old PSW */
	ldi	0x0b,%r20		/* Create new PSW */
	depi	-1,13,1,%r20		/* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20		/* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,=	%r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20		/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok	/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25			/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_bh	/* if resched, we start over again */
	nop

	.import do_signal,code
syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	   FIXME: After this point the process structure should be
	   consistent with all the relevant state of the process
	   before the syscall.  We need to verify this. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r25	/* struct pt_regs *regs */
	reg_save %r25

	ldi	1, %r24			/* unsigned long in_syscall */

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_signal,%r2
	copy	%r0, %r26		/* sigset_t *oldset = NULL */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20	/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */

get_register:
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1
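	/* get_register is a computed branch: "blr %r8,%r0" jumps
	 * 8*%r8 bytes past the nop, i.e. into a table of two-instruction
	 * (8-byte) slots.  In C it is just (illustrative only):
	 *
	 *	long get_register(int regno)
	 *	{
	 *		if (is_shadowed(regno))	// r1,r8,r9,r16,r17,r24,r25
	 *			return -1;	// caller takes the slow path
	 *		return gr[regno];
	 *	}
	 */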
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */

set_register:
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a placeholder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31