/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident   "%Z%%M% %I% %E% SMI"

#include "machdep.h"
#include "_audit.h"
#if defined(lint)
#include <sys/types.h>
#include "_rtld.h"
#else
#include <sys/stack.h>
#include <sys/asm_linkage.h>

        .file   "boot_elf.s"
        .seg    ".text"
#endif

/*
 * We got here because the initial call to a function resolved to a procedure
 * linkage table entry.  That entry did a branch to the first PLT entry, which
 * in turn did a call to elf_rtbndr (refer to elf_plt_init()).
 *
 * The code sequence that got us here was:
 *
 * PLT entry for foo():
 *      sethi   (.-PLT0), %g1
 *      ba,a    .PLT0                   ! patched atomically 2nd
 *      nop                             ! patched 1st
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *
 * Therefore on entry, %i7 has the address of the call, which will be added
 * to the offset to the plt entry in %g1 to calculate the plt entry address.
 * We must also subtract 4 because the address of PLT0 points to the
 * save instruction before the call.
 *
 * The PLT entry is rewritten in one of several ways.  For the full 64-bit
 * span, the following sequence is generated:
 *
 *      nop
 *      sethi   %hh(entry_pt), %g1
 *      sethi   %lm(entry_pt), %g5
 *      or      %g1, %hm(entry_pt), %g1
 *      sllx    %g1, 32, %g1
 *      or      %g1, %g5, %g5
 *      jmpl    %g5 + %lo(entry_pt), %g0
 *      nop
 *
 * Shorter code sequences are possible, depending on reachability
 * constraints.  Note that 'call' is not as useful as it might seem in
 * this context, because it is only capable of plus or minus 2Gbyte
 * PC-relative jumps, and the rdpc instruction is very slow.
 *
 * At the time of writing, the present and future SPARC CPUs that will use
 * this code are only capable of addressing the bottom 43-bits and top 43-bits
 * of the address space.  And since shared libraries are placed at the top
 * of the address space, the "top 44-bits" sequence will effectively always be
 * used.  See elf_plt_write() below.  The "top 32-bits" sequence is used when
 * it can reach.
 */
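
/*
 * As an illustration (an editorial sketch, not part of the original
 * commentary), here is what the full 64-bit sequence above computes,
 * written as C, with 'entry' standing for a hypothetical 64-bit
 * function address:
 *
 *      uint64_t g1, g5, target;
 *      g1 = (entry >> 42) << 10;                  sethi  %hh(entry), %g1
 *      g1 |= (entry >> 32) & 0x3ff;               or     %g1, %hm(entry), %g1
 *      g1 <<= 32;                                 sllx   %g1, 32, %g1
 *      g5 = ((entry >> 10) & 0x3fffff) << 10;     sethi  %lm(entry), %g5
 *      g5 |= g1;                                  or     %g1, %g5, %g5
 *      target = g5 + (entry & 0x3ff);             jmpl   %g5 + %lo(entry), %g0
 *
 * after which target == entry for any 64-bit address, since the four
 * relocation fields %hh, %hm, %lm and %lo partition the address into
 * 22 + 10 + 22 + 10 bits.
 */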

#if defined(lint)

extern unsigned long elf_bndr(Rt_map *, unsigned long, caddr_t);

/*
 * We're called here from .PLTn in a new frame, with %o0 containing
 * the result of a sethi (. - .PLT0), and %o1 containing the pc of
 * the jmpl instruction we got here with, inside .PLT1.
 */
void
elf_rtbndr(Rt_map *lmp, unsigned long pltoff, caddr_t from)
{
        (void) elf_bndr(lmp, pltoff, from);
}

#else
        .weak   _elf_rtbndr             ! keep dbx happy as it likes to
        _elf_rtbndr = elf_rtbndr        ! rummage around for our symbols

        ENTRY(elf_rtbndr)
        mov     %i7, %o3                ! Save caller's address (profiling)
        save    %sp, -SA(MINFRAME), %sp
        mov     %g4, %l5                ! Save g4 (safe across function calls)
        sub     %i1, 0x38, %o1          ! compute addr of .PLT0 from addr of .PLT1 jmpl
        ldx     [%o1 + 0x40], %o0       ! ld PLT2[X] into third arg
        srl     %i0, 10, %o1            ! shift offset set by sethi
        call    elf_bndr                ! returns function address in %o0
        mov     %i3, %o2                ! Caller's address is arg 3
        mov     %o0, %g1                ! save address of bound routine
        mov     %l5, %g4                ! restore g4
        restore                         ! how many restores needed ? 2
        jmp     %g1                     ! jump to it
        restore
        SET_SIZE(elf_rtbndr)

#endif


#if defined(lint)
void
elf_rtbndr_far(Rt_map *lmp, unsigned long pltoff, caddr_t from)
{
        (void) elf_bndr(lmp, pltoff, from);
}
#else
ENTRY(elf_rtbndr_far)
        mov     %i7, %o3                ! Save caller's address
        save    %sp, -SA(MINFRAME), %sp
        mov     %g4, %l5                ! preserve %g4
        sub     %i1, 0x18, %o2          ! compute address of .PLT0 from
                                        ! .PLT0 jmpl instr.
        sub     %i0, %o2, %o1           ! pltoff = pc - 0x10 - .PLT0
        sub     %o1, 0x10, %o1
        ldx     [%o2 + 0x40], %o0       ! ld PLT2[X] into third arg
        call    elf_bndr                ! returns function address in %o0
        mov     %i3, %o2                ! Caller's address is arg 3
        mov     %o0, %g1                ! save address of bound routine
        mov     %l5, %g4                ! restore g4
        restore                         ! how many restores needed ? 2
        jmp     %g1                     ! jump to it
        restore
SET_SIZE(elf_rtbndr_far)
#endif


/*
 * Initialize a plt entry so that function calls go to 'bindfunc'.
 * (We parameterize the binding function here because we call this
 * routine twice - once for PLT0 and once for PLT1 with different
 * binding functions.)
 *
 * The plt entries (PLT0 and PLT1) look like:
 *
 *      save    %sp, -176, %sp
 *      sethi   %hh(bindfunc), %l0
 *      sethi   %lm(bindfunc), %l1
 *      or      %l0, %hm(bindfunc), %l0
 *      sllx    %l0, 32, %l0
 *      or      %l0, %l1, %l0
 *      jmpl    %l0 + %lo(bindfunc), %o1
 *      mov     %g1, %o0
 */

#define M_SAVE_SP176SP  0x9de3bf50      /* save %sp, -176, %sp */
#define M_SETHI_L0      0x21000000      /* sethi 0x0, %l0 */
#define M_SETHI_L1      0x23000000      /* sethi 0x0, %l1 */
#define M_OR_L0L0       0xa0142000      /* or %l0, 0x0, %l0 */
#define M_SLLX_L032L0   0xa12c3020      /* sllx %l0, 32, %l0 */
#define M_OR_L0L1L0     0xa0140011      /* or %l0, %l1, %l0 */
#define M_JMPL_L0O1     0x93c42000      /* jmpl %l0 + 0, %o1 */
#define M_MOV_G1O0      0x90100001      /* or %g0, %g1, %o0 */

#if defined(lint)

#define HH22(x) 0       /* for lint's benefit */
#define LM22(x) 0
#define HM10(x) 0
#define LO10(x) 0

/* ARGSUSED */
void
elf_plt_init(void *plt, caddr_t bindfunc)
{
        uint_t  *_plt;

        _plt = (uint_t *)plt;
        _plt[0] = M_SAVE_SP176SP;
        _plt[1] = M_SETHI_L0 | HH22(bindfunc);
        _plt[2] = M_SETHI_L1 | LM22(bindfunc);
        _plt[3] = M_OR_L0L0 | HM10(bindfunc);
        _plt[4] = M_SLLX_L032L0;
        _plt[5] = M_OR_L0L1L0;
        _plt[6] = M_JMPL_L0O1 | LO10(bindfunc);
        _plt[7] = M_MOV_G1O0;
}

#else
        ENTRY(elf_plt_init)
        save    %sp, -SA(MINFRAME), %sp ! Make a frame

        sethi   %hi(M_SAVE_SP176SP), %o0        ! Get save instruction
        or      %o0, %lo(M_SAVE_SP176SP), %o0
        st      %o0, [%i0]              ! Store in plt[0]

        sethi   %hi(M_SETHI_L0), %o4    ! Get "sethi 0x0, %l0" insn
        srlx    %i1, 42, %o2            ! get %hh(function address)
        or      %o4, %o2, %o4           ! or value into instruction
        st      %o4, [%i0 + 0x4]        ! Store instruction in plt[1]
        iflush  %i0                     ! .. and flush

        sethi   %hi(M_SETHI_L1), %o4    ! Get "sethi 0x0, %l1" insn
        srl     %i1, 10, %o2            ! get %lm(function address)
        or      %o4, %o2, %o4           ! or value into instruction
        st      %o4, [%i0 + 0x8]        ! Store instruction in plt[2]

        sethi   %hi(M_OR_L0L0), %o4     ! Get "or %l0, 0x0, %l0" insn
        or      %o4, %lo(M_OR_L0L0), %o4
        srlx    %i1, 32, %o2            ! get %hm(function address)
        and     %o2, 0x3ff, %o2         ! pick out bits 42-33
        or      %o4, %o2, %o4           ! or value into instruction
        st      %o4, [%i0 + 0xc]        ! Store instruction in plt[3]
        iflush  %i0 + 8                 ! .. and flush

        sethi   %hi(M_SLLX_L032L0), %o4 ! get "sllx %l0, 32, %l0" insn
        or      %o4, %lo(M_SLLX_L032L0), %o4
        st      %o4, [%i0 + 0x10]       ! Store instruction in plt[4]

        sethi   %hi(M_OR_L0L1L0), %o4   ! get "or %l0, %l1, %l0" insn
        or      %o4, %lo(M_OR_L0L1L0), %o4
        st      %o4, [%i0 + 0x14]       ! Store instruction in plt[5]
        iflush  %i0 + 0x10              ! .. and flush

        sethi   %hi(M_JMPL_L0O1), %o4   ! get "jmpl %l0 + 0, %o1" insn
        or      %o4, %lo(M_JMPL_L0O1), %o4
        and     %i1, 0x3ff, %o2         ! get %lo(function address)
        or      %o4, %o2, %o4           ! or value into instruction
        st      %o4, [%i0 + 0x18]       ! Store instruction in plt[6]

        sethi   %hi(M_MOV_G1O0), %o4    ! get "mov %g1, %o0" insn
        or      %o4, %lo(M_MOV_G1O0), %o4
        st      %o4, [%i0 + 0x1c]       ! Store instruction in plt[7]
        iflush  %i0 + 0x18              ! .. and flush

        ret
        restore
        SET_SIZE(elf_plt_init)
#endif


#if defined(lint)
/*
 * The V9 ABI assigns the link map identifier, the
 * Rt_map pointer, to the start of .PLT2.
 */
void
elf_plt2_init(unsigned int *plt2, Rt_map *lmp)
{
        /* LINTED */
        *(unsigned long *)plt2 = (unsigned long)lmp;
}
#else
        ENTRY(elf_plt2_init)
        stx     %o1, [%o0]
        retl
        iflush  %o0
        SET_SIZE(elf_plt2_init)
#endif


/*
 * After the first call to a plt, elf_bndr() will have determined the true
 * address of the function being bound.  The plt is now rewritten so that
 * any subsequent calls go directly to the bound function.  If the library
 * to which the function belongs is being profiled, refer to _plt_cg_write.
 *
 * For complete 64-bit spanning, the new plt entry is:
 *
 *      nop
 *      sethi   %hh(function address), %g1
 *      sethi   %lm(function address), %g5
 *      or      %g1, %hm(function address), %g1
 *      sllx    %g1, 32, %g1
 *      or      %g1, %g5, %g5
 *      jmpl    %g5 + %lo(function address), %g0
 *      nop
 *
 * However, shorter instruction sequences are possible and useful.
 * This version gets us anywhere in the top 44 bits of the
 * address space - since this is where shared objects live most
 * of the time, this case is worth optimizing.
 *
 *      nop
 *      sethi   %h44(~function_address), %g5
 *      xnor    %g5, %m44(~function address), %g1
 *      sllx    %g1, 12, %g1
 *      jmpl    %g1 + %l44(function address), %g0
 *      nop
 *      nop
 *      nop
 *
 * This version gets us anywhere in the top 32 bits:
 *
 *      nop
 *      sethi   %hi(~function_address), %g5
 *      xnor    %g5, %lo(~function_address), %g1
 *      jmpl    %g1, %g0
 *      nop
 *      nop
 *      nop
 *      nop
 *
 * This version gets us to a destination within
 * +- 8megs of the PLT's address:
 *
 *      nop
 *      ba,a    <dest>
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *
 * This version gets us to a destination within
 * +- 2megs of the PLT's address:
 *
 *      nop
 *      ba,a,pt %icc, <dest>
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *
 *
 * The PLT is written in reverse order to ensure re-entrant behaviour.
 * Note that the first two instructions must be overwritten with a
 * single stx.
 *
 * Note that even in the 44-bit case, we deliberately use both %g5 and
 * %g1 to prevent anyone accidentally relying on either of them being
 * non-volatile across a function call.
 */
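
/*
 * As an editorial aside (not part of the original commentary), the
 * 44-bit xnor trick above works out as follows when written in C,
 * with 'sym' a hypothetical function address whose top 20 bits are
 * all ones (i.e. an address in the top 44 bits of the address space);
 * the H44/M44/L44 field extractions match the lint macros defined
 * below:
 *
 *      uint64_t n = ~sym;
 *      uint64_t g5 = ((n >> 22) & 0x3fffff) << 10;   sethi %h44(~sym), %g5
 *      uint64_t g1 = ~(g5 ^ ((n >> 12) & 0x3ff));    xnor  %g5, %m44(~sym), %g1
 *      g1 <<= 12;                                    sllx  %g1, 12, %g1
 *      uint64_t target = g1 + (sym & 0xfff);         jmpl  %g1 + %l44(sym), %g0
 *
 * The xnor of the two complemented fields recreates bits 43..12 of sym
 * and, as a side effect, sets all of the bits above them, so after the
 * shift and the 12-bit displacement, target == sym exactly when sym's
 * top 20 bits are all ones.
 */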

#define M_JMPL_G5G0     0x81c16000      /* jmpl %g5 + 0, %g0 */
#define M_OR_G1G5G5     0x8a104005      /* or %g1, %g5, %g5 */
#define M_SLLX_G132G1   0x83287020      /* sllx %g1, 32, %g1 */
#define M_OR_G1G1       0x82106000      /* or %g1, 0x0, %g1 */
#define M_SETHI_G5      0x0b000000      /* sethi 0x0, %g5 */
#define M_SETHI_G1      0x03000000      /* sethi 0x0, %g1 */
#define M_NOP           0x01000000      /* sethi 0x0, %g0 */

#define M_JMPL_G1G0     0x81c06000      /* jmpl %g1 + 0, %g0 */
#define M_SLLX_G112G1   0x8328700c      /* sllx %g1, 12, %g1 */
#define M_XNOR_G5G1     0x82396000      /* xnor %g5, 0, %g1 */

#if defined(lint)

/* ARGSUSED */
#define MASK(m)         ((1ul << (m)) - 1ul)
#define BITS(v, u, l)   (((v) >> (l)) & MASK((u) - (l) + 1))
#define H44(v)          BITS(v, 43, 22)
#define M44(v)          BITS(v, 21, 12)
#define L44(v)          BITS(v, 11, 0)

#endif

#if defined(lint)

void
/* ARGSUSED1 */
plt_upper_32(uintptr_t pc, uintptr_t symval)
{
        ulong_t sym = (ulong_t)symval;
        /* LINTED */
        ulong_t nsym = ~sym;
        uint_t  *plttab = (uint_t *)pc;

        plttab[3] = M_JMPL_G1G0;
        plttab[2] = (uint_t)(M_XNOR_G5G1 | LO10(nsym));
        *(ulong_t *)pc =
            ((ulong_t)M_NOP << 32) | (M_SETHI_G5 | LM22(nsym));
}

#else

        ENTRY(plt_upper_32)
        !
        ! Address lies in top 32-bits of address space, so use
        ! compact PLT sequence
        !
        sethi   %hi(M_JMPL_G1G0), %o3   ! Get "jmpl %g1, %g0" insn
        st      %o3, [%o0 + 0xc]        ! store instruction in plt[3]
        iflush  %o0 + 0xc               ! .. and flush

        not     %o1, %o4
        sethi   %hi(M_XNOR_G5G1), %o3   ! Get "xnor %g5, 0, %g1" insn
        and     %o4, 0x3ff, %o2         ! pick out bits 0-9
        or      %o3, %o2, %o3           ! or value into instruction
        st      %o3, [%o0 + 0x8]        ! store instruction in plt[2]
        iflush  %o0 + 0x8               ! .. and flush

        sethi   %hi(M_SETHI_G5), %o3    ! Get "sethi 0x0, %g5" insn
        srl     %o4, 10, %o2            ! get %lm(~function address)
        or      %o3, %o2, %o3           ! or value into instruction

        sethi   %hi(M_NOP), %o4         ! Get "nop" instruction
        sllx    %o4, 32, %o4            ! shift to top of instruction pair
        or      %o3, %o4, %o3           ! or value into instruction pair
        stx     %o3, [%o0]              ! store instructions into plt[0] plt[1]
        retl
        iflush  %o0                     ! .. and flush
        SET_SIZE(plt_upper_32)
#endif /* defined lint */
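
/*
 * An editorial sketch (not original commentary) of the 32-bit variant
 * implemented by plt_upper_32 above, again in C with 'sym' a
 * hypothetical address whose top 32 bits are all ones:
 *
 *      uint64_t n = ~sym;
 *      uint64_t g5 = ((n >> 10) & 0x3fffff) << 10;   sethi %hi(~sym), %g5
 *      uint64_t g1 = ~(g5 ^ (n & 0x3ff));            xnor  %g5, %lo(~sym), %g1
 *      uint64_t target = g1;                         jmpl  %g1 + 0, %g0
 *
 * which yields sym's low 32 bits with bits 63..32 forced to one, i.e.
 * target == sym precisely when sym lies in the top 32 bits of the
 * address space.
 */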

#if defined(lint)

void
/* ARGSUSED1 */
plt_upper_44(uintptr_t pc, uintptr_t symval)
{
        ulong_t sym = (ulong_t)symval;
        ulong_t nsym = ~sym;
        uint_t  *plttab = (uint_t *)pc;

        /* LINTED */
        plttab[4] = (uint_t)(M_JMPL_G1G0 | L44(sym));
        plttab[3] = M_SLLX_G112G1;
        /* LINTED */
        plttab[2] = (uint_t)(M_XNOR_G5G1 | M44(nsym));
        *(ulong_t *)pc = ((ulong_t)M_NOP << 32) | (M_SETHI_G5 | H44(nsym));
}

#else

        ENTRY(plt_upper_44)
        !
        ! Address lies in top 44-bits of address space, so use
        ! compact PLT sequence
        !
        setuw   M_JMPL_G1G0, %o3        ! Get "jmpl %g1, %g0" insn
        and     %o1, 0xfff, %o2         ! lower 12 bits of function address
        or      %o3, %o2, %o3           ! is or'ed into instruction
        st      %o3, [%o0 + 0x10]       ! store instruction in plt[4]
        iflush  %o0 + 0x10              ! .. and flush

        setuw   M_SLLX_G112G1, %o3      ! Get "sllx %g1, 12, %g1" insn
        st      %o3, [%o0 + 0xc]        ! store instruction in plt[3]

        not     %o1, %o4
        setuw   M_XNOR_G5G1, %o3        ! Get "xnor %g5, 0, %g1" insn
        srlx    %o4, 12, %o2            ! get %m44(0 - function address)
        and     %o2, 0x3ff, %o2         ! pick out bits 21-12
        or      %o3, %o2, %o3           ! or value into instruction
        st      %o3, [%o0 + 8]          ! store instruction in plt[2]
        iflush  %o0 + 8                 ! .. and flush

        setuw   M_SETHI_G5, %o3         ! Get "sethi 0x0, %g5" insn
        srlx    %o4, 22, %o2            ! get %h44(0 - function address)
        or      %o3, %o2, %o3           ! or value into instruction

        setuw   M_NOP, %o4              ! Get "nop" instruction
        sllx    %o4, 32, %o4            ! shift to top of instruction pair
        or      %o3, %o4, %o3           ! or value into instruction pair
        stx     %o3, [%o0]              ! store instructions into plt[0] plt[1]
        retl
        iflush  %o0                     ! .. and flush
        SET_SIZE(plt_upper_44)

#endif /* defined(lint) */


#if defined(lint)

void
/* ARGSUSED1 */
plt_full_range(uintptr_t pc, uintptr_t symval)
{
        uint_t  *plttab = (uint_t *)pc;

        plttab[6] = M_JMPL_G5G0 | LO10(symval);
        plttab[5] = M_OR_G1G5G5;
        plttab[4] = M_SLLX_G132G1;
        plttab[3] = M_OR_G1G1 | HM10(symval);
        plttab[2] = M_SETHI_G5 | LM22(symval);
        *(ulong_t *)pc =
            ((ulong_t)M_NOP << 32) | (M_SETHI_G1 | HH22(symval));
}

#else
        ENTRY(plt_full_range)
        !
        ! Address lies anywhere in 64-bit address space, so use
        ! full PLT sequence
        !
        sethi   %hi(M_JMPL_G5G0), %o3   ! Get "jmpl %g5, %g0" insn
        and     %o1, 0x3ff, %o2         ! lower 10 bits of function address
        or      %o3, %o2, %o3           ! is or'ed into instruction
        st      %o3, [%o0 + 0x18]       ! store instruction in plt[6]
        iflush  %o0 + 0x18              ! .. and flush

        sethi   %hi(M_OR_G1G5G5), %o3   ! Get "or %g1, %g5, %g5" insn
        or      %o3, %lo(M_OR_G1G5G5), %o3
        st      %o3, [%o0 + 0x14]       ! store instruction in plt[5]

        sethi   %hi(M_SLLX_G132G1), %o3 ! Get "sllx %g1, 32, %g1" insn
        or      %o3, %lo(M_SLLX_G132G1), %o3
        st      %o3, [%o0 + 0x10]       ! store instruction in plt[4]
        iflush  %o0 + 0x10              ! .. and flush

        sethi   %hi(M_OR_G1G1), %o3     ! Get "or %g1, 0x0, %g1" insn
        or      %o3, %lo(M_OR_G1G1), %o3
        srlx    %o1, 32, %o2            ! get %hm(function address)
        and     %o2, 0x3ff, %o2         ! pick out bits 42-33
        or      %o3, %o2, %o3           ! or value into instruction
        st      %o3, [%o0 + 0xc]        ! store instruction in plt[3]

        sethi   %hi(M_SETHI_G5), %o3    ! Get "sethi 0x0, %g5" insn
        srl     %o1, 10, %o2            ! get %lm(function address)
        or      %o3, %o2, %o3           ! or value into instruction
        st      %o3, [%o0 + 0x8]        ! store instruction in plt[2]
        iflush  %o0 + 8                 ! .. and flush

        sethi   %hi(M_SETHI_G1), %o3    ! Get "sethi 0x0, %g1" insn
        srlx    %o1, 42, %o2            ! get %hh(function address)
        or      %o3, %o2, %o3           ! or value into instruction

        sethi   %hi(M_NOP), %o4         ! Get "nop" instruction
        sllx    %o4, 32, %o4            ! shift to top of instruction pair
        or      %o3, %o4, %o3           ! or value into instruction pair
        stx     %o3, [%o0]              ! store instructions into plt[0] plt[1]
        retl
        iflush  %o0                     ! .. and flush

        SET_SIZE(plt_full_range)

#endif /* defined(lint) */

/*
 * performs the 'iflush' instruction on a range of memory.
 */
#if defined(lint)
void
iflush_range(caddr_t addr, size_t len)
{
        /* LINTED */
        uintptr_t base;

        base = (uintptr_t)addr & ~7;    /* round down to 8 byte alignment */
        len = (len + 7) & ~7;           /* round up to multiple of 8 bytes */
        for (len -= 8; (long)len >= 0; len -= 8)
                /* iflush(base + len) */;
}
#else
        ENTRY(iflush_range)
        add     %o1, 7, %o1
        andn    %o0, 7, %o0
        andn    %o1, 7, %o1
1:      subcc   %o1, 8, %o1
        bge,a,pt %xcc, 1b
        iflush  %o0 + %o1
        retl
        nop
        SET_SIZE(iflush_range)
#endif


#if defined(lint)

ulong_t
elf_plt_trace()
{
        return (0);
}
#else
        .global elf_plt_trace
        .type   elf_plt_trace, #function

/*
 * The dyn_plt that called us has already created a stack-frame for
 * us and placed the following entries in it:
 *
 *      [%fp + STACK_BIAS + -0x8]       * dyndata
 *      [%fp + STACK_BIAS + -0x10]      * prev stack size
 *
 * dyndata currently contains:
 *
 *      dyndata:
 *      0x0     Addr    *reflmp
 *      0x8     Addr    *deflmp
 *      0x10    Word    symndx
 *      0x14    Word    sb_flags
 *      0x18    Sym     symdef.st_name
 *      0x1c            symdef.st_info
 *      0x1d            symdef.st_other
 *      0x1e            symdef.st_shndx
 *      0x20            symdef.st_value
 *      0x28            symdef.st_size
 */
#define REFLMP_OFF              0x0
#define DEFLMP_OFF              0x8
#define SYMNDX_OFF              0x10
#define SBFLAGS_OFF             0x14
#define SYMDEF_OFF              0x18
#define SYMDEF_VALUE_OFF        0x20

#define LAREGSSZ        0x40            /* sizeof (La_sparcv9_regs) */
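
/*
 * For reference (an editorial sketch, not original commentary), the
 * offsets above correspond to a C layout along these lines, using the
 * usual Elf64 type sizes (Addr 8 bytes, Word 4 bytes):
 *
 *      struct dyndata {
 *              Elf64_Addr      reflmp;         offset 0x0
 *              Elf64_Addr      deflmp;         offset 0x8
 *              Elf64_Word      symndx;         offset 0x10
 *              Elf64_Word      sb_flags;       offset 0x14
 *              Elf64_Sym       symdef;         offset 0x18
 *      };
 *
 * Elf64_Sym itself begins with st_name (Word), st_info and st_other
 * (bytes) and st_shndx (Half), so symdef.st_value falls at 0x18 + 0x8
 * == 0x20 and symdef.st_size at 0x28, matching SYMDEF_VALUE_OFF above.
 */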

elf_plt_trace:
1:      call    2f
        sethi   %hi(_GLOBAL_OFFSET_TABLE_ - (1b - .)), %l7
2:      or      %l7, %lo(_GLOBAL_OFFSET_TABLE_ - (1b - .)), %l7
        add     %l7, %o7, %l7

        ldx     [%fp + STACK_BIAS + -CLONGSIZE], %l1    ! l1 = * dyndata
        lduw    [%l1 + SBFLAGS_OFF], %l2        ! l2 = sb_flags
        andcc   %l2, LA_SYMB_NOPLTENTER, %g0
        be,pt   %icc, .start_pltenter
        ldx     [%l1 + SYMDEF_VALUE_OFF], %l0   ! l0 = sym.st_value
                                                ! (calling address)
        ba,a,pt %icc, .end_pltenter
        nop

        /*
         * save all registers into La_sparcv9_regs
         */
.start_pltenter:
        sub     %sp, LAREGSSZ, %sp      ! create space for La_sparcv9_regs
                                        ! storage on the stack.

        add     %fp, STACK_BIAS - (LAREGSSZ + (2 * CLONGSIZE)), %o4    ! addr of new space.

        stx     %i0, [%o4 + 0x0]
        stx     %i1, [%o4 + 0x8]
        stx     %i2, [%o4 + 0x10]
        stx     %i3, [%o4 + 0x18]       ! because a regwindow shift has
        stx     %i4, [%o4 + 0x20]       ! already occurred, our current %i*
        stx     %i5, [%o4 + 0x28]       ! registers are the equivalent of
        stx     %i6, [%o4 + 0x30]       ! the %o* registers that the final
        stx     %i7, [%o4 + 0x38]       ! procedure shall see.
        mov     %g4, %l5                ! save g4 (safe across function calls)


        ldx     [%fp + STACK_BIAS + -CLONGSIZE], %l1    ! %l1 == * dyndata
        ldx     [%l1 + REFLMP_OFF], %o0         ! %o0 = reflmp
        ldx     [%l1 + DEFLMP_OFF], %o1         ! %o1 = deflmp
        add     %l1, SYMDEF_OFF, %o2            ! %o2 = symp
        lduw    [%l1 + SYMNDX_OFF], %o3         ! %o3 = symndx
        call    audit_pltenter
        add     %l1, SBFLAGS_OFF, %o5           ! %o5 = * sb_flags

        mov     %o0, %l0                ! %l0 == calling address
        add     %sp, LAREGSSZ, %sp      ! cleanup La_sparcv9_regs off
                                        ! of the stack.

.end_pltenter:
        /*
         * If *no* la_pltexit() routines exist we do not need
         * to keep the stack frame before we call the actual
         * routine.  Instead we jump to it and remove ourselves
         * from the stack at the same time.
         */
        ldx     [%l7 + audit_flags], %l3
        lduw    [%l3], %l3              ! %l3 = audit_flags
        andcc   %l3, AF_PLTEXIT, %g0    ! AF_PLTEXIT = 2
        be,pt   %icc, .bypass_pltexit
        ldx     [%fp + STACK_BIAS + -CLONGSIZE], %l1    ! %l1 = * dyndata
        lduw    [%l1 + SBFLAGS_OFF], %l2        ! %l2 = sb_flags
        andcc   %l2, LA_SYMB_NOPLTEXIT, %g0     ! LA_SYMB_NOPLTEXIT = 2
        bne,a,pt %icc, .bypass_pltexit
        nop

        ba,a,pt %icc, .start_pltexit
        nop
.bypass_pltexit:
        mov     %l5, %g4                ! restore g4
        jmpl    %l0, %g0
        restore

.start_pltexit:
        /*
         * In order to call la_pltexit() we must duplicate the
         * arguments from the caller's stack on our stack frame.
         *
         * First we check the size of the caller's stack and grow
         * our stack to hold any of the arguments that need
         * duplicating (these are arguments 6->N), because the
         * first 6 (0->5) are passed via register windows on sparc.
         */

        /*
         * The first calculation is to determine how large the
         * argument passing area might be.  Since there is no
         * way to distinguish between 'argument passing' and
         * 'local storage' from the previous stack, this amount must
         * cover both.
         */
        ldx     [%fp + STACK_BIAS + -(2 * CLONGSIZE)], %l1      ! %l1 = callers
                                                ! stack size
        sub     %l1, MINFRAME, %l1              ! %l1 = argument space on
                                                ! caller's stack
        /*
         * Next we compare the prev. stack size against the audit_argcnt.  We
         * copy at most 'audit_argcnt' arguments.  The default arg count is 64.
         *
         * NOTE: on sparc we always copy at least six args since these
         *       are in reg-windows and not on the stack.
         *
         * NOTE: Also note that we multiply (shift really) the arg count
         *       by 8 which is the 'word size' to calculate the amount
         *       of stack space needed.
         */
        ldx     [%l7 + audit_argcnt], %l2
        lduw    [%l2], %l2              ! %l2 = audit_argcnt
        cmp     %l2, 6
        ble,pn  %icc, .grow_stack
        sub     %l2, 6, %l2
        sllx    %l2, CLONGSHIFT, %l2    ! arg count * 8
        cmp     %l1, %l2
        ble,a,pn %icc, .grow_stack
        nop
        mov     %l2, %l1
.grow_stack:
        /*
         * When duplicating the stack we skip the first SA(MINFRAME)
         * bytes.  This is the space on the stack reserved for preserving
         * the register windows and such, and does not need to be duplicated
         * on this new stack frame.  We start duplicating at the portion
         * of the stack reserved for arguments above 6.
         */
        sub     %sp, %l1, %sp           ! grow our stack by amount required.
        srax    %l1, CLONGSHIFT, %l1    ! %l1 = %l1 / 8 (words to copy)
        mov     SA(MINFRAME), %l2       ! %l2 = index into stack & frame

1:
        cmp     %l1, 0
        ble,a,pn %icc, 2f
        nop

        add     %fp, %l2, %l4
        ldx     [%l4 + STACK_BIAS], %l3 ! duplicate args from previous
        add     %sp, %l2, %l4
        stx     %l3, [%l4 + STACK_BIAS] ! stack onto current stack

        add     %l2, CLONGSIZE, %l2
        ba,pt   %icc, 1b
        sub     %l1, 0x1, %l1
2:
        mov     %i0, %o0                ! copy ins to outs
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
        mov     %i4, %o4
        mov     %i5, %o5
        call    %l0                     ! call original routine
        mov     %l5, %g4                ! restore g4
        mov     %o1, %l2                ! l2 = second 1/2 of return value
                                        ! for those 64 bit operations
                                        ! like div64 - yuck...

        ! %o0 = retval
        ldx     [%fp + STACK_BIAS + -CLONGSIZE], %l1
        ldx     [%l1 + REFLMP_OFF], %o1         ! %o1 = reflmp
        ldx     [%l1 + DEFLMP_OFF], %o2         ! %o2 = deflmp
        add     %l1, SYMDEF_OFF, %o3            ! %o3 = symp
        call    audit_pltexit
        lduw    [%l1 + SYMNDX_OFF], %o4         ! %o4 = symndx

        mov     %o0, %i0                ! pass on return code
        mov     %l2, %i1
        ret
        restore
        .size   elf_plt_trace, . - elf_plt_trace

#endif
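
/*
 * Editorial note (not part of the original source): the stack-growth
 * sizing performed at .start_pltexit above amounts to the following
 * C-level paraphrase, where caller_frame is the previous stack size
 * saved by the dyn_plt and audit_argcnt is the global referenced via
 * the GOT:
 *
 *      size_t nbytes = caller_frame - MINFRAME;
 *      if (audit_argcnt > 6) {
 *              size_t limit = (audit_argcnt - 6) * 8;
 *              if (nbytes > limit)
 *                      nbytes = limit;
 *      }
 *      grow the stack by nbytes, then copy nbytes bytes, starting at
 *      offset SA(MINFRAME), from the caller's frame into the new frame.
 */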