/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define _ASM

#include <machine/asmacros.h>
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>

#include "assym.s"

	.globl	calltrap
	.type	calltrap,@function
	ENTRY(dtrace_invop_start)

	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */

	/*
	 * Call dtrace_invop to let it check if the exception was
	 * a fbt one. The return value in %eax will tell us what
	 * dtrace_invop wants us to do.
	 */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	invop_push
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	invop_pop
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	invop_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	invop_nop

	/* When all else fails handle the trap in the usual way. */
	jmpl	*dtrace_invop_calltrap_addr
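	/*
	 * Each emulation handler below first executes "popal"/"popa",
	 * leaving the iret frame on top of the stack: the interrupted EIP
	 * at 0(%esp), CS at 4(%esp) and EFLAGS at 8(%esp).  The handler
	 * then rewrites that frame to reproduce the effect of the
	 * instruction the probe replaced, advances the saved EIP past the
	 * one-byte probe point, and resumes the function with "iret".
	 */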
invop_push:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popal
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* Return from interrupt. */
invop_pop:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above: we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popal
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* Return from interrupt. */
invop_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	subl	$8, %ebx		/* adjust for three pushes, one pop */
	movl	%ebx, 8(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	(%esp), %esp		/* set stack pointer */
	iret				/* return from interrupt */
invop_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
	iret				/* return from interrupt */

	END(dtrace_invop_start)

/*
void dtrace_invop_init(void)
*/
	ENTRY(dtrace_invop_init)
	movl	$dtrace_invop_start, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_init)

/*
void dtrace_invop_uninit(void)
*/
	ENTRY(dtrace_invop_uninit)
	movl	$0, dtrace_invop_jump_addr
	ret
	END(dtrace_invop_uninit)

/*
greg_t dtrace_getfp(void)
*/

	ENTRY(dtrace_getfp)
	movl	%ebp, %eax
	ret
	END(dtrace_getfp)

/*
uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
*/
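/*
 * Both compare-and-swap entry points share one body: %edx = target,
 * %eax = cmp, %ecx = new.  "lock cmpxchgl" compares %eax with (%edx)
 * and stores %ecx there only on a match; either way the previous value
 * of *target ends up in %eax, which is the return value.  dtrace_casptr
 * is an alternate entry because pointers are 32 bits wide on i386, so
 * the same code serves both.
 */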
	ENTRY(dtrace_cas32)
	ALTENTRY(dtrace_casptr)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	movl	12(%esp), %ecx
	lock
	cmpxchgl %ecx, (%edx)
	ret
	END(dtrace_casptr)
	END(dtrace_cas32)

/*
uintptr_t dtrace_caller(int aframes)
*/

	ENTRY(dtrace_caller)
	movl	$-1, %eax
	ret
	END(dtrace_caller)

/*
void dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
*/

	ENTRY(dtrace_copy)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%edi

	movl	8(%ebp), %esi		/* Load source address */
	movl	12(%ebp), %edi		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */
	repz				/* Repeat for count... */
	smovb				/*   move from %ds:si to %es:di */

	popl	%edi
	popl	%esi
	movl	%ebp, %esp
	popl	%ebp
	ret
	END(dtrace_copy)

/*
void dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
*/

	ENTRY(dtrace_copystr)

	pushl	%ebp			/* Setup stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* Save registers */

	movl	8(%ebp), %ebx		/* Load source address */
	movl	12(%ebp), %edx		/* Load destination address */
	movl	16(%ebp), %ecx		/* Load count */

0:
	movb	(%ebx), %al		/* Load from source */
	movb	%al, (%edx)		/* Store to destination */
	incl	%ebx			/* Increment source pointer */
	incl	%edx			/* Increment destination pointer */
	decl	%ecx			/* Decrement remaining count */
	cmpb	$0, %al
	je	1f
	cmpl	$0, %ecx
	jne	0b

1:
	popl	%ebx
	movl	%ebp, %esp
	popl	%ebp
	ret

	END(dtrace_copystr)

/*
uintptr_t dtrace_fulword(void *addr)
*/

	ENTRY(dtrace_fulword)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fulword)

/*
uint8_t dtrace_fuword8_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword8_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzbl	(%ecx), %eax
	ret
	END(dtrace_fuword8_nocheck)

/*
uint16_t dtrace_fuword16_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword16_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movzwl	(%ecx), %eax
	ret
	END(dtrace_fuword16_nocheck)

/*
uint32_t dtrace_fuword32_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword32_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	movl	(%ecx), %eax
	ret
	END(dtrace_fuword32_nocheck)

/*
uint64_t dtrace_fuword64_nocheck(void *addr)
*/

	ENTRY(dtrace_fuword64_nocheck)
	movl	4(%esp), %ecx
	xorl	%eax, %eax
	xorl	%edx, %edx
	movl	(%ecx), %eax
	movl	4(%ecx), %edx
	ret
	END(dtrace_fuword64_nocheck)

/*
void dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which, int fault, int fltoffs, uintptr_t illval)
*/

	ENTRY(dtrace_probe_error)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	0x1c(%ebp)
	pushl	0x18(%ebp)
	pushl	0x14(%ebp)
	pushl	0x10(%ebp)
	pushl	0xc(%ebp)
	pushl	0x8(%ebp)
	pushl	dtrace_probeid_error
	call	dtrace_probe
	movl	%ebp, %esp
	popl	%ebp
	ret
	END(dtrace_probe_error)

/*
void dtrace_membar_producer(void)
*/

	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_producer)

/*
void dtrace_membar_consumer(void)
*/

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_consumer)

/*
dtrace_icookie_t dtrace_interrupt_disable(void)
*/
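/*
 * The interrupt cookie is simply the caller's EFLAGS image:
 * dtrace_interrupt_disable() saves it with pushfl/popl before clearing
 * IF with "cli", and dtrace_interrupt_enable() restores it by pushing
 * the cookie back and executing "popfl".
 */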
	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax
	cli
	ret
	END(dtrace_interrupt_disable)

/*
void dtrace_interrupt_enable(dtrace_icookie_t cookie)
*/
	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	END(dtrace_interrupt_enable)

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
 */
/*
void vpanic(const char *format, va_list alist)
*/
	ENTRY(vpanic)			/* Initial stack layout: */

	pushl	%ebp			/* | %eip | 20 */
	movl	%esp, %ebp		/* | %ebp | 16 */
	pushl	%eax			/* | %eax | 12 */
	pushl	%ebx			/* | %ebx |  8 */
	pushl	%ecx			/* | %ecx |  4 */
	pushl	%edx			/* | %edx |  0 */

	movl	%esp, %ebx		/* %ebx = current stack pointer */

	lea	panic_quiesce, %eax	/* %eax = &panic_quiesce */
	pushl	%eax			/* push &panic_quiesce */
	call	panic_trigger		/* %eax = panic_trigger() */
	addl	$4, %esp		/* reset stack pointer */

vpanic_common:
	cmpl	$0, %eax		/* if (%eax == 0) */
	je	0f			/*   goto 0f; */

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp	/* %esp = panic_stack */
	addl	$PANICSTKSIZE, %esp	/* %esp += PANICSTKSIZE */

0:	subl	$REGSIZE, %esp		/* allocate struct regs */

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
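	/*
	 * The register spill and the call to panicsys() that consumes it
	 * are currently compiled out by "#ifdef notyet"; when enabled,
	 * each value lands in its REGOFF_* slot and panicsys() is called
	 * as panicsys(format, alist, &regs, on_panic_stack), with %eax
	 * still holding the trigger result.
	 */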
#ifdef notyet
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
	mov	%cs, %edx
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
	mov	%ss, %edx
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx		/* %ecx = &regs */
	pushl	%eax			/* push on_panic_stack */
	pushl	%ecx			/* push &regs */
	movl	12(%ebp), %ecx		/* %ecx = alist */
	pushl	%ecx			/* push alist */
	movl	8(%ebp), %ecx		/* %ecx = format */
	pushl	%ecx			/* push format */
	call	panicsys		/* panicsys(); */
	addl	$16, %esp		/* pop arguments */

	addl	$REGSIZE, %esp
#endif
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	END(vpanic)

/*
void dtrace_vpanic(const char *format, va_list alist)
*/
	ENTRY(dtrace_vpanic)		/* Initial stack layout: */

	pushl	%ebp			/* | %eip | 20 */
	movl	%esp, %ebp		/* | %ebp | 16 */
	pushl	%eax			/* | %eax | 12 */
	pushl	%ebx			/* | %ebx |  8 */
	pushl	%ecx			/* | %ecx |  4 */
	pushl	%edx			/* | %edx |  0 */

	movl	%esp, %ebx		/* %ebx = current stack pointer */

	lea	panic_quiesce, %eax	/* %eax = &panic_quiesce */
	pushl	%eax			/* push &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	addl	$4, %esp		/* reset stack pointer */
	jmp	vpanic_common		/* jump back to common code */

	END(dtrace_vpanic)

/*
int
panic_trigger(int *tp)
*/
	ENTRY(panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	movl	4(%esp), %ecx		/* %ecx = tp (cdecl argument) */
	lock
	xchgl	%edx, (%ecx)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	END(panic_trigger)

/*
int
dtrace_panic_trigger(int *tp)
*/
	ENTRY(dtrace_panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	movl	4(%esp), %ecx		/* %ecx = tp (cdecl argument) */
	lock
	xchgl	%edx, (%ecx)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	END(dtrace_panic_trigger)
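/*
 * Note on the two trigger routines above: each atomically exchanges the
 * sentinel 0xdefacedd into *tp.  A prior value of zero means this caller
 * won the race to start the panic and the routine returns 1; any other
 * value means a panic is already in progress and it returns 0.
 */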