/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
 *
 * $FreeBSD$
 *
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define _ASM

#include <machine/asmacros.h>
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>

#include "assym.inc"

#define INTR_POP				\
	MEXITCOUNT;				\
	movq	TF_RDI(%rsp),%rdi;		\
	movq	TF_RSI(%rsp),%rsi;		\
	movq	TF_RDX(%rsp),%rdx;		\
	movq	TF_RCX(%rsp),%rcx;		\
	movq	TF_R8(%rsp),%r8;		\
	movq	TF_R9(%rsp),%r9;		\
	movq	TF_RAX(%rsp),%rax;		\
	movq	TF_RBX(%rsp),%rbx;		\
	movq	TF_RBP(%rsp),%rbp;		\
	movq	TF_R10(%rsp),%r10;		\
	movq	TF_R11(%rsp),%r11;		\
	movq	TF_R12(%rsp),%r12;		\
	movq	TF_R13(%rsp),%r13;		\
	movq	TF_R14(%rsp),%r14;		\
	movq	TF_R15(%rsp),%r15;		\
	testb	$SEL_RPL_MASK,TF_CS(%rsp);	\
	jz	1f;				\
	cli;					\
	swapgs;					\
1:	addq	$TF_RIP,%rsp;


	ENTRY(dtrace_invop_start)

	/*
	 * #BP traps with %rip set to the next address. We need to decrement
	 * the value to indicate the address of the int3 (0xcc) instruction
	 * that we substituted.
	 */
	movq	TF_RIP(%rsp), %rdi
	decq	%rdi
	movq	%rsp, %rsi
	movq	TF_RAX(%rsp), %rdx
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	bp_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	bp_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	bp_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	bp_ret

	/* When all else fails handle the trap in the usual way. */
	jmpq	*dtrace_invop_calltrap_addr

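	/*
	 * For reference: after INTR_POP, %rsp points at the hardware iretq
	 * frame that describes the interrupted context:
	 *
	 *	 0(%rsp)	RIP
	 *	 8(%rsp)	CS
	 *	16(%rsp)	RFLAGS
	 *	24(%rsp)	RSP
	 *	32(%rsp)	SS
	 *
	 * The emulation cases below adjust this frame (at offsets shifted by
	 * their temporary "pushq %rax" and, in bp_push, by the
	 * "subq $16, %rsp") before using iretq to resume the interrupted
	 * code.
	 */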
bp_push:
	/*
	 * We must emulate a "pushq %rbp". To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
	/*NOTREACHED*/

bp_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp". This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
	/*NOTREACHED*/

bp_nop:
	/* We must emulate a "nop". */
	INTR_POP
	iretq
	/*NOTREACHED*/

bp_ret:
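	/*
	 * We must emulate a "ret": the return address is popped from the
	 * interrupted thread's stack into the saved RIP, and the saved RSP
	 * is advanced past it, so that iretq resumes in the caller.
	 */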
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
	/*NOTREACHED*/

	END(dtrace_invop_start)

/*
greg_t dtrace_getfp(void)
*/
	ENTRY(dtrace_getfp)
	movq	%rbp, %rax
	ret
	END(dtrace_getfp)

/*
uint32_t
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
*/
	ENTRY(dtrace_cas32)
	movl	%esi, %eax
	lock
	cmpxchgl %edx, (%rdi)
	ret
	END(dtrace_cas32)

/*
void *
dtrace_casptr(void *target, void *cmp, void *new)
*/
	ENTRY(dtrace_casptr)
	movq	%rsi, %rax
	lock
	cmpxchgq %rdx, (%rdi)
	ret
	END(dtrace_casptr)

/*
uintptr_t
dtrace_caller(int aframes)
*/
	ENTRY(dtrace_caller)
	movq	$-1, %rax
	ret
	END(dtrace_caller)

/*
void
dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
*/
	ENTRY(dtrace_copy_nosmap)
	pushq	%rbp
	movq	%rsp, %rbp

	xchgq	%rdi, %rsi		/* make %rsi source, %rdi dest */
	movq	%rdx, %rcx		/* load count */
	repz				/* repeat for count ... */
	smovb				/* move from %ds:rsi to %es:rdi */
	leave
	ret
	END(dtrace_copy_nosmap)

	ENTRY(dtrace_copy_smap)
	pushq	%rbp
	movq	%rsp, %rbp

	xchgq	%rdi, %rsi		/* make %rsi source, %rdi dest */
	movq	%rdx, %rcx		/* load count */
	stac
	repz				/* repeat for count ... */
	smovb				/* move from %ds:rsi to %es:rdi */
	clac
	leave
	ret
	END(dtrace_copy_smap)

/*
void
dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
*/
	ENTRY(dtrace_copystr_nosmap)
	pushq	%rbp
	movq	%rsp, %rbp

0:
	movb	(%rdi), %al		/* load from source */
	movb	%al, (%rsi)		/* store to destination */
	addq	$1, %rdi		/* increment source pointer */
	addq	$1, %rsi		/* increment destination pointer */
	subq	$1, %rdx		/* decrement remaining count */
	cmpb	$0, %al
	je	2f
	testq	$0xfff, %rdx		/* test if count is 4k-aligned */
	jnz	1f			/* if not, continue with copying */
	testq	$CPU_DTRACE_BADADDR, (%rcx) /* load and test dtrace flags */
	jnz	2f
1:
	cmpq	$0, %rdx
	jne	0b
2:
	leave
	ret

	END(dtrace_copystr_nosmap)

	ENTRY(dtrace_copystr_smap)
	pushq	%rbp
	movq	%rsp, %rbp

	stac
0:
	movb	(%rdi), %al		/* load from source */
	movb	%al, (%rsi)		/* store to destination */
	addq	$1, %rdi		/* increment source pointer */
	addq	$1, %rsi		/* increment destination pointer */
	subq	$1, %rdx		/* decrement remaining count */
	cmpb	$0, %al
	je	2f
	testq	$0xfff, %rdx		/* test if count is 4k-aligned */
	jnz	1f			/* if not, continue with copying */
	testq	$CPU_DTRACE_BADADDR, (%rcx) /* load and test dtrace flags */
	jnz	2f
1:
	cmpq	$0, %rdx
	jne	0b
2:
	clac
	leave
	ret

	END(dtrace_copystr_smap)

/*
uintptr_t
dtrace_fulword(void *addr)
*/
	ENTRY(dtrace_fulword_nosmap)
	movq	(%rdi), %rax
	ret
	END(dtrace_fulword_nosmap)

	ENTRY(dtrace_fulword_smap)
	stac
	movq	(%rdi), %rax
	clac
	ret
	END(dtrace_fulword_smap)

/*
uint8_t
dtrace_fuword8_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword8_nocheck_nosmap)
	xorq	%rax, %rax
	movb	(%rdi), %al
	ret
	END(dtrace_fuword8_nocheck_nosmap)

	ENTRY(dtrace_fuword8_nocheck_smap)
	stac
	xorq	%rax, %rax
	movb	(%rdi), %al
	clac
	ret
	END(dtrace_fuword8_nocheck_smap)

/*
uint16_t
dtrace_fuword16_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword16_nocheck_nosmap)
	xorq	%rax, %rax
	movw	(%rdi), %ax
	ret
	END(dtrace_fuword16_nocheck_nosmap)

	ENTRY(dtrace_fuword16_nocheck_smap)
	stac
	xorq	%rax, %rax
	movw	(%rdi), %ax
	clac
	ret
	END(dtrace_fuword16_nocheck_smap)

/*
uint32_t
dtrace_fuword32_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword32_nocheck_nosmap)
	xorq	%rax, %rax
	movl	(%rdi), %eax
	ret
	END(dtrace_fuword32_nocheck_nosmap)

	ENTRY(dtrace_fuword32_nocheck_smap)
	stac
	xorq	%rax, %rax
	movl	(%rdi), %eax
	clac
	ret
	END(dtrace_fuword32_nocheck_smap)

/*
uint64_t
dtrace_fuword64_nocheck(void *addr)
*/
	ENTRY(dtrace_fuword64_nocheck_nosmap)
	movq	(%rdi), %rax
	ret
	END(dtrace_fuword64_nocheck_nosmap)

	ENTRY(dtrace_fuword64_nocheck_smap)
	stac
	movq	(%rdi), %rax
	clac
	ret
	END(dtrace_fuword64_nocheck_smap)

/*
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uintptr_t illval)
*/
	ENTRY(dtrace_probe_error)
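	/*
	 * Shift the incoming arguments one register to the right so that
	 * dtrace_probeid_error can be passed to dtrace_probe() as the first
	 * argument; the original sixth argument (illval, in %r9) is stored
	 * at (%rsp), where a seventh integer argument would be passed.
	 */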
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$0x8, %rsp
	movq	%r9, (%rsp)
	movq	%r8, %r9
	movq	%rcx, %r8
	movq	%rdx, %rcx
	movq	%rsi, %rdx
	movq	%rdi, %rsi
	movl	dtrace_probeid_error(%rip), %edi
	call	dtrace_probe
	addq	$0x8, %rsp
	leave
	ret
	END(dtrace_probe_error)

/*
void
dtrace_membar_producer(void)
*/
	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_producer)

/*
void
dtrace_membar_consumer(void)
*/
	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	END(dtrace_membar_consumer)

/*
dtrace_icookie_t
dtrace_interrupt_disable(void)
*/
	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax
	cli
	ret
	END(dtrace_interrupt_disable)

/*
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
*/
	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	END(dtrace_interrupt_enable)