/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#include <machine/armreg.h>
#include <machine/clock.h>
#include <machine/frame.h>
#include <machine/trap.h>
#include <vm/pmap.h>

#define	DELAYBRANCH(x)	((int)(x) < 0)

#define	BIT_PC	15
#define	BIT_LR	14
#define	BIT_SP	13

extern dtrace_id_t	dtrace_probeid_error;
extern int (*dtrace_invop_jump_addr)(struct trapframe *);
extern void dtrace_getnanotime(struct timespec *tsp);
extern void dtrace_getnanouptime(struct timespec *tsp);

int dtrace_invop(uintptr_t, struct trapframe *, uintptr_t);
void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, struct trapframe *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

int
dtrace_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
{
	struct thread *td;
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	rval = 0;
	td = curthread;
	td->t_dtrace_trapframe = frame;
	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, frame, eax)) != 0)
			break;
	td->t_dtrace_trapframe = NULL;
	return (rval);
}

void
dtrace_invop_add(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

void
dtrace_invop_remove(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}
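
/*
 * Illustrative sketch only (not compiled): how a provider such as fbt is
 * expected to hook into the handler chain above.  It registers a handler
 * at load time and removes it at unload; the handler returns a
 * DTRACE_INVOP_* code when it recognizes the trapping probe site and 0
 * otherwise, so dtrace_invop() keeps walking the list.  The example_*
 * names below are hypothetical and not part of this file or any provider.
 */
#if 0
static int
example_invop(uintptr_t addr, struct trapframe *frame, uintptr_t rval)
{
	/*
	 * A real handler would look up addr in its table of patched probe
	 * sites and, on a match, return the DTRACE_INVOP_* code describing
	 * the instruction that was overwritten there.
	 */
	return (0);	/* Not ours; dtrace_invop() tries the next handler. */
}

static void
example_provider_load(void *arg)
{

	dtrace_invop_add(example_invop);
}

static void
example_provider_unload(void *arg)
{

	dtrace_invop_remove(example_invop);
}
#endif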

/*ARGSUSED*/
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/*
	 * There are no ranges to exclude that are common to all 32-bit arm
	 * platforms.  This function only needs to exclude ranges "... in
	 * which it is impossible to recover from such a load after it has
	 * been attempted." -- i.e., accessing within the range causes some
	 * sort of fault in the system which is not handled by the normal arm
	 * exception-handling mechanisms.  If systems exist where that is the
	 * case, a method to handle this functionality would have to be added
	 * to the platform_if interface so that those systems could provide
	 * their specific toxic range(s).
	 */
}

void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t cpus;

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		CPU_SETOF(cpu, &cpus);

	smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
	    smp_no_rendezvous_barrier, arg);
}

static void
dtrace_sync_func(void)
{
}

void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

/*
 * DTrace needs a high resolution time function which can be called from a
 * probe context and is guaranteed not to have been instrumented with probes
 * itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime(void)
{
	struct timespec curtime;

	dtrace_getnanouptime(&curtime);

	return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
}

uint64_t
dtrace_gethrestime(void)
{
	struct timespec current_time;

	dtrace_getnanotime(&current_time);

	return (current_time.tv_sec * 1000000000UL + current_time.tv_nsec);
}

/* Function to handle DTrace traps during probes.  See amd64/amd64/trap.c. */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	/*
	 * A trap can occur while DTrace executes a probe.  Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault.  On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */
	if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* Alignment fault. */
		case FAULT_ALIGN:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = 0;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_pc += sizeof(int);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}
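
/*
 * Illustrative sketch only (not compiled): the kind of probe-context access
 * that relies on dtrace_trap() above.  The caller opens a no-fault window
 * around the load; if the load faults, dtrace_trap() flags
 * CPU_DTRACE_BADADDR and skips the instruction instead of panicking, and
 * the caller checks the flag afterwards.  This mirrors the pattern used by
 * DTrace's safe-copy routines; the helper shown here is hypothetical.
 */
#if 0
static uint32_t
example_safe_load(volatile uint32_t *addr)
{
	uint32_t val;

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = *addr;
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_BADADDR))
		return (0);	/* The access faulted; report an error. */
	return (val);
}
#endif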

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uintptr_t illval)
{

	dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state,
	    (uintptr_t)epid,
	    (uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
}

static int
dtrace_invop_start(struct trapframe *frame)
{
	register_t *r0, *sp;
	int data, invop, reg, update_sp;

	invop = dtrace_invop(frame->tf_pc, frame, frame->tf_r0);
	switch (invop & DTRACE_INVOP_MASK) {
	case DTRACE_INVOP_PUSHM:
		sp = (register_t *)frame->tf_svc_sp;
		r0 = &frame->tf_r0;
		data = DTRACE_INVOP_DATA(invop);

		/*
		 * Store the pc, lr, and sp.  These have their own
		 * entries in the struct.
		 */
		if (data & (1 << BIT_PC)) {
			sp--;
			*sp = frame->tf_pc;
		}
		if (data & (1 << BIT_LR)) {
			sp--;
			*sp = frame->tf_svc_lr;
		}
		if (data & (1 << BIT_SP)) {
			sp--;
			*sp = frame->tf_svc_sp;
		}

		/* Store the general registers. */
		for (reg = 12; reg >= 0; reg--) {
			if (data & (1 << reg)) {
				sp--;
				*sp = r0[reg];
			}
		}

		/* Update the stack pointer and program counter to continue. */
		frame->tf_svc_sp = (register_t)sp;
		frame->tf_pc += 4;
		break;
	case DTRACE_INVOP_POPM:
		sp = (register_t *)frame->tf_svc_sp;
		r0 = &frame->tf_r0;
		data = DTRACE_INVOP_DATA(invop);

		/* Read the general registers. */
		for (reg = 0; reg <= 12; reg++) {
			if (data & (1 << reg)) {
				r0[reg] = *sp;
				sp++;
			}
		}

		/*
		 * Set the stack pointer.  If we don't update it here we will
		 * need to update it at the end, as the instruction would.
		 */
		update_sp = 1;
		if (data & (1 << BIT_SP)) {
			frame->tf_svc_sp = *sp;
			sp++;
			update_sp = 0;
		}

		/* Update the link register; we need to use the correct copy. */
		if (data & (1 << BIT_LR)) {
			frame->tf_svc_lr = *sp;
			sp++;
		}
		/*
		 * And the program counter.  If it's not in the list, skip over
		 * it when we return so as not to hit this breakpoint again.
		 */
		if (data & (1 << BIT_PC)) {
			frame->tf_pc = *sp;
			sp++;
		} else
			frame->tf_pc += 4;

		/* Update the stack pointer if we haven't already done so. */
		if (update_sp)
			frame->tf_svc_sp = (register_t)sp;
		break;
	case DTRACE_INVOP_B:
		data = DTRACE_INVOP_DATA(invop) & 0x00ffffff;
		/* Sign extend the 24-bit offset. */
		if ((data & (1 << 23)) != 0)
			data |= 0xff000000;
		/*
		 * The offset is the number of 4-byte words to adjust the pc
		 * by; ARM branch targets are relative to the current
		 * instruction address plus 8.
		 */
		data *= 4;
		data += 8;
		frame->tf_pc += data;
		break;
	default:
		return (-1);
		break;
	}

	return (0);
}

void
dtrace_invop_init(void)
{
	dtrace_invop_jump_addr = dtrace_invop_start;
}

void
dtrace_invop_uninit(void)
{
	dtrace_invop_jump_addr = NULL;
}
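
/*
 * Illustrative sketch only (not compiled): the consumer side of
 * dtrace_invop_jump_addr.  The arm undefined-instruction trap path is
 * expected to call through this pointer when it sees the DTrace breakpoint
 * encoding, handing the trapframe to dtrace_invop_start() above so the
 * overwritten instruction can be emulated.  The check below is schematic;
 * the real test lives in the platform trap code, not in this file.
 */
#if 0
	/* In the undefined-instruction handler, with `frame' in hand: */
	if (dtrace_invop_jump_addr != NULL &&
	    dtrace_invop_jump_addr(frame) == 0)
		return;		/* Emulated; resume at the updated tf_pc. */
#endif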