/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#include <machine/armreg.h>
#include <machine/clock.h>
#include <machine/frame.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <vm/pmap.h>

extern dtrace_id_t dtrace_probeid_error;
extern int (*dtrace_invop_jump_addr)(struct trapframe *);
extern void dtrace_getnanotime(struct timespec *tsp);
extern void dtrace_getnanouptime(struct timespec *tsp);

int dtrace_invop(uintptr_t, struct trapframe *, uintptr_t);
void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, struct trapframe *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

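/*
 * Called from the breakpoint trap path (via dtrace_invop_start() below) when
 * the kernel hits an instruction patched by a DTrace provider.  Walk the
 * registered handlers until one recognizes the address; its non-zero return
 * value (for providers such as fbt, the original instruction that was
 * overwritten) is handed back to the caller for emulation.
 */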
int
dtrace_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
{
	struct thread *td;
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	rval = 0;
	td = curthread;
	td->t_dtrace_trapframe = frame;
	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, frame, eax)) != 0)
			break;
	td->t_dtrace_trapframe = NULL;
	return (rval);
}

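/*
 * Providers register and deregister their invalid-opcode handlers with the
 * two functions below.  Handlers live on a simple singly linked list with
 * new entries pushed onto the head; removing a handler that was never
 * registered is a fatal error.
 */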
void
dtrace_invop_add(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

void
dtrace_invop_remove(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr, *prev;

	hdlr = dtrace_invop_hdlr;
	prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}

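/*
 * Report the address ranges that DTrace must never dereference from probe
 * context.  On arm64 this is everything below VM_MIN_KERNEL_ADDRESS, i.e.
 * user space and the unmapped region below the kernel map.
 */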
/*ARGSUSED*/
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{

	(*func)(0, (uintptr_t)VM_MIN_KERNEL_ADDRESS);
}

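/*
 * Run 'func' with 'arg' on the CPU named by 'cpu', or on every CPU when
 * DTRACE_CPUALL is passed, using an SMP rendezvous.
 */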
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t cpus;

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		CPU_SETOF(cpu, &cpus);

	smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
	    smp_no_rendezvous_barrier, arg);
}

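/*
 * dtrace_sync() rendezvouses every CPU on an empty function.  Since
 * smp_rendezvous_cpus() does not return until all target CPUs have run
 * dtrace_sync_func(), DTrace can use this as a barrier to ensure that its
 * state changes have been observed by every CPU before it proceeds.
 */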
static void
dtrace_sync_func(void)
{

}

void
dtrace_sync(void)
{

	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

static uint64_t nsec_scale;

#define	SCALE_SHIFT	25

/*
 * Choose scaling factors which let us convert a cntvct_el0 value to nanoseconds
 * without overflow, as in the amd64 implementation.
 *
 * Documentation for the ARM generic timer states that typical counter
 * frequencies are in the range 1MHz-50MHz; in ARMv9 the frequency is fixed at
 * 1GHz.  The lower bound of 1MHz forces the shift to be at most 25 bits.  At
 * that frequency, the calculation (hi * scale) << (32 - shift) will not
 * overflow for over 100 years, assuming that the counter value starts at 0
 * upon boot.
 */
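/*
 * For example, with a (hypothetical) 24MHz counter the init routine below
 * computes
 *
 *	nsec_scale = (10^9 << 25) / 24000000 = 1398101333
 *
 * and dtrace_gethrtime() then yields (count * 1398101333) >> 25, i.e. about
 * 41.67ns per counter tick.
 */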
static void
dtrace_gethrtime_init(void *arg __unused)
{
	uint64_t freq;

	freq = READ_SPECIALREG(cntfrq_el0);
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / freq;
}
SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
    dtrace_gethrtime_init, NULL);

/*
 * DTrace needs a high resolution time function which can be called from
 * probe context and is guaranteed not to be instrumented with probes itself.
 *
 * Returns nanoseconds since some arbitrary point in time (likely SoC reset).
 */
uint64_t
dtrace_gethrtime(void)
{
	uint64_t count;
	uint32_t lo, hi;

	count = READ_SPECIALREG(cntvct_el0);
	lo = count;
	hi = count >> 32;
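	/*
	 * Compute (count * nsec_scale) >> SCALE_SHIFT in two 32-bit halves,
	 * avoiding the need for a full 64x64->128-bit multiply: the high
	 * half's contribution is (hi * nsec_scale) << (32 - SCALE_SHIFT).
	 */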
	return (((lo * nsec_scale) >> SCALE_SHIFT) +
	    ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}

/*
 * Return a much lower resolution wallclock time based on the system clock
 * updated by the timer.  If needed, a higher-resolution version could be
 * added that interpolates within the tick using the architectural counter,
 * in the spirit of dtrace_gethrtime().
 */
uint64_t
dtrace_gethrestime(void)
{
	struct timespec current_time;

	dtrace_getnanotime(&current_time);

	return (current_time.tv_sec * 1000000000UL + current_time.tv_nsec);
}

/* Function to handle DTrace traps during probes. See arm64/arm64/trap.c */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */

	if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		case EXCP_DATA_ABORT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = frame->tf_far;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_elr += 4;
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}

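/*
 * Fire the ERROR probe to report a fault taken while processing the enabled
 * probe identified by 'epid'.
 */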
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uintptr_t illval)
{

	dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state,
	    (uintptr_t)epid,
	    (uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
}

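/*
 * Helpers used when emulating LDP/STP: map an AArch64 register number onto
 * the trapframe.  Registers 0-29 live in tf_x[], 30 is the link register
 * and 31 is treated as xzr for these instructions.
 */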
static void
dtrace_load64(uint64_t *addr, struct trapframe *frame, u_int reg)
{

	KASSERT(reg <= 31, ("dtrace_load64: Invalid register %u", reg));
	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = *addr;
	else if (reg == 30) /* lr */
		frame->tf_lr = *addr;
	/* Nothing to do for load to xzr */
}

static void
dtrace_store64(uint64_t *addr, struct trapframe *frame, u_int reg)
{

	KASSERT(reg <= 31, ("dtrace_store64: Invalid register %u", reg));
	if (reg < nitems(frame->tf_x))
		*addr = frame->tf_x[reg];
	else if (reg == 30) /* lr */
		*addr = frame->tf_lr;
	else if (reg == 31) /* xzr */
		*addr = 0;
}

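/*
 * Entry point installed in dtrace_invop_jump_addr and called from the
 * breakpoint handler (see arm64/arm64/trap.c).  dtrace_invop() returns the
 * instruction that the provider replaced with a breakpoint; emulate it here
 * and advance the ELR so the interrupted code continues.  Only the handful
 * of forms providers are expected to patch (stp/ldp of a register pair at
 * sp, sub sp, nop, b and ret) are handled; anything else returns -1 so the
 * caller falls back to normal trap handling.
 */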
static int
dtrace_invop_start(struct trapframe *frame)
{
	int data, invop, tmp;

	invop = dtrace_invop(frame->tf_elr, frame, frame->tf_x[0]);

	tmp = (invop & LDP_STP_MASK);
	if (tmp == STP_64 || tmp == LDP_64) {
		register_t arg1, arg2, *sp;
		int offs;

		sp = (register_t *)frame->tf_sp;
		data = invop;
		arg1 = (data >> ARG1_SHIFT) & ARG1_MASK;
		arg2 = (data >> ARG2_SHIFT) & ARG2_MASK;

		offs = (data >> OFFSET_SHIFT) & OFFSET_MASK;

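		/*
		 * The immediate is OFFSET_SIZE bits wide and, as encoded, is
		 * already in 8-byte units, which matches the register_t
		 * pointer arithmetic on 'sp' below.  Sign-extend it by hand:
		 * if the top bit is set, subtract the two's-complement
		 * magnitude instead.  For STP the stack adjustment happens
		 * before the stores (pre-index form); for LDP it happens
		 * after the loads (post-index form).
		 */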
		switch (tmp) {
		case STP_64:
			if (offs >> (OFFSET_SIZE - 1))
				sp -= (~offs & OFFSET_MASK) + 1;
			else
				sp += (offs);
			dtrace_store64(sp + 0, frame, arg1);
			dtrace_store64(sp + 1, frame, arg2);
			break;
		case LDP_64:
			dtrace_load64(sp + 0, frame, arg1);
			dtrace_load64(sp + 1, frame, arg2);
			if (offs >> (OFFSET_SIZE - 1))
				sp -= (~offs & OFFSET_MASK) + 1;
			else
				sp += (offs);
			break;
		default:
			break;
		}

		/* Update the stack pointer and program counter to continue */
		frame->tf_sp = (register_t)sp;
		frame->tf_elr += INSN_SIZE;
		return (0);
	}

	if ((invop & SUB_MASK) == SUB_INSTR) {
		frame->tf_sp -= (invop >> SUB_IMM_SHIFT) & SUB_IMM_MASK;
		frame->tf_elr += INSN_SIZE;
		return (0);
	}

	if (invop == NOP_INSTR) {
		frame->tf_elr += INSN_SIZE;
		return (0);
	}

	if ((invop & B_MASK) == B_INSTR) {
		data = (invop & B_DATA_MASK);
		/* The data is the branch offset, in 4-byte words. */
		data *= 4;
		frame->tf_elr += data;
		return (0);
	}

	if (invop == RET_INSTR) {
		frame->tf_elr = frame->tf_lr;
		return (0);
	}

	return (-1);
}

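/*
 * Install and remove the hook consumed by the breakpoint handler in
 * arm64/arm64/trap.c.  While dtrace_invop_jump_addr is set, DTrace gets the
 * first chance to handle kernel breakpoint exceptions.
 */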
void
dtrace_invop_init(void)
{

	dtrace_invop_jump_addr = dtrace_invop_start;
}

void
dtrace_invop_uninit(void)
{

	dtrace_invop_jump_addr = NULL;
}