1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 /* All Rights Reserved */
27
28
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/vmparam.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
34 #include <sys/signal.h>
35 #include <sys/stack.h>
36 #include <sys/frame.h>
37 #include <sys/proc.h>
38 #include <sys/ucontext.h>
39 #include <sys/siginfo.h>
40 #include <sys/cpuvar.h>
41 #include <sys/asm_linkage.h>
42 #include <sys/kmem.h>
43 #include <sys/errno.h>
44 #include <sys/bootconf.h>
45 #include <sys/archsystm.h>
46 #include <sys/fpu/fpusystm.h>
47 #include <sys/auxv.h>
48 #include <sys/debug.h>
49 #include <sys/elf.h>
50 #include <sys/elf_SPARC.h>
51 #include <sys/cmn_err.h>
52 #include <sys/spl.h>
53 #include <sys/privregs.h>
54 #include <sys/kobj.h>
55 #include <sys/modctl.h>
56 #include <sys/reboot.h>
57 #include <sys/time.h>
58 #include <sys/panic.h>
59 #include <vm/seg_kmem.h>
60 #include <vm/page.h>
61 #include <sys/machpcb.h>
62
extern struct bootops *bootops;

/*
 * Workaround for broken FDDI driver (remove when 4289172 is fixed)
 */
short cputype = 0x80;

/* Fills pcstack[] from the frames still in register windows; see getpcstack() */
extern int getpcstack_top(pc_t *pcstack, int limit, uintptr_t *lastfp,
    pc_t *lastpc);
72
73 /*
74 * Get a pc-only stacktrace. Used for kmem_alloc() buffer ownership tracking.
75 * Returns MIN(current stack depth, pcstack_limit).
76 */
int
getpcstack(pc_t *pcstack, int pcstack_limit)
{
	struct frame *fp, *minfp, *stacktop;
	uintptr_t nextfp;
	pc_t nextpc;
	int depth;
	int on_intr;
	pc_t pcswin[MAXWIN];	/* on-stack scratch; see block comment below */
	int npcwin = MIN(MAXWIN, pcstack_limit);

	/*
	 * Pick the upper bound of the walk: the interrupt stack if we are
	 * currently handling an interrupt, otherwise this thread's stack.
	 */
	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;

	/* Frame pointers are biased on SPARC v9; un-bias before dereferencing */
	minfp = (struct frame *)((uintptr_t)getfp() + STACK_BIAS);

	/*
	 * getpcstack_top() processes the frames still in register windows,
	 * fills nextfp and nextpc with our starting point, and returns
	 * the number of frames it wrote into pcstack.
	 *
	 * Since we cannot afford to take a relocation trap while we are
	 * messing with register windows, we pass getpcstack_top() a buffer
	 * on our stack and then copy the result out to the pcstack buffer
	 * provided by the caller. The size of this buffer is the maximum
	 * supported number of SPARC register windows; however we ASSERT
	 * that it returns fewer than that, since it will skip the current
	 * frame.
	 */
	npcwin = getpcstack_top(pcswin, npcwin, &nextfp, &nextpc);
	ASSERT(npcwin >= 0 && npcwin < MAXWIN && npcwin <= pcstack_limit);
	for (depth = 0; depth < npcwin; depth++) {
		pcstack[depth] = pcswin[depth];
	}

	fp = (struct frame *)(nextfp + STACK_BIAS);

	/* Walk the frames saved in memory until we leave the valid range. */
	while (depth < pcstack_limit) {
		if (fp <= minfp || fp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		pcstack[depth++] = nextpc;
		minfp = fp;	/* frames must strictly ascend the stack */

		nextpc = (pc_t)fp->fr_savpc;
		fp = (struct frame *)((uintptr_t)fp->fr_savfp + STACK_BIAS);
	}

	return (depth);
}
139
140 /*
141 * The following ELF header fields are defined as processor-specific
142 * in the SPARC V8 ABI:
143 *
144 * e_ident[EI_DATA] encoding of the processor-specific
145 * data in the object file
146 * e_machine processor identification
147 * e_flags processor-specific flags associated
148 * with the file
149 */
150
151 /*
152 * The value of at_flags reflects a platform's cpu module support.
153 * at_flags is used to check for allowing a binary to execute and
154 * is passed as the value of the AT_FLAGS auxiliary vector.
155 */
156 int at_flags = 0;
157
158 /*
159 * Check the processor-specific fields of an ELF header.
160 *
161 * returns 1 if the fields are valid, 0 otherwise
162 */
163 int
elfheadcheck(unsigned char e_data,Elf32_Half e_machine,Elf32_Word e_flags)164 elfheadcheck(
165 unsigned char e_data,
166 Elf32_Half e_machine,
167 Elf32_Word e_flags)
168 {
169 Elf32_Word needed_flags;
170 int supported_flags;
171
172 if (e_data != ELFDATA2MSB)
173 return (0);
174
175 switch (e_machine) {
176 case EM_SPARC:
177 if (e_flags == 0)
178 return (1);
179 else
180 return (0);
181 case EM_SPARCV9:
182 /*
183 * Check that ELF flags are set to supported SPARC V9 flags
184 */
185 needed_flags = e_flags & EF_SPARC_EXT_MASK;
186 supported_flags = at_flags & ~EF_SPARC_32PLUS;
187
188 if (needed_flags & ~supported_flags)
189 return (0);
190 else
191 return (1);
192 case EM_SPARC32PLUS:
193 if ((e_flags & EF_SPARC_32PLUS) != 0 &&
194 ((e_flags & ~at_flags) & EF_SPARC_32PLUS_MASK) == 0)
195 return (1);
196 else
197 return (0);
198 default:
199 return (0);
200 }
201 }
202
/*
 * /etc/system-patchable overrides applied in bind_hwcap(): the "include"
 * words force capability bits on, the "exclude" words mask bits off.
 */
uint_t auxv_hwcap_include = 0;	/* patch to enable unrecognized features */
uint_t auxv_hwcap_exclude = 0;	/* patch for broken cpus, debugging */
#if defined(_SYSCALL32_IMPL)
uint_t auxv_hwcap32_include = 0;	/* ditto for 32-bit apps */
uint_t auxv_hwcap32_exclude = 0;	/* ditto for 32-bit apps */
#endif

uint_t cpu_hwcap_flags = 0;	/* set by cpu-dependent code */
211
212 /*
213 * Gather information about the processor and place it into auxv_hwcap
214 * so that it can be exported to the linker via the aux vector.
215 *
216 * We use this seemingly complicated mechanism so that we can ensure
217 * that /etc/system can be used to override what the system can or
218 * cannot discover for itself.
219 */
void
bind_hwcap(void)
{
	/*
	 * Start from what the cpu-dependent code discovered
	 * (cpu_hwcap_flags), then apply the /etc/system overrides.
	 */
	auxv_hwcap = (auxv_hwcap_include | cpu_hwcap_flags) &
		~auxv_hwcap_exclude;

	/* Only announce the result when an override was actually applied. */
	if (auxv_hwcap_include || auxv_hwcap_exclude)
		cmn_err(CE_CONT, "?user ABI extensions: %b\n",
		    auxv_hwcap, FMT_AV_SPARC);

#if defined(_SYSCALL32_IMPL)
	/*
	 * These are now a compatibility artifact; all supported SPARC CPUs
	 * are V9-capable (and thus support v8plus) and fully implement
	 * {s,u}mul and {s,u}div.
	 *
	 * NOTE: these bits are OR-ed in after auxv_hwcap was computed
	 * above, so they appear only in the 32-bit capability word.
	 */
	cpu_hwcap_flags |= AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS;

	auxv_hwcap32 = (auxv_hwcap32_include | cpu_hwcap_flags) &
		~auxv_hwcap32_exclude;

	if (auxv_hwcap32_include || auxv_hwcap32_exclude)
		cmn_err(CE_CONT, "?32-bit user ABI extensions: %b\n",
		    auxv_hwcap32, FMT_AV_SPARC);
#endif
}
246
/*
 * Function form of the ipltospl() conversion: map an interrupt
 * priority level to the corresponding spl value.
 */
int
__ipltospl(int ipl)
{
	int spl;

	spl = ipltospl(ipl);
	return (spl);
}
252
253 /*
254 * Print a stack backtrace using the specified stack pointer. We delay two
255 * seconds before continuing, unless this is the panic traceback.
256 * If we are in the process of panicking, we also attempt to write the
 * stack backtrace to a statically assigned buffer, to allow the panic
258 * code to find it and write it in to uncompressed pages within the
259 * system crash dump.
260 *
261 * Note that the frame for the starting stack pointer value is omitted because
262 * the corresponding %pc is not known.
263 */
264
265 extern char *dump_stack_scratch;
266
void
traceback(caddr_t sp)
{
	struct frame *fp = (struct frame *)(sp + STACK_BIAS);
	struct frame *nextfp, *minfp, *stacktop;
	int on_intr;

	cpu_t *cpu;

	/* running offsets into dump_stack_scratch as frames are appended */
	uint_t offset = 0;
	uint_t next_offset = 0;
	char stack_buffer[2048];	/* one frame's text for the dumpbuf */
	char local_buffer[1024];	/* %l0-%l7 text; printed, not dumped */

	flush_windows();	/* force in-register frames out to memory */

	if (!panicstr)
		printf("traceback: %%sp = %p\n", (void *)sp);

	if (panicstr && !dump_stack_scratch) {
		printf("Warning - stack not written to the dumpbuf\n");
	}

	/*
	 * If we are panicking, the high-level interrupt information in
	 * CPU was overwritten.  panic_cpu has the correct values.
	 */
	kpreempt_disable(); /* prevent migration */

	cpu = (panicstr && CPU->cpu_id == panic_cpu.cpu_id)? &panic_cpu : CPU;

	if ((on_intr = CPU_ON_INTR(cpu)) != 0)
		stacktop = (struct frame *)(cpu->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;

	kpreempt_enable();

	minfp = fp;

	/* Walk frames until the saved %fp leaves the kernel address range. */
	while ((uintptr_t)fp >= KERNELBASE) {
		uintptr_t pc = (uintptr_t)fp->fr_savpc;
		ulong_t off;
		char *sym;

		nextfp = (struct frame *)((uintptr_t)fp->fr_savfp + STACK_BIAS);
		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break; /* we're outside of the expected range */
		}

		if ((uintptr_t)nextfp & (STACK_ALIGN - 1)) {
			printf("  >> mis-aligned %%fp = %p\n", (void *)nextfp);
			break;
		}

		/*
		 * Emit the frame to the console/msgbuf and, via
		 * stack_buffer, stage it for the panic dump area.
		 */
		if ((sym = kobj_getsymname(pc, &off)) != NULL) {
			printf("%016lx %s:%s+%lx "
			    "(%lx, %lx, %lx, %lx, %lx, %lx)\n", (ulong_t)nextfp,
			    mod_containing_pc((caddr_t)pc), sym, off,
			    nextfp->fr_arg[0], nextfp->fr_arg[1],
			    nextfp->fr_arg[2], nextfp->fr_arg[3],
			    nextfp->fr_arg[4], nextfp->fr_arg[5]);
			(void) snprintf(stack_buffer, sizeof (stack_buffer),
			    "%s:%s+%lx "
			    "(%lx, %lx, %lx, %lx, %lx, %lx) | ",
			    mod_containing_pc((caddr_t)pc), sym, off,
			    nextfp->fr_arg[0], nextfp->fr_arg[1],
			    nextfp->fr_arg[2], nextfp->fr_arg[3],
			    nextfp->fr_arg[4], nextfp->fr_arg[5]);
		} else {
			(void) printf("%016lx %p (%lx, %lx, %lx, "
			    "%lx, %lx, %lx)\n",
			    (ulong_t)nextfp, (void *)pc,
			    nextfp->fr_arg[0], nextfp->fr_arg[1],
			    nextfp->fr_arg[2], nextfp->fr_arg[3],
			    nextfp->fr_arg[4], nextfp->fr_arg[5]);
			(void) snprintf(stack_buffer, sizeof (stack_buffer),
			    "%p (%lx, %lx, %lx, %lx, %lx, %lx) | ",
			    (void *)pc,
			    nextfp->fr_arg[0], nextfp->fr_arg[1],
			    nextfp->fr_arg[2], nextfp->fr_arg[3],
			    nextfp->fr_arg[4], nextfp->fr_arg[5]);
		}

		/* Locals are printed below but not copied to the dumpbuf. */
		(void) snprintf(local_buffer, sizeof (local_buffer),
		    "  %%l0-3: %016lx %016lx %016lx %016lx\n"
		    "  %%l4-7: %016lx %016lx %016lx %016lx\n",
		    nextfp->fr_local[0], nextfp->fr_local[1],
		    nextfp->fr_local[2], nextfp->fr_local[3],
		    nextfp->fr_local[4], nextfp->fr_local[5],
		    nextfp->fr_local[6], nextfp->fr_local[7]);
		if (panicstr && dump_stack_scratch) {
			next_offset = offset + strlen(stack_buffer);
			if (next_offset < STACK_BUF_SIZE) {
				bcopy(stack_buffer, dump_stack_scratch + offset,
				    strlen(stack_buffer));
				offset = next_offset;
			} else {
				/*
				 * In attempting to save the panic stack
				 * to the dumpbuf we have overflowed that area.
				 * Print a warning and continue to printf the
				 * stack to the msgbuf
				 */
				printf("Warning: stack in the dump buffer"
				    " may be incomplete\n");
			}
		}
		printf("%s", local_buffer);

		fp = nextfp;
		minfp = fp;
	}

	if (!panicstr) {
		printf("end of traceback\n");
		DELAY(2 * MICROSEC);
	} else if (dump_stack_scratch) {
		/* NUL-terminate the text staged for the crash dump. */
		dump_stack_scratch[offset] = '\0';
	}
}
397
398 /*
399 * Generate a stack backtrace from a saved register set.
400 */
401 void
traceregs(struct regs * rp)402 traceregs(struct regs *rp)
403 {
404 traceback((caddr_t)rp->r_sp);
405 }
406
407 void
exec_set_sp(size_t stksize)408 exec_set_sp(size_t stksize)
409 {
410 klwp_t *lwp = ttolwp(curthread);
411
412 lwp->lwp_pcb.pcb_xregstat = XREGNONE;
413 if (curproc->p_model == DATAMODEL_NATIVE)
414 stksize += sizeof (struct rwindow) + STACK_BIAS;
415 else
416 stksize += sizeof (struct rwindow32);
417 lwptoregs(lwp)->r_sp = (uintptr_t)curproc->p_usrstack - stksize;
418 }
419
420 /*
421 * Allocate a region of virtual address space, unmapped.
422 *
423 * When a hard-redzone (firewall) is in effect, redzone violations are
424 * caught by the hardware the instant they happen because the first byte
425 * past the logical end of a firewalled buffer lies at the start of an
426 * unmapped page. This firewalling is accomplished by bumping up the
427 * requested address allocation, effectively removing an additional page
428 * beyond the original request from the available virtual memory arena.
429 * However, the size of the allocation passed to boot, in boot_alloc(),
430 * doesn't reflect this additional page and fragmentation of the OBP
431 * "virtual-memory" "available" lists property occurs. Calling
432 * prom_claim_virt() for the firewall page avoids this fragmentation.
433 */
434 void *
boot_virt_alloc(void * addr,size_t size)435 boot_virt_alloc(void *addr, size_t size)
436 {
437 return (BOP_ALLOC_VIRT((caddr_t)addr, size));
438 }
439
440
441 /*ARGSUSED*/
442 int
xcopyin_nta(const void * uaddr,void * kaddr,size_t count,int dummy)443 xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int dummy)
444 {
445 return (xcopyin(uaddr, kaddr, count));
446 }
447 /*ARGSUSED*/
448 int
xcopyout_nta(const void * kaddr,void * uaddr,size_t count,int dummy)449 xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int dummy)
450 {
451 return (xcopyout(kaddr, uaddr, count));
452 }
453 /*ARGSUSED*/
454 int
kcopy_nta(const void * from,void * to,size_t count,int dummy)455 kcopy_nta(const void *from, void *to, size_t count, int dummy)
456 {
457 return (kcopy(from, to, count));
458 }
459