xref: /illumos-gate/usr/src/uts/intel/os/sundep.c (revision 21bcbe6e4903d8521ec66863bf0c21d9ed378cff)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2021 Joyent, Inc.
24  */
25 
26 /*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
27 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T   */
28 /*	All Rights Reserved   */
29 
30 #include <sys/types.h>
31 #include <sys/param.h>
32 #include <sys/sysmacros.h>
33 #include <sys/signal.h>
34 #include <sys/systm.h>
35 #include <sys/user.h>
36 #include <sys/mman.h>
37 #include <sys/class.h>
38 #include <sys/proc.h>
39 #include <sys/procfs.h>
40 #include <sys/buf.h>
41 #include <sys/kmem.h>
42 #include <sys/cred.h>
43 #include <sys/archsystm.h>
44 #include <sys/vmparam.h>
45 #include <sys/prsystm.h>
46 #include <sys/reboot.h>
47 #include <sys/uadmin.h>
48 #include <sys/vfs.h>
49 #include <sys/vnode.h>
50 #include <sys/file.h>
51 #include <sys/session.h>
52 #include <sys/ucontext.h>
53 #include <sys/dnlc.h>
54 #include <sys/var.h>
55 #include <sys/cmn_err.h>
56 #include <sys/debugreg.h>
57 #include <sys/thread.h>
58 #include <sys/vtrace.h>
59 #include <sys/consdev.h>
60 #include <sys/psw.h>
61 #include <sys/regset.h>
62 #include <sys/privregs.h>
63 #include <sys/cpu.h>
64 #include <sys/stack.h>
65 #include <sys/swap.h>
66 #include <vm/hat.h>
67 #include <vm/anon.h>
68 #include <vm/as.h>
69 #include <vm/page.h>
70 #include <vm/seg.h>
71 #include <vm/seg_kmem.h>
72 #include <vm/seg_map.h>
73 #include <vm/seg_vn.h>
74 #include <sys/exec.h>
75 #include <sys/acct.h>
76 #include <sys/core.h>
77 #include <sys/corectl.h>
78 #include <sys/modctl.h>
79 #include <sys/tuneable.h>
80 #include <c2/audit.h>
81 #include <sys/bootconf.h>
82 #include <sys/brand.h>
83 #include <sys/dumphdr.h>
84 #include <sys/promif.h>
85 #include <sys/systeminfo.h>
86 #include <sys/kdi.h>
87 #include <sys/contract_impl.h>
88 #include <sys/x86_archext.h>
89 #include <sys/segments.h>
90 #include <sys/ontrap.h>
91 #include <sys/cpu.h>
92 #ifdef __xpv
93 #include <sys/hypervisor.h>
94 #endif
95 
96 /*
97  * Compare the boot interface version that boot reports against
98  * the version the kernel expects.
99  */
100 int
101 check_boot_version(int boots_version)
102 {
103 	if (boots_version == BO_VERSION)
104 		return (0);
105 
106 	prom_printf("Wrong boot interface - kernel needs v%d found v%d\n",
107 	    BO_VERSION, boots_version);
108 	prom_panic("halting");
109 	/*NOTREACHED*/
110 }
111 
112 /*
113  * Process the physical installed list for boot.
114  * Finds:
115  * 1) the pfn of the highest installed physical page,
116  * 2) the number of pages installed
117  * 3) the number of distinct contiguous regions these pages fall into
118  *    (i.e., the number of contiguous memory ranges).
119  */
120 void
121 installed_top_size_ex(
122 	struct memlist *list,	/* pointer to start of installed list */
123 	pfn_t *high_pfn,	/* return ptr for top value */
124 	pgcnt_t *pgcnt,		/* return ptr for sum of installed pages */
125 	int	*ranges)	/* return ptr for the count of contig. ranges */
126 {
127 	pfn_t top = 0;
128 	pgcnt_t sumpages = 0;
129 	pfn_t highp;		/* high page in a chunk */
130 	int cnt = 0;
131 
132 	for (; list; list = list->ml_next) {
133 		++cnt;
134 		highp = (list->ml_address + list->ml_size - 1) >> PAGESHIFT;
135 		if (top < highp)
136 			top = highp;
137 		sumpages += btop(list->ml_size);
138 	}
139 
140 	*high_pfn = top;
141 	*pgcnt = sumpages;
142 	*ranges = cnt;
143 }
144 
145 void
146 installed_top_size(
147 	struct memlist *list,	/* pointer to start of installed list */
148 	pfn_t *high_pfn,	/* return ptr for top value */
149 	pgcnt_t *pgcnt)		/* return ptr for sum of installed pages */
150 {
151 	int ranges;
152 
153 	installed_top_size_ex(list, high_pfn, pgcnt, &ranges);
154 }
155 
156 void
157 phys_install_has_changed(void)
158 {}
159 
160 /*
161  * Copy in a memory list from boot to kernel, with a filter function
162  * to remove pages. The filter function can increase the address and/or
163  * decrease the size to filter out pages.  This routine also aligns
164  * addresses and sizes to PAGESIZE.
165  */
166 void
167 copy_memlist_filter(
168 	struct memlist *src,
169 	struct memlist **dstp,
170 	void (*filter)(uint64_t *, uint64_t *))
171 {
172 	struct memlist *dst, *prev;
173 	uint64_t addr;
174 	uint64_t size;
175 	uint64_t eaddr;
176 
177 	dst = *dstp;
178 	prev = dst;
179 
180 	/*
181 	 * Move through the memlist applying a filter against
182 	 * each range of memory. Note that we may apply the
183 	 * filter multiple times against each memlist entry.
184 	 */
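	/*
	 * The destination entries are taken from a contiguous array the
	 * caller passes in via *dstp; each accepted range simply advances
	 * dst (and prev) to the next slot, and *dstp is left pointing at
	 * the first unused slot on return.
	 */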
185 	for (; src; src = src->ml_next) {
186 		addr = P2ROUNDUP(src->ml_address, PAGESIZE);
187 		eaddr = P2ALIGN(src->ml_address + src->ml_size, PAGESIZE);
188 		while (addr < eaddr) {
189 			size = eaddr - addr;
190 			if (filter != NULL)
191 				filter(&addr, &size);
192 			if (size == 0)
193 				break;
194 			dst->ml_address = addr;
195 			dst->ml_size = size;
196 			dst->ml_next = 0;
197 			if (prev == dst) {
198 				dst->ml_prev = 0;
199 				dst++;
200 			} else {
201 				dst->ml_prev = prev;
202 				prev->ml_next = dst;
203 				dst++;
204 				prev++;
205 			}
206 			addr += size;
207 		}
208 	}
209 
210 	*dstp = dst;
211 }
212 
213 /*
214  * Kernel setup code, called from startup().
215  */
216 void
217 kern_setup1(void)
218 {
219 	proc_t *pp;
220 
221 	pp = &p0;
222 
223 	proc_sched = pp;
224 
225 	/*
226 	 * Initialize process 0 data structures
227 	 */
228 	pp->p_stat = SRUN;
229 	pp->p_flag = SSYS;
230 
231 	pp->p_pidp = &pid0;
232 	pp->p_pgidp = &pid0;
233 	pp->p_sessp = &session0;
234 	pp->p_tlist = &t0;
235 	pid0.pid_pglink = pp;
236 	pid0.pid_pgtail = pp;
237 
238 	/*
239 	 * XXX - we assume that the u-area is zeroed out except for
240 	 * ttolwp(curthread)->lwp_regs.
241 	 */
242 	PTOU(curproc)->u_cmask = (mode_t)CMASK;
243 
244 	thread_init();		/* init thread_free list */
245 	pid_init();		/* initialize pid (proc) table */
246 	contract_init();	/* initialize contracts */
247 
248 	init_pages_pp_maximum();
249 }
250 
251 /*
252  * Load a procedure into a thread.
253  */
254 void
255 thread_load(kthread_t *t, void (*start)(), caddr_t arg, size_t len)
256 {
257 	caddr_t sp;
258 	size_t framesz;
259 	caddr_t argp;
260 	long *p;
261 	extern void thread_start();
262 
263 	/*
264 	 * Push a "C" call frame onto the stack to represent
265 	 * the caller of "start".
266 	 */
267 	sp = t->t_stk;
268 	ASSERT(((uintptr_t)t->t_stk & (STACK_ENTRY_ALIGN - 1)) == 0);
269 	if (len != 0) {
270 		/*
271 		 * the object that arg points at is copied into the
272 		 * caller's frame.
273 		 */
274 		framesz = SA(len);
275 		sp -= framesz;
276 		ASSERT(sp > t->t_stkbase);
277 		argp = sp + SA(MINFRAME);
278 		bcopy(arg, argp, len);
279 		arg = argp;
280 	}
281 	/*
282 	 * Set up arguments (arg and len) on the caller's stack frame.
283 	 */
284 	p = (long *)sp;
285 
286 	*--p = 0;		/* fake call */
287 	*--p = 0;		/* null frame pointer terminates stack trace */
288 	*--p = (long)len;
289 	*--p = (intptr_t)arg;
290 	*--p = (intptr_t)start;
291 
292 	/*
293 	 * initialize thread to resume at thread_start() which will
294 	 * turn around and invoke (*start)(arg, len).
295 	 */
296 	t->t_pc = (uintptr_t)thread_start;
297 	t->t_sp = (uintptr_t)p;
298 
299 	ASSERT((t->t_sp & (STACK_ENTRY_ALIGN - 1)) == 0);
300 }
301 
302 /*
303  * load user registers into lwp.
304  */
305 /*ARGSUSED2*/
306 void
307 lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
308 {
309 	struct regs *rp = lwptoregs(lwp);
310 
311 	setgregs(lwp, grp);
312 	rp->r_ps = PSL_USER;
313 
314 	/*
315 	 * For 64-bit lwps, we allow one magic %fs selector value, and one
316 	 * magic %gs selector to point anywhere in the address space using
317 	 * %fsbase and %gsbase behind the scenes.  libc uses %fs to point
318 	 * at the ulwp_t structure.
319 	 *
320 	 * For 32-bit lwps, libc wedges its lwp thread pointer into the
321 	 * ucontext ESP slot (which is otherwise irrelevant to setting a
322 	 * ucontext) and the LWPGS_SEL value into gregs[REG_GS].  This is so
323 	 * syslwp_create() can atomically set up %gs.
324 	 *
325 	 * See setup_context() in libc.
326 	 */
327 #ifdef _SYSCALL32_IMPL
328 	if (lwp_getdatamodel(lwp) == DATAMODEL_ILP32) {
329 		if (grp[REG_GS] == LWPGS_SEL)
330 			(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
331 	} else {
332 		/*
333 		 * See lwp_setprivate in kernel and setup_context in libc.
334 		 *
335 		 * Currently libc constructs a ucontext from whole cloth for
336 		 * every new (non-main) lwp created.  For 64-bit processes,
337 		 * %fsbase is set directly to point at the current thread pointer.
338 		 * In the past (Solaris 10) %fs was also set to LWPFS_SEL to
339 		 * indicate %fsbase. Now we use the null GDT selector for
340 		 * this purpose. LWP[FS|GS]_SEL are only intended for 32-bit
341 		 * processes. To ease the transition we support older libcs in
342 		 * the newer kernel by forcing the %fs or %gs selector to null
343 		 * by calling lwp_setprivate if LWP[FS|GS]_SEL is passed in
344 		 * the ucontext.  This should be ripped out at some future
345 		 * date.  Another fix would be for libc to do a getcontext
346 		 * and inherit the null %fs/%gs from the current context, but
347 		 * that means an extra system call and could hurt performance.
348 		 */
349 		if (grp[REG_FS] == 0x1bb) /* hard code legacy LWPFS_SEL */
350 			(void) lwp_setprivate(lwp, _LWP_FSBASE,
351 			    (uintptr_t)grp[REG_FSBASE]);
352 
353 		if (grp[REG_GS] == 0x1c3) /* hard code legacy LWPGS_SEL */
354 			(void) lwp_setprivate(lwp, _LWP_GSBASE,
355 			    (uintptr_t)grp[REG_GSBASE]);
356 	}
357 #else
358 	if (grp[GS] == LWPGS_SEL)
359 		(void) lwp_setprivate(lwp, _LWP_GSBASE, thrptr);
360 #endif
361 
362 	lwp->lwp_eosys = JUSTRETURN;
363 	lwptot(lwp)->t_post_sys = 1;
364 }
365 
366 /*
367  * set syscall()'s return values for a lwp.
368  */
369 void
370 lwp_setrval(klwp_t *lwp, int v1, int v2)
371 {
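	/*
	 * Clearing the carry flag (PS_C) in the saved flags marks the
	 * syscall as successful; a set carry flag is how an error return
	 * is signalled back to userland.
	 */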
372 	lwptoregs(lwp)->r_ps &= ~PS_C;
373 	lwptoregs(lwp)->r_r0 = v1;
374 	lwptoregs(lwp)->r_r1 = v2;
375 }
376 
377 /*
378  * Set the stack pointer for a lwp.
379  */
380 void
381 lwp_setsp(klwp_t *lwp, caddr_t sp)
382 {
383 	lwptoregs(lwp)->r_sp = (intptr_t)sp;
384 }
385 
386 /*
387  * Copy regs from parent to child.
388  */
389 void
390 lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
391 {
392 	struct pcb *pcb = &clwp->lwp_pcb;
393 	struct regs *rp = lwptoregs(lwp);
394 
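	/*
	 * If the child's pcb doesn't already have a segment-register update
	 * pending, seed it from the parent's current user segment registers
	 * and flag the update so they are reloaded on the child's first
	 * return to userland.
	 */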
395 	if (!PCB_NEED_UPDATE_SEGS(pcb)) {
396 		pcb->pcb_ds = rp->r_ds;
397 		pcb->pcb_es = rp->r_es;
398 		pcb->pcb_fs = rp->r_fs;
399 		pcb->pcb_gs = rp->r_gs;
400 		PCB_SET_UPDATE_SEGS(pcb);
401 		lwptot(clwp)->t_post_sys = 1;
402 	}
403 	ASSERT(lwptot(clwp)->t_post_sys);
404 
405 	fp_lwp_dup(clwp);
406 
407 	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct regs));
408 }
409 
410 /*
411  * This function is currently unused on x86.
412  */
413 /*ARGSUSED*/
414 void
415 lwp_freeregs(klwp_t *lwp, int isexec)
416 {}
417 
418 /*
419  * This function is currently unused on x86.
420  */
421 void
422 lwp_pcb_exit(void)
423 {}
424 
425 /*
426  * Lwp context ops for segment registers.
427  */
428 
429 /*
430  * Every time we come into the kernel (syscall, interrupt or trap
431  * but not fast-traps) we capture the current values of the user's
432  * segment registers into the lwp's reg structure. This includes
433  * lcall for i386 generic system call support since it is handled
434  * as a segment-not-present trap.
435  *
436  * Here we save the current values from the lwp regs into the pcb
437  * and OR PCB_UPDATE_SEGS (1) into pcb->pcb_rupdate to tell the rest
438  * of the kernel that the pcb copy of the segment registers is the
439  * current one.  This ensures the lwp's next trip to userland goes via
440  * update_sregs.  Finally we set t_post_sys to ensure that no
441  * system call fast-paths its way out of the kernel via sysret.
442  *
443  * (This means that we need to have interrupts disabled when we
444  * test t->t_post_sys in the syscall handlers; if the test fails,
445  * we need to keep interrupts disabled until we return to userland
446  * so we can't be switched away.)
447  *
448  * As a result of all this, we don't really have to do a whole lot
449  * if the thread is just mucking about in the kernel, switching on
450  * and off the cpu for whatever reason it feels like. And yet we
451  * still preserve fast syscalls, because if we -don't- get
452  * descheduled, we never come here either.
453  */
454 
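/*
 * A per-lwp %fs/%gs descriptor is considered valid if it describes a
 * present, accessed read/write data segment (SDT_MEMRWA) with user
 * (ring 3) privilege.
 */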
455 #define	VALID_LWP_DESC(udp) ((udp)->usd_type == SDT_MEMRWA && \
456 	    (udp)->usd_p == 1 && (udp)->usd_dpl == SEL_UPL)
457 
458 /*ARGSUSED*/
459 void
460 lwp_segregs_save(klwp_t *lwp)
461 {
462 	pcb_t *pcb = &lwp->lwp_pcb;
463 	struct regs *rp;
464 
465 	ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
466 	ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));
467 
468 	if (!PCB_NEED_UPDATE_SEGS(pcb)) {
469 		rp = lwptoregs(lwp);
470 
471 		/*
472 		 * If there's no update already pending, capture the current
473 		 * %ds/%es/%fs/%gs values from lwp's regs in case the user
474 		 * changed them; %fsbase and %gsbase are privileged so the
475 		 * kernel versions of these registers in pcb_fsbase and
476 		 * pcb_gsbase are always up-to-date.
477 		 */
478 		pcb->pcb_ds = rp->r_ds;
479 		pcb->pcb_es = rp->r_es;
480 		pcb->pcb_fs = rp->r_fs;
481 		pcb->pcb_gs = rp->r_gs;
482 		PCB_SET_UPDATE_SEGS(pcb);
483 		lwp->lwp_thread->t_post_sys = 1;
484 	}
485 
486 #if !defined(__xpv)	/* XXPV not sure if we can re-read gdt? */
487 	ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPFS], &lwp->lwp_pcb.pcb_fsdesc,
488 	    sizeof (lwp->lwp_pcb.pcb_fsdesc)) == 0);
489 	ASSERT(bcmp(&CPU->cpu_gdt[GDT_LWPGS], &lwp->lwp_pcb.pcb_gsdesc,
490 	    sizeof (lwp->lwp_pcb.pcb_gsdesc)) == 0);
491 #endif
492 }
493 
494 /*
495  * Update the segment registers with new values from the pcb.
496  *
497  * We have to do this carefully, and in the following order,
498  * in case any of the selectors points at a bogus descriptor.
499  * If they do, we'll catch the trap with on_trap() and return 1.
500  * Returns 0 on success.
501  *
502  * This is particularly tricky for %gs.
503  * This routine must be executed under a cli.
504  */
505 int
506 update_sregs(struct regs *rp,  klwp_t *lwp)
507 {
508 	pcb_t *pcb = &lwp->lwp_pcb;
509 	ulong_t	kgsbase;
510 	on_trap_data_t	otd;
511 	int rc = 0;
512 
513 	if (!on_trap(&otd, OT_SEGMENT_ACCESS)) {
514 
515 #if defined(__xpv)
516 		/*
517 		 * On the hypervisor this is easy. The hypercall below will
518 		 * swapgs and load %gs with the user selector. If the user
519 		 * selector is bad the hypervisor will catch the fault and
520 		 * load %gs with the null selector instead. Either way the
521 		 * kernel's gsbase is not damaged.
522 		 */
523 		kgsbase = (ulong_t)CPU;
524 		if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL,
525 		    pcb->pcb_gs) != 0) {
526 				no_trap();
527 				return (1);
528 		}
529 
530 		rp->r_gs = pcb->pcb_gs;
531 		ASSERT((cpu_t *)kgsbase == CPU);
532 
533 #else	/* __xpv */
534 
535 		/*
536 		 * A little more complicated running native.
537 		 */
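		/*
		 * Capture the kernel's gsbase (the current cpu_t pointer)
		 * before touching %gs, so it can be restored via wrmsr below.
		 */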
538 		kgsbase = (ulong_t)CPU;
539 		__set_gs(pcb->pcb_gs);
540 
541 		/*
542 		 * If __set_gs fails, it's because the new %gs is a bad %gs;
543 		 * we'll be taking a trap, but with the original %gs and %gsbase
544 		 * undamaged (i.e. pointing at curcpu).
545 		 *
546 		 * We've just mucked up the kernel's gsbase.  Oops.  In
547 		 * particular we can't take any traps at all.  Make the newly
548 		 * computed gsbase be the hidden gs via swapgs, and fix
549 		 * the kernel's gsbase back again. Later, when we return to
550 		 * userland we'll swapgs again, restoring the gsbase just loaded
551 		 * above.
552 		 */
553 		__asm__ __volatile__("mfence; swapgs");
554 
555 		rp->r_gs = pcb->pcb_gs;
556 
557 		/*
558 		 * Restore kernel's gsbase. Note that this also serializes any
559 		 * attempted speculation from loading the user-controlled
560 		 * %gsbase.
561 		 */
562 		wrmsr(MSR_AMD_GSBASE, kgsbase);
563 
564 #endif	/* __xpv */
565 
566 		/*
567 		 * Only override the descriptor base address if
568 		 * r_gs == LWPGS_SEL or if r_gs == NULL. A note on
569 		 * NULL descriptors -- 32-bit programs take faults
570 		 * if they dereference NULL descriptors; however,
571 		 * when 64-bit programs load them into %fs or %gs,
572 		 * they DON'T fault -- the base address simply remains
573 		 * whatever it was from the last load.   Urk.
574 		 *
575 		 * XXX - note that lwp_setprivate now sets %fs/%gs to the
576 		 * null selector for 64-bit processes, whereas before
577 		 * %fs/%gs were set to LWP(FS|GS)_SEL regardless of
578 		 * the process's data model. For now we check for both
579 		 * values so that the kernel can also support the older
580 		 * libc. This should be ripped out at some point in the
581 		 * future.
582 		 */
583 		if (pcb->pcb_gs == LWPGS_SEL || pcb->pcb_gs == 0) {
584 #if defined(__xpv)
585 			if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER,
586 			    pcb->pcb_gsbase)) {
587 				no_trap();
588 				return (1);
589 			}
590 #else
591 			wrmsr(MSR_AMD_KGSBASE, pcb->pcb_gsbase);
592 #endif
593 		}
594 
595 		__set_ds(pcb->pcb_ds);
596 		rp->r_ds = pcb->pcb_ds;
597 
598 		__set_es(pcb->pcb_es);
599 		rp->r_es = pcb->pcb_es;
600 
601 		__set_fs(pcb->pcb_fs);
602 		rp->r_fs = pcb->pcb_fs;
603 
604 		/*
605 		 * Same as for %gs
606 		 */
607 		if (pcb->pcb_fs == LWPFS_SEL || pcb->pcb_fs == 0) {
608 #if defined(__xpv)
609 			if (HYPERVISOR_set_segment_base(SEGBASE_FS,
610 			    pcb->pcb_fsbase)) {
611 				no_trap();
612 				return (1);
613 			}
614 #else
615 			wrmsr(MSR_AMD_FSBASE, pcb->pcb_fsbase);
616 #endif
617 		}
618 
619 	} else {
620 		cli();
621 		rc = 1;
622 	}
623 	no_trap();
624 	return (rc);
625 }
626 
627 /*
628  * Make sure any stale selectors are cleared from the segment registers
629  * by putting KDS_SEL (the kernel's default %ds gdt selector) into them.
630  * This is necessary because the kernel itself does not use %es, %fs, or
631  * %ds. (%cs and %ss are necessary, and are set up by the kernel - along with
632  * %gs - to point to the current cpu struct.) If we enter kmdb while in the
633  * kernel and resume with a stale ldt or brandz selector sitting there in a
634  * segment register, kmdb will #gp fault if the stale selector points to,
635  * for example, an ldt in the context of another process.
636  *
637  * WARNING: Intel and AMD chips behave differently when storing
638  * the null selector into %fs and %gs while in long mode. On AMD
639  * chips fsbase and gsbase are not cleared. But on Intel chips, storing
640  * a null selector into %fs or %gs has the side effect of clearing
641  * fsbase or gsbase. For that reason we use KDS_SEL, which has
642  * consistent behavior between AMD and Intel.
643  *
644  * Caller responsible for preventing cpu migration.
645  */
646 void
647 reset_sregs(void)
648 {
649 	ulong_t kgsbase = (ulong_t)CPU;
650 
651 	ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);
652 
653 	cli();
654 	__set_gs(KGS_SEL);
655 
656 	/*
657 	 * restore kernel gsbase
658 	 */
659 #if defined(__xpv)
660 	xen_set_segment_base(SEGBASE_GS_KERNEL, kgsbase);
661 #else
662 	wrmsr(MSR_AMD_GSBASE, kgsbase);
663 #endif
664 
665 	sti();
666 
667 	__set_ds(KDS_SEL);
668 	__set_es(0 | SEL_KPL);	/* selector RPL not ring 0 on hypervisor */
669 	__set_fs(KFS_SEL);
670 }
671 
672 
673 #ifdef _SYSCALL32_IMPL
674 
675 /*
676  * Make it impossible for a process to change its data model.
677  * We do this by toggling the present bits for the 32 and
678  * 64-bit user code descriptors. That way if a user lwp attempts
679  * to change its data model (by using the wrong code descriptor in
680  * %cs) it will fault immediately. This also allows us to simplify
681  * assertions and checks in the kernel.
682  */
683 
684 static void
685 gdt_ucode_model(model_t model)
686 {
687 	kpreempt_disable();
688 	if (model == DATAMODEL_NATIVE) {
689 		gdt_update_usegd(GDT_UCODE, &ucs_on);
690 		gdt_update_usegd(GDT_U32CODE, &ucs32_off);
691 	} else {
692 		gdt_update_usegd(GDT_U32CODE, &ucs32_on);
693 		gdt_update_usegd(GDT_UCODE, &ucs_off);
694 	}
695 	kpreempt_enable();
696 }
697 
698 #endif	/* _SYSCALL32_IMPL */
699 
700 /*
701  * Restore lwp private fs and gs segment descriptors
702  * on current cpu's GDT.
703  */
704 static void
705 lwp_segregs_restore(klwp_t *lwp)
706 {
707 	pcb_t *pcb = &lwp->lwp_pcb;
708 
709 	ASSERT(VALID_LWP_DESC(&pcb->pcb_fsdesc));
710 	ASSERT(VALID_LWP_DESC(&pcb->pcb_gsdesc));
711 
712 #ifdef	_SYSCALL32_IMPL
713 	gdt_ucode_model(DATAMODEL_NATIVE);
714 #endif
715 
716 	gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
717 	gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);
718 
719 }
720 
721 #ifdef _SYSCALL32_IMPL
722 
723 static void
724 lwp_segregs_restore32(klwp_t *lwp)
725 {
726 	/*LINTED*/
727 	cpu_t *cpu = CPU;
728 	pcb_t *pcb = &lwp->lwp_pcb;
729 
730 	ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_fsdesc));
731 	ASSERT(VALID_LWP_DESC(&lwp->lwp_pcb.pcb_gsdesc));
732 
733 	gdt_ucode_model(DATAMODEL_ILP32);
734 	gdt_update_usegd(GDT_LWPFS, &pcb->pcb_fsdesc);
735 	gdt_update_usegd(GDT_LWPGS, &pcb->pcb_gsdesc);
736 }
737 
738 #endif	/* _SYSCALL32_IMPL */
739 
740 /*
741  * If this is a process in a branded zone, then we want it to use the brand
742  * syscall entry points instead of the standard Solaris entry points.  This
743  * routine must be called when a new lwp is created within a branded zone
744  * or when an existing lwp moves into a branded zone via a zone_enter()
745  * operation.
746  */
747 void
748 lwp_attach_brand_hdlrs(klwp_t *lwp)
749 {
750 	kthread_t *t = lwptot(lwp);
751 
752 	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
753 
754 	ASSERT(removectx(t, NULL, brand_interpositioning_disable,
755 	    brand_interpositioning_enable, NULL, NULL,
756 	    brand_interpositioning_disable, NULL) == 0);
757 	installctx(t, NULL, brand_interpositioning_disable,
758 	    brand_interpositioning_enable, NULL, NULL,
759 	    brand_interpositioning_disable, NULL, NULL);
760 
761 	if (t == curthread) {
762 		kpreempt_disable();
763 		brand_interpositioning_enable();
764 		kpreempt_enable();
765 	}
766 }
767 
768 /*
769  * If this is a process in a branded zone, then we want it to disable the
770  * brand syscall entry points.  This routine must be called when the last
771  * lwp in a process is exiting in proc_exit().
772  */
773 void
774 lwp_detach_brand_hdlrs(klwp_t *lwp)
775 {
776 	kthread_t *t = lwptot(lwp);
777 
778 	ASSERT(PROC_IS_BRANDED(lwptoproc(lwp)));
779 	if (t == curthread)
780 		kpreempt_disable();
781 
782 	/* Remove the original context handlers */
783 	VERIFY(removectx(t, NULL, brand_interpositioning_disable,
784 	    brand_interpositioning_enable, NULL, NULL,
785 	    brand_interpositioning_disable, NULL) != 0);
786 
787 	if (t == curthread) {
788 		/* Cleanup our MSR and IDT entries. */
789 		brand_interpositioning_disable();
790 		kpreempt_enable();
791 	}
792 }
793 
794 /*
795  * Add any lwp-associated context handlers to the lwp at the beginning
796  * of the lwp's useful life.
797  *
798  * All paths which create lwp's invoke lwp_create(); lwp_create()
799  * invokes lwp_stk_init() which initializes the stack, sets up
800  * lwp_regs, and invokes this routine.
801  *
802  * All paths which destroy lwp's invoke lwp_exit() to rip the lwp
803  * apart and put it on 'lwp_deathrow'; if the lwp is destroyed it
804  * ends up in thread_free() which invokes freectx(t, 0) before
805  * invoking lwp_stk_fini().  When the lwp is recycled from death
806  * row, lwp_stk_fini() is invoked, then thread_free(), and thus
807  * freectx(t, 0) as before.
808  *
809  * In the case of exec, the surviving lwp is thoroughly scrubbed
810  * clean; exec invokes freectx(t, 1) to destroy associated contexts.
811  * On the way back to the new image, it invokes setregs() which
812  * in turn invokes this routine.
813  */
814 void
815 lwp_installctx(klwp_t *lwp)
816 {
817 	kthread_t *t = lwptot(lwp);
818 	int thisthread = t == curthread;
819 #ifdef _SYSCALL32_IMPL
820 	void (*restop)(klwp_t *) = lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ?
821 	    lwp_segregs_restore : lwp_segregs_restore32;
822 #else
823 	void (*restop)(klwp_t *) = lwp_segregs_restore;
824 #endif
825 	struct ctxop *ctx;
826 
827 	/*
828 	 * Install the basic lwp context handlers on each lwp.
829 	 *
830 	 * On the amd64 kernel, the context handlers are responsible for
831 	 * virtualizing %ds, %es, %fs, and %gs to the lwp.  The register
832 	 * values are only ever changed via sys_rtt when the
833 	 * PCB_UPDATE_SEGS bit (1) is set in pcb->pcb_rupdate. Only
834 	 * sys_rtt gets to clear the bit.
835 	 *
836 	 * On the i386 kernel, the context handlers are responsible for
837 	 * virtualizing %gs/%fs to the lwp by updating the per-cpu GDTs.
838 	 */
839 	ASSERT(removectx(t, lwp, lwp_segregs_save, restop,
840 	    NULL, NULL, NULL, NULL) == 0);
841 	if (thisthread) {
842 		ctx = installctx_preallocate();
843 		kpreempt_disable();
844 	} else {
845 		ctx = NULL;
846 	}
847 	installctx(t, lwp, lwp_segregs_save, restop,
848 	    NULL, NULL, NULL, NULL, ctx);
849 	if (thisthread) {
850 		/*
851 		 * Since we're the right thread, set the values in the GDT
852 		 */
853 		restop(lwp);
854 		kpreempt_enable();
855 	}
856 
857 	/*
858 	 * If we have sysenter/sysexit instructions enabled, we need
859 	 * to ensure that the hardware mechanism is kept up-to-date with the
860 	 * lwp's kernel stack pointer across context switches.
861 	 *
862 	 * sep_save zeros the sysenter stack pointer msr; sep_restore sets
863 	 * it to the lwp's kernel stack pointer (kstktop).
864 	 */
865 	if (is_x86_feature(x86_featureset, X86FSET_SEP)) {
866 		caddr_t kstktop = (caddr_t)lwp->lwp_regs;
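		/*
		 * lwp_regs sits at the top of the lwp's kernel stack, so its
		 * address doubles as the stack-top value programmed into the
		 * sysenter MSR by sep_restore().
		 */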
867 		ASSERT(removectx(t, kstktop,
868 		    sep_save, sep_restore, NULL, NULL, NULL, NULL) == 0);
869 
870 		if (thisthread) {
871 			ctx = installctx_preallocate();
872 			kpreempt_disable();
873 		} else {
874 			ctx = NULL;
875 		}
876 		installctx(t, kstktop,
877 		    sep_save, sep_restore, NULL, NULL, NULL, NULL, ctx);
878 		if (thisthread) {
879 			/*
880 			 * We're the right thread, so set the stack pointer
881 			 * for the first sysenter instruction to use
882 			 */
883 			sep_restore(kstktop);
884 			kpreempt_enable();
885 		}
886 	}
887 
888 	if (PROC_IS_BRANDED(ttoproc(t)))
889 		lwp_attach_brand_hdlrs(lwp);
890 }
891 
892 /*
893  * Clear registers on exec(2).
894  */
895 void
896 setregs(uarg_t *args)
897 {
898 	struct regs *rp;
899 	kthread_t *t = curthread;
900 	klwp_t *lwp = ttolwp(t);
901 	pcb_t *pcb = &lwp->lwp_pcb;
902 	greg_t sp;
903 
904 	/*
905 	 * Initialize user registers
906 	 */
907 	(void) save_syscall_args();	/* copy args from registers first */
908 	rp = lwptoregs(lwp);
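	/*
	 * r_sp already holds the new user stack pointer set up earlier in
	 * exec; save it so it survives the bzero() of the register set.
	 */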
909 	sp = rp->r_sp;
910 	bzero(rp, sizeof (*rp));
911 
912 	rp->r_ss = UDS_SEL;
913 	rp->r_sp = sp;
914 	rp->r_pc = args->entry;
915 	rp->r_ps = PSL_USER;
916 
917 	pcb->pcb_fs = pcb->pcb_gs = 0;
918 	pcb->pcb_fsbase = pcb->pcb_gsbase = 0;
919 
920 	if (ttoproc(t)->p_model == DATAMODEL_NATIVE) {
921 
922 		rp->r_cs = UCS_SEL;
923 
924 		/*
925 		 * Only allow the 64-bit user code descriptor to be present.
926 		 */
927 		gdt_ucode_model(DATAMODEL_NATIVE);
928 
929 		/*
930 		 * Arrange that the virtualized %fs and %gs GDT descriptors
931 		 * have a well-defined initial state (present, ring 3
932 		 * and of type data).
933 		 */
934 		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
935 
936 		/*
937 		 * thrptr is either NULL or a value used by DTrace.
938 		 * 64-bit processes use %fs as their "thread" register.
939 		 */
940 		if (args->thrptr)
941 			(void) lwp_setprivate(lwp, _LWP_FSBASE, args->thrptr);
942 
943 	} else {
944 
945 		rp->r_cs = U32CS_SEL;
946 		rp->r_ds = rp->r_es = UDS_SEL;
947 
948 		/*
949 		 * Only allow the 32-bit user code selector to be present.
950 		 */
951 		gdt_ucode_model(DATAMODEL_ILP32);
952 
953 		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;
954 
955 		/*
956 		 * thrptr is either NULL or a value used by DTrace.
957 		 * 32-bit processes use %gs as their "thread" register.
958 		 */
959 		if (args->thrptr)
960 			(void) lwp_setprivate(lwp, _LWP_GSBASE, args->thrptr);
961 
962 	}
963 
964 	pcb->pcb_ds = rp->r_ds;
965 	pcb->pcb_es = rp->r_es;
966 	PCB_SET_UPDATE_SEGS(pcb);
967 
968 	lwp->lwp_eosys = JUSTRETURN;
969 	t->t_post_sys = 1;
970 
971 	/*
972 	 * Add the lwp context handlers that virtualize segment registers
973 	 * and/or system call stacks, etc.
974 	 */
975 	lwp_installctx(lwp);
976 
977 	/*
978 	 * Reset the FPU flags and then initialize the FPU for this lwp.
979 	 */
980 	fp_exec();
981 }
982 
983 user_desc_t *
984 cpu_get_gdt(void)
985 {
986 	return (CPU->cpu_gdt);
987 }
988 
989 
990 #if !defined(lwp_getdatamodel)
991 
992 /*
993  * Return the datamodel of the given lwp.
994  */
995 /*ARGSUSED*/
996 model_t
997 lwp_getdatamodel(klwp_t *lwp)
998 {
999 	return (lwp->lwp_procp->p_model);
1000 }
1001 
1002 #endif	/* !lwp_getdatamodel */
1003 
1004 #if !defined(get_udatamodel)
1005 
1006 model_t
1007 get_udatamodel(void)
1008 {
1009 	return (curproc->p_model);
1010 }
1011 
1012 #endif	/* !get_udatamodel */
1013