/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2019 Joyent, Inc.
 */

#ifndef	_SYS_MACHCPUVAR_H
#define	_SYS_MACHCPUVAR_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/inttypes.h>
#include <sys/x_call.h>
#include <sys/tss.h>
#include <sys/segments.h>
#include <sys/rm_platter.h>
#include <sys/avintr.h>
#include <sys/pte.h>
#include <sys/stddef.h>
#include <sys/debug.h>
#include <sys/cpuvar.h>

#ifndef	_ASM
/*
 * On a virtualized platform a virtual cpu (vcpu) may not actually be
 * running on a physical cpu (pcpu), especially in configurations that have
 * more vcpus than pcpus.  This function reports, if it is able to, whether
 * the specified vcpu is currently running on a pcpu.  If the mapping is not
 * known or cannot be determined, it returns the unknown state.
 */
#define	VCPU_STATE_UNKNOWN	0
#define	VCPU_ON_PCPU		1
#define	VCPU_NOT_ON_PCPU	2

extern int vcpu_on_pcpu(processorid_t);
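
/*
 * Illustrative sketch (not part of this interface): a caller that would
 * rather not spin on a lock whose holder may have been descheduled by the
 * hypervisor could consult vcpu_on_pcpu().  The "owner_cpu" name below is
 * hypothetical.
 *
 *	if (vcpu_on_pcpu(owner_cpu) == VCPU_NOT_ON_PCPU) {
 *		// holder's vcpu is off-cpu; yield instead of spinning
 *	}
 *
 * A VCPU_STATE_UNKNOWN result should be treated conservatively, since the
 * platform may be unable to report the mapping.
 */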

/*
 * Machine specific fields of the cpu struct
 * defined in common/sys/cpuvar.h.
 *
 * Note:  This is kinda kludgy but seems to be the best
 * of our alternatives.
 */

struct cpuid_info;
struct cpu_ucode_info;
struct cmi_hdl;

/*
 * A note about the hypervisor affinity bits: a one bit in the affinity mask
 * means the corresponding event channel is allowed to be serviced
 * by this cpu.
 */
struct xen_evt_data {
	ulong_t		pending_sel[PIL_MAX + 1]; /* event array selectors */
	ulong_t		pending_evts[PIL_MAX + 1][sizeof (ulong_t) * 8];
	ulong_t		evt_affinity[sizeof (ulong_t) * 8]; /* service on cpu */
};
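
/*
 * Illustrative sketch only: given the convention above, a test for "may
 * event channel 'chan' be serviced on this cpu?" would look roughly like
 * the following ('evtp' and 'chan' are hypothetical locals):
 *
 *	if (evtp->evt_affinity[chan / (sizeof (ulong_t) * 8)] &
 *	    (1UL << (chan % (sizeof (ulong_t) * 8)))) {
 *		// this cpu is allowed to service the channel
 *	}
 */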

enum fast_syscall_state {
	FSS_DISABLED		= 0,
	FSS_ASYSC_ENABLED	= (1 << 0),
	FSS_SEP_ENABLED		= (1 << 1)
};
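
/*
 * These values are bit flags rather than a plain enumeration, so both
 * "enabled" states may be set at once.  A hedged, illustrative test (the
 * access path is the mcpu_fast_syscall_state member declared below):
 *
 *	if (CPU->cpu_m.mcpu_fast_syscall_state & FSS_ASYSC_ENABLED) {
 *		// SYSCALL/SYSRET is in use on this cpu
 *	}
 */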

struct kpti_frame {
	uint64_t	kf_lower_redzone;

	/* Stashed value of %cr3 when we entered the trampoline. */
	greg_t		kf_tr_cr3;

	/*
	 * We use %r13-r14 as scratch registers in the trampoline code,
	 * so stash those here "below" the rest of the stack so they can be
	 * pushed/popped if needed.
	 */
	greg_t		kf_r14;
	greg_t		kf_r13;

	/*
	 * Part of this struct is used as the HW stack frame when taking an
	 * interrupt on the user page table. The CPU is going to push a bunch
	 * of regs onto the stack pointer set in the TSS/IDT (which we set to
	 * &kf_rsp here).
	 *
	 * This is only a temporary holding area for them (we'll move them over
	 * to the real interrupt stack once we've set %cr3).
	 *
	 * Note that these must be cleared during a process switch on this cpu.
	 */
	greg_t		kf_err;		/* Bottom of initial hw stack frame */
	greg_t		kf_rip;
	greg_t		kf_cs;
	greg_t		kf_rflags;
	greg_t		kf_rsp;
	greg_t		kf_ss;

	greg_t		kf_tr_rsp;	/* Top of HW stack frame */
	/* We also write this with the %rsp value on tramp entry */

	/* Written to 0x1 when this kpti_frame is in use. */
	uint64_t	kf_tr_flag;

	uint64_t	kf_middle_redzone;

	/*
	 * The things we need to write to %cr3 to change between page tables.
	 * These live "above" the HW stack.
	 */
	greg_t		kf_kernel_cr3;
	greg_t		kf_user_cr3;
	greg_t		kf_tr_ret_rsp;

	uint64_t	kf_unused;		/* For 16-byte align */

	uint64_t	kf_upper_redzone;
};

typedef struct cpu_smt {
	lock_t cs_lock;
	char cs_pad[56];
	struct cpu *cs_sib;
	volatile uint64_t cs_intr_depth;
	volatile uint64_t cs_state;
	volatile uint64_t cs_sibstate;
} cpu_smt_t;

/*
 * This first value, MACHCPU_SIZE, is the size of all the members in the cpu_t
 * AND struct machcpu, before we get to the mcpu_pad and the kpti area.
 * The kpti area is used to contain per-CPU data that is visible in both sets
 * of page-tables, and hence must be page-aligned and page-sized. See
 * hat_pcp_setup().
 *
 * There are CTASSERTs in os/intr.c that verify this all works out.
 */
#define	MACHCPU_SIZE	(1568 + 696)
#define	MACHCPU_PAD	(MMU_PAGESIZE - MACHCPU_SIZE)
#define	MACHCPU_PAD2	(MMU_PAGESIZE - 16 - 3 * sizeof (struct kpti_frame))
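
/*
 * For illustration only: the compile-time checks referred to above live in
 * os/intr.c; the expressions below are an assumption about their general
 * shape, not a copy of that file.
 *
 *	CTASSERT((offsetof(cpu_t, cpu_m) +
 *	    offsetof(struct machcpu, mcpu_pad)) == MACHCPU_SIZE);
 *	CTASSERT((offsetof(cpu_t, cpu_m) +
 *	    offsetof(struct machcpu, mcpu_pad2)) == MMU_PAGESIZE);
 */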

struct	machcpu {
	/*
	 * x_call fields - used for interprocessor cross calls
	 */
	struct xc_msg	*xc_msgbox;
	struct xc_msg	*xc_curmsg;
	struct xc_msg	*xc_free;
	xc_data_t	xc_data;
	uint32_t	xc_wait_cnt;
	volatile uint32_t xc_work_cnt;

	int		mcpu_nodeid;		/* node-id */
	int		mcpu_pri;		/* CPU priority */

	struct hat	*mcpu_current_hat; /* cpu's current hat */

	struct hat_cpu_info	*mcpu_hat_info;

	volatile ulong_t	mcpu_tlb_info;

	/* i86 hardware table addresses that cannot be shared */

	user_desc_t	*mcpu_gdt;	/* GDT */
	gate_desc_t	*mcpu_idt;	/* current IDT */

	tss_t		*mcpu_tss;	/* TSS */
	void		*mcpu_ldt;
	size_t		mcpu_ldt_len;

	kmutex_t	mcpu_ppaddr_mutex;
	caddr_t		mcpu_caddr1;	/* per cpu CADDR1 */
	caddr_t		mcpu_caddr2;	/* per cpu CADDR2 */
	uint64_t	mcpu_caddr1pte;
	uint64_t	mcpu_caddr2pte;

	struct softint	mcpu_softinfo;
	uint64_t	pil_high_start[HIGH_LEVELS];
	uint64_t	intrstat[PIL_MAX + 1][2];

	struct cpuid_info	 *mcpu_cpi;

#if defined(__amd64)
	greg_t	mcpu_rtmp_rsp;		/* syscall: temporary %rsp stash */
	greg_t	mcpu_rtmp_r15;		/* syscall: temporary %r15 stash */
#endif

	struct vcpu_info *mcpu_vcpu_info;
	uint64_t	mcpu_gdtpa;	/* hypervisor: GDT physical address */

	uint16_t mcpu_intr_pending;	/* hypervisor: pending intrpt levels */
	uint16_t mcpu_ec_mbox;		/* hypervisor: evtchn_dev mailbox */
	struct xen_evt_data *mcpu_evt_pend; /* hypervisor: pending events */

	volatile uint32_t *mcpu_mwait;	/* MONITOR/MWAIT buffer */
	void (*mcpu_idle_cpu)(void);	/* idle function */
	uint16_t mcpu_idle_type;	/* CPU next idle type */
	uint16_t max_cstates;		/* supported max cstates */

	enum fast_syscall_state	mcpu_fast_syscall_state;

	struct cpu_ucode_info	*mcpu_ucode_info;

	void			*mcpu_pm_mach_state;
	struct cmi_hdl		*mcpu_cmi_hdl;
	void			*mcpu_mach_ctx_ptr;

	/*
	 * A stamp that is unique per processor and changes
	 * whenever an interrupt happens.  Useful for detecting
	 * if a section of code gets interrupted.
	 * The high order 16 bits will hold the cpu->cpu_id.
	 * The low order bits will be incremented on every interrupt.
	 * (See the illustrative sketch after this structure.)
	 */
	volatile uint32_t	mcpu_istamp;

	cpu_smt_t		mcpu_smt;

	char			mcpu_pad[MACHCPU_PAD];

	/* This is the start of the page */
	char			mcpu_pad2[MACHCPU_PAD2];
	struct kpti_frame	mcpu_kpti;
	struct kpti_frame	mcpu_kpti_flt;
	struct kpti_frame	mcpu_kpti_dbg;
	char			mcpu_pad3[16];
};
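
/*
 * Illustrative sketch of using mcpu_istamp (above) to detect that a code
 * sequence was interrupted; "before" is a hypothetical local, and the
 * sequence is assumed to stay bound to a single cpu:
 *
 *	uint32_t before = CPU->cpu_m.mcpu_istamp;
 *	... the section of code in question ...
 *	if (CPU->cpu_m.mcpu_istamp != before) {
 *		// at least one interrupt was taken here in the meantime
 *	}
 */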

#define	NINTR_THREADS	(LOCK_LEVEL-1)	/* number of interrupt threads */
#define	MWAIT_HALTED	(1)		/* mcpu_mwait set when halting */
#define	MWAIT_RUNNING	(0)		/* mcpu_mwait set to wakeup */
#define	MWAIT_WAKEUP_IPI	(2)	/* need IPI to wakeup */
#define	MWAIT_WAKEUP(cpu)	(*((cpu)->cpu_m.mcpu_mwait) = MWAIT_RUNNING)
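
/*
 * A hedged sketch of the MONITOR/MWAIT idle protocol these values support
 * (the real idle loop lives in the platform idle code; the i86_monitor and
 * i86_mwait calls are shown only for illustration):
 *
 *	*cp->cpu_m.mcpu_mwait = MWAIT_HALTED;
 *	i86_monitor(cp->cpu_m.mcpu_mwait, 0, 0);
 *	if (*cp->cpu_m.mcpu_mwait == MWAIT_HALTED)
 *		i86_mwait(0, 0);
 *
 * A cpu that wants to wake the idler stores MWAIT_RUNNING there, e.g. via
 * MWAIT_WAKEUP(cp), which terminates the MWAIT; MWAIT_WAKEUP_IPI marks the
 * case where an explicit IPI is still required.
 */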

#endif	/* _ASM */

/* Please DON'T add any more of this namespace-poisoning sewage here */

#define	cpu_nodeid cpu_m.mcpu_nodeid
#define	cpu_pri cpu_m.mcpu_pri
#define	cpu_current_hat cpu_m.mcpu_current_hat
#define	cpu_hat_info cpu_m.mcpu_hat_info
#define	cpu_ppaddr_mutex cpu_m.mcpu_ppaddr_mutex
#define	cpu_gdt cpu_m.mcpu_gdt
#define	cpu_idt cpu_m.mcpu_idt
#define	cpu_tss cpu_m.mcpu_tss
#define	cpu_caddr1 cpu_m.mcpu_caddr1
#define	cpu_caddr2 cpu_m.mcpu_caddr2
#define	cpu_softinfo cpu_m.mcpu_softinfo
#define	cpu_caddr1pte cpu_m.mcpu_caddr1pte
#define	cpu_caddr2pte cpu_m.mcpu_caddr2pte

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_MACHCPUVAR_H */