/* xref: /linux/arch/x86/include/asm/x86_init.h (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PLATFORM_H
#define _ASM_X86_PLATFORM_H

struct ghcb;
struct mpc_bus;
struct mpc_cpu;
struct pt_regs;
struct mpc_table;
struct cpuinfo_x86;
struct irq_domain;

/**
 * struct x86_init_mpparse - platform specific mpparse ops
 * @setup_ioapic_ids:		platform specific ioapic id override
 * @find_mptable:		Find MPTABLE early to reserve the memory region
 * @early_parse_smp_cfg:	Parse the SMP configuration data early before initmem_init()
 * @parse_smp_cfg:		Parse the SMP configuration data
 */
struct x86_init_mpparse {
	void (*setup_ioapic_ids)(void);
	void (*find_mptable)(void);
	void (*early_parse_smp_cfg)(void);
	void (*parse_smp_cfg)(void);
};
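
/*
 * Usage sketch (illustrative only, not part of the kernel sources): a
 * platform that never provides an MP table can stub these hooks with the
 * no-op helpers declared at the end of this header; the point in boot at
 * which the assignments happen is an assumption.
 *
 *	x86_init.mpparse.find_mptable		= x86_init_noop;
 *	x86_init.mpparse.early_parse_smp_cfg	= x86_init_noop;
 *	x86_init.mpparse.parse_smp_cfg		= x86_init_noop;
 */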

/**
 * struct x86_init_resources - platform specific resource related ops
 * @probe_roms:			probe BIOS roms
 * @reserve_resources:		reserve the standard resources for the
 *				platform
 * @memory_setup:		platform specific memory setup
 * @dmi_setup:			platform specific DMI setup
 */
struct x86_init_resources {
	void (*probe_roms)(void);
	void (*reserve_resources)(void);
	char *(*memory_setup)(void);
	void (*dmi_setup)(void);
};
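
/*
 * Usage sketch (illustrative only): a paravirtualized platform that gets its
 * memory map from the hypervisor rather than from the BIOS could override
 * memory_setup(). my_guest_memory_setup() is hypothetical; the returned
 * string merely identifies the origin of the memory map in the boot log.
 *
 *	static char * __init my_guest_memory_setup(void)
 *	{
 *		// build the memory map from hypervisor-provided data here
 *		return "Hypervisor-provided memory map";
 *	}
 *
 *	x86_init.resources.memory_setup = my_guest_memory_setup;
 */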

/**
 * struct x86_init_irqs - platform specific interrupt setup
 * @pre_vector_init:		init code to run before interrupt vectors
 *				are set up.
 * @intr_init:			interrupt init code
 * @intr_mode_select:		interrupt delivery mode selection
 * @intr_mode_init:		interrupt delivery mode setup
 * @create_pci_msi_domain:	Create the PCI/MSI interrupt domain
 */
struct x86_init_irqs {
	void (*pre_vector_init)(void);
	void (*intr_init)(void);
	void (*intr_mode_select)(void);
	void (*intr_mode_init)(void);
	struct irq_domain *(*create_pci_msi_domain)(void);
};
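
/*
 * Usage sketch (illustrative only): a guest platform with no legacy PIC can
 * skip the default pre-vector setup by pointing the hook at the no-op helper
 * from this header; where the assignment is made is an assumption.
 *
 *	x86_init.irqs.pre_vector_init = x86_init_noop;
 */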

/**
 * struct x86_init_oem - oem platform specific customizing functions
 * @arch_setup:			platform specific architecture setup
 * @banner:			print a platform specific banner
 */
struct x86_init_oem {
	void (*arch_setup)(void);
	void (*banner)(void);
};

/**
 * struct x86_init_paging - platform specific paging functions
 * @pagetable_init:	platform specific paging initialization call to setup
 *			the kernel pagetables and prepare accessor functions.
 *			Callback must call paging_init(). Called once after the
 *			direct mapping for phys memory is available.
 */
struct x86_init_paging {
	void (*pagetable_init)(void);
};
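
/*
 * Usage sketch (illustrative only): per the description above, any override
 * must end by calling paging_init(). my_pagetable_init() is hypothetical.
 *
 *	static void __init my_pagetable_init(void)
 *	{
 *		// platform specific page table fixups go here
 *		paging_init();
 *	}
 *
 *	x86_init.paging.pagetable_init = my_pagetable_init;
 */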

/**
 * struct x86_init_timers - platform specific timer setup
 * @setup_percpu_clockev:	set up the per cpu clock event device for the
 *				boot cpu
 * @timer_init:			initialize the platform timer (default PIT/HPET)
 * @wallclock_init:		init the wallclock device
 */
struct x86_init_timers {
	void (*setup_percpu_clockev)(void);
	void (*timer_init)(void);
	void (*wallclock_init)(void);
};
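
/*
 * Usage sketch (illustrative only): a guest with a paravirtualized clock has
 * no PIT/HPET to program, so it can stub timer_init with the no-op helper
 * and hook wallclock_init instead; my_guest_wallclock_init() is hypothetical.
 *
 *	x86_init.timers.timer_init	= x86_init_noop;
 *	x86_init.timers.wallclock_init	= my_guest_wallclock_init;
 */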

/**
 * struct x86_init_iommu - platform specific iommu setup
 * @iommu_init:			platform specific iommu setup
 */
struct x86_init_iommu {
	int (*iommu_init)(void);
};

/**
 * struct x86_init_pci - platform specific pci init functions
 * @arch_init:			platform specific pci arch init call
 * @init:			platform specific pci subsystem init
 * @init_irq:			platform specific pci irq init
 * @fixup_irqs:			platform specific pci irq fixup
 */
struct x86_init_pci {
	int (*arch_init)(void);
	int (*init)(void);
	void (*init_irq)(void);
	void (*fixup_irqs)(void);
};

/**
 * struct x86_hyper_init - x86 hypervisor init functions
 * @init_platform:		platform setup
 * @guest_late_init:		guest late init
 * @x2apic_available:		X2APIC detection
 * @msi_ext_dest_id:		MSI supports 15-bit APIC IDs
 * @init_mem_mapping:		setup early mappings during init_mem_mapping()
 * @init_after_bootmem:		guest init after boot allocator is finished
 */
struct x86_hyper_init {
	void (*init_platform)(void);
	void (*guest_late_init)(void);
	bool (*x2apic_available)(void);
	bool (*msi_ext_dest_id)(void);
	void (*init_mem_mapping)(void);
	void (*init_after_bootmem)(void);
};
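
/*
 * Usage sketch (illustrative only): hypervisor guest support code fills in
 * the hooks it needs. The member names below are real; the my_hv_* helpers
 * and the exact registration point are assumptions.
 *
 *	static bool my_hv_x2apic_available(void)
 *	{
 *		return true;	// e.g. derived from a hypervisor CPUID leaf
 *	}
 *
 *	x86_init.hyper.x2apic_available	= my_hv_x2apic_available;
 *	x86_init.hyper.init_platform	= my_hv_init_platform;
 */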

/**
 * struct x86_init_acpi - x86 ACPI init functions
 * @set_root_pointer:		set RSDP address
 * @get_root_pointer:		get RSDP address
 * @reduced_hw_early_init:	hardware reduced platform early init
 */
struct x86_init_acpi {
	void (*set_root_pointer)(u64 addr);
	u64 (*get_root_pointer)(void);
	void (*reduced_hw_early_init)(void);
};
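
/*
 * Usage sketch (illustrative only): a guest that receives the ACPI RSDP
 * address from its hypervisor instead of scanning low memory could override
 * get_root_pointer(); my_guest_rsdp_addr is a hypothetical variable.
 *
 *	static u64 my_guest_get_root_pointer(void)
 *	{
 *		return my_guest_rsdp_addr;
 *	}
 *
 *	x86_init.acpi.get_root_pointer = my_guest_get_root_pointer;
 */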

/**
 * struct x86_guest - Functions used by misc guest incarnations like SEV, TDX, etc.
 *
 * @enc_status_change_prepare:	Notify HV before the encryption status of a range is changed
 * @enc_status_change_finish:	Notify HV after the encryption status of a range is changed
 * @enc_tlb_flush_required:	Returns true if a TLB flush is needed before changing page encryption status
 * @enc_cache_flush_required:	Returns true if a cache flush is needed before changing page encryption status
 * @enc_kexec_begin:		Begin the two-step process of converting shared memory back
 *				to private. It stops new conversions from being started
 *				and waits for in-flight conversions to finish, if possible.
 * @enc_kexec_finish:		Finish the two-step process of converting shared memory to
 *				private. All memory is private when the function returns.
 *				It is called on only one CPU while the others are shut down
 *				and with interrupts disabled.
 */
struct x86_guest {
	int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
	int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
	bool (*enc_tlb_flush_required)(bool enc);
	bool (*enc_cache_flush_required)(void);
	void (*enc_kexec_begin)(void);
	void (*enc_kexec_finish)(void);
};
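
/*
 * Usage sketch (illustrative only): a rough outline, based on the
 * descriptions above, of how a caller changing the encryption status of a
 * range is expected to combine these hooks. Error handling and the actual
 * page-table update are elided; change_page_attrs() is a hypothetical
 * placeholder.
 *
 *	if (x86_platform.guest.enc_cache_flush_required())
 *		;	// flush caches for the range before the change
 *	if (x86_platform.guest.enc_tlb_flush_required(enc))
 *		;	// flush the TLB for the range before the change
 *
 *	x86_platform.guest.enc_status_change_prepare(vaddr, npages, enc);
 *	change_page_attrs(vaddr, npages, enc);
 *	x86_platform.guest.enc_status_change_finish(vaddr, npages, enc);
 */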

/**
 * struct x86_init_ops - functions for platform specific setup
 *
 */
struct x86_init_ops {
	struct x86_init_resources	resources;
	struct x86_init_mpparse		mpparse;
	struct x86_init_irqs		irqs;
	struct x86_init_oem		oem;
	struct x86_init_paging		paging;
	struct x86_init_timers		timers;
	struct x86_init_iommu		iommu;
	struct x86_init_pci		pci;
	struct x86_hyper_init		hyper;
	struct x86_init_acpi		acpi;
};
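
/*
 * Usage sketch (illustrative only): the single x86_init instance declared at
 * the end of this header comes pre-populated with default implementations;
 * platform or guest code overrides individual callbacks rather than whole
 * sub-structures. The my_platform_* helpers are hypothetical.
 *
 *	x86_init.oem.banner	= my_platform_banner;
 *	x86_init.pci.arch_init	= my_platform_pci_arch_init;
 */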

/**
 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
 * @setup_percpu_clockev:	set up the per cpu clock event device
 * @early_percpu_clock_init:	early init of the per cpu clock event device
 * @fixup_cpu_id:		fixup function for cpuinfo_x86::topo.pkg_id
 * @parallel_bringup:		Parallel bringup control
 */
struct x86_cpuinit_ops {
	void (*setup_percpu_clockev)(void);
	void (*early_percpu_clock_init)(void);
	void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
	bool parallel_bringup;
};

struct timespec64;

/**
 * struct x86_legacy_devices - legacy x86 devices
 *
 * @pnpbios: this platform can have a PNPBIOS. If this is disabled the platform
 * 	is known to never have a PNPBIOS.
 *
 * These are devices known to require LPC or ISA bus. The definition of legacy
 * devices adheres to the ACPI 5.2.9.3 IA-PC Boot Architecture flag
 * ACPI_FADT_LEGACY_DEVICES. These devices consist of user visible devices on
 * the LPC or ISA bus. User visible devices are devices that have end-user
 * accessible connectors (for example, LPT parallel port). Legacy devices on
 * the LPC bus consist, for example, of serial and parallel ports, PS/2
 * keyboard / mouse, and the floppy disk controller. A system that lacks all
 * known legacy devices can assume all devices can be detected exclusively via
 * standard device enumeration mechanisms including the ACPI namespace.
 *
 * A system which does not have ACPI_FADT_LEGACY_DEVICES enabled must not
 * have any of the legacy devices enumerated below present.
 */
struct x86_legacy_devices {
	int pnpbios;
};

/**
 * enum x86_legacy_i8042_state - i8042 keyboard controller state
 * @X86_LEGACY_I8042_PLATFORM_ABSENT: the controller is always absent on
 *	the given platform/subarch.
 * @X86_LEGACY_I8042_FIRMWARE_ABSENT: firmware reports that the controller
 *	is absent.
 * @X86_LEGACY_I8042_EXPECTED_PRESENT: the controller is likely to be
 *	present, the i8042 driver should probe for controller existence.
 */
enum x86_legacy_i8042_state {
	X86_LEGACY_I8042_PLATFORM_ABSENT,
	X86_LEGACY_I8042_FIRMWARE_ABSENT,
	X86_LEGACY_I8042_EXPECTED_PRESENT,
};
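
/*
 * Usage sketch (illustrative only): drivers that depend on the i8042
 * controller can consult this state before probing. The field and constants
 * are defined in this header; the exact call site is an assumption.
 *
 *	if (x86_platform.legacy.i8042 == X86_LEGACY_I8042_PLATFORM_ABSENT)
 *		return -ENODEV;	// don't even probe for the controller
 */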

/**
 * struct x86_legacy_features - legacy x86 features
 *
 * @i8042: indicates whether we expect the platform to have an i8042
 *	controller present.
 * @rtc: this platform has a CMOS real-time clock present
 * @reserve_bios_regions: boot code will search for the EBDA address and the
 * 	start of the 640k - 1M BIOS region.  If false, the platform must
 * 	ensure that its memory map correctly reserves sub-1MB regions as needed.
 * @devices: legacy x86 devices, refer to struct x86_legacy_devices
 * 	documentation for further details.
 */
struct x86_legacy_features {
	enum x86_legacy_i8042_state i8042;
	int rtc;
	int warm_reset;
	int no_vga;
	int reserve_bios_regions;
	struct x86_legacy_devices devices;
};

/**
 * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks
 *
 * @pin_vcpu:			pin current vcpu to specified physical
 *				cpu (run rarely)
 * @sev_es_hcall_prepare:	Load additional hypervisor-specific
 *				state into the GHCB when doing a VMMCALL under
 *				SEV-ES. Called from the #VC exception handler.
 * @sev_es_hcall_finish:	Copies state from the GHCB back into the
 *				processor (or pt_regs). Also runs checks on the
 *				state returned from the hypervisor after a
 *				VMMCALL under SEV-ES.  Needs to return 'false'
 *				if the checks fail.  Called from the #VC
 *				exception handler.
 * @is_private_mmio:		For CoCo VMs, must map MMIO address as private.
 *				Used when device is emulated by a paravisor
 *				layer in the VM context.
 */
struct x86_hyper_runtime {
	void (*pin_vcpu)(int cpu);
	void (*sev_es_hcall_prepare)(struct ghcb *ghcb, struct pt_regs *regs);
	bool (*sev_es_hcall_finish)(struct ghcb *ghcb, struct pt_regs *regs);
	bool (*is_private_mmio)(u64 addr);
};
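
/*
 * Usage sketch (illustrative only): a hypervisor guest implementation fills
 * in only the callbacks it needs, so callers are expected to check for NULL
 * before invoking them. The my_hv_* helpers are hypothetical.
 *
 *	x86_platform.hyper.sev_es_hcall_prepare	= my_hv_hcall_prepare;
 *	x86_platform.hyper.sev_es_hcall_finish	= my_hv_hcall_finish;
 *
 *	// caller side:
 *	if (x86_platform.hyper.sev_es_hcall_prepare)
 *		x86_platform.hyper.sev_es_hcall_prepare(ghcb, regs);
 */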

/**
 * struct x86_platform_ops - platform specific runtime functions
 * @calibrate_cpu:		calibrate CPU
 * @calibrate_tsc:		calibrate TSC, if different from CPU
 * @get_wallclock:		get time from HW clock like RTC etc.
 * @set_wallclock:		set time back to HW clock
 * @iommu_shutdown:		shut down the IOMMU on machine shutdown/kexec
 * @is_untracked_pat_range:	exclude from PAT logic
 * @nmi_init:			enable NMI on cpus
 * @get_nmi_reason:		return the reason for the NMI from the platform
 * @save_sched_clock_state:	save state for sched_clock() on suspend
 * @restore_sched_clock_state:	restore state for sched_clock() on resume
 * @apic_post_init:		adjust apic if needed
 * @legacy:			legacy features
 * @set_legacy_features:	override legacy features. Use of this callback
 * 				is highly discouraged. You should only need
 * 				this if your hardware platform requires further
 * 				custom fine tuning far beyond what may be
 * 				possible in x86_early_init_platform_quirks() by
 * 				only using the current x86_hardware_subarch
 * 				semantics.
 * @realmode_reserve:		reserve memory for realmode trampoline
 * @realmode_init:		initialize realmode trampoline
 * @hyper:			x86 hypervisor specific runtime callbacks
 * @guest:			runtime callbacks for CoCo guest incarnations
 *				(SEV, TDX, etc.), see struct x86_guest
 */
struct x86_platform_ops {
	unsigned long (*calibrate_cpu)(void);
	unsigned long (*calibrate_tsc)(void);
	void (*get_wallclock)(struct timespec64 *ts);
	int (*set_wallclock)(const struct timespec64 *ts);
	void (*iommu_shutdown)(void);
	bool (*is_untracked_pat_range)(u64 start, u64 end);
	void (*nmi_init)(void);
	unsigned char (*get_nmi_reason)(void);
	void (*save_sched_clock_state)(void);
	void (*restore_sched_clock_state)(void);
	void (*apic_post_init)(void);
	struct x86_legacy_features legacy;
	void (*set_legacy_features)(void);
	void (*realmode_reserve)(void);
	void (*realmode_init)(void);
	struct x86_hyper_runtime hyper;
	struct x86_guest guest;
};
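
/*
 * Usage sketch (illustrative only): a paravirtualized clock source typically
 * replaces the wallclock and TSC calibration hooks; the my_guest_* helpers
 * are hypothetical.
 *
 *	static void my_guest_get_wallclock(struct timespec64 *ts)
 *	{
 *		// read the hypervisor-provided wall clock into *ts
 *	}
 *
 *	x86_platform.get_wallclock	= my_guest_get_wallclock;
 *	x86_platform.calibrate_tsc	= my_guest_tsc_khz;
 */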

struct x86_apic_ops {
	unsigned int	(*io_apic_read)   (unsigned int apic, unsigned int reg);
	void		(*restore)(void);
};

extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
extern struct x86_apic_ops x86_apic_ops;

extern void x86_early_init_platform_quirks(void);
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
extern bool bool_x86_init_noop(void);
extern void x86_op_int_noop(int cpu);
extern bool x86_pnpbios_disabled(void);
extern int set_rtc_noop(const struct timespec64 *now);
extern void get_rtc_noop(struct timespec64 *now);
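
/*
 * Usage sketch (illustrative only): the *_noop helpers above let a platform
 * disable a callback by pointing it at an empty implementation with the
 * matching signature instead of setting it to NULL, keeping the call sites
 * unconditional, e.g.:
 *
 *	x86_platform.set_wallclock	= set_rtc_noop;
 *	x86_platform.get_wallclock	= get_rtc_noop;
 *	x86_init.timers.timer_init	= x86_init_noop;
 */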

#endif