xref: /linux/arch/x86/include/asm/efi.h (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <asm/ibt.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

extern unsigned long efi_fw_vendor, efi_config_table;
extern unsigned long efi_mixed_mode_stack_pa;

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 */

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

/*
 * The EFI services are called through variadic functions in many cases. These
 * functions are implemented in assembler and support only a fixed number of
 * arguments. The macros below allow us to check at build time that we don't
 * try to call them with too many arguments.
 *
 * __efi_nargs() will return the number of arguments if it is 7 or less, and
 * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
 * impossible to calculate the exact number of arguments beyond some
 * pre-defined limit. The maximum number of arguments currently supported by
 * any of the thunks is 7, so this is good enough for now and can be extended
 * in the obvious way if we ever need more.
 */

#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__,	\
	__efi_arg_sentinel(9), __efi_arg_sentinel(8),		\
	__efi_arg_sentinel(7), __efi_arg_sentinel(6),		\
	__efi_arg_sentinel(5), __efi_arg_sentinel(4),		\
	__efi_arg_sentinel(3), __efi_arg_sentinel(2),		\
	__efi_arg_sentinel(1), __efi_arg_sentinel(0))
#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, n, ...)	\
	__take_second_arg(n,					\
		({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 10; }))
#define __efi_arg_sentinel(n) , n
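
/*
 * Example (derived from the macros above): the sentinel that ends up in the
 * 'n' slot of __efi_nargs__() expands to ", <count>", so __take_second_arg()
 * yields the count, e.g.
 *
 *	__efi_nargs()		evaluates to 0
 *	__efi_nargs(a, b, c)	evaluates to 3
 *
 * Only when the sentinels are pushed out of the 'n' slot by too many
 * arguments is the BUILD_BUG_ON_MSG() expression selected instead.
 */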

/*
 * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
 * represents more than n arguments.
 */

#define __efi_nargs_check(f, n, ...)					\
	__efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
#define __efi_nargs_check__(f, p, n) ({					\
	BUILD_BUG_ON_MSG(						\
		(p) > (n),						\
		#f " called with too many arguments (" #p ">" #n ")");	\
})
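
/*
 * For example, __efi_nargs_check(efi_call, 7, a, b, c) is a compile-time
 * no-op, while the same check with eight or more variadic arguments trips
 * the BUILD_BUG_ON_MSG() above.
 */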

static inline void efi_fpu_begin(void)
{
	/*
	 * The UEFI calling convention (UEFI spec 2.3.2 and 2.3.4) requires
	 * that FCW and MXCSR (64-bit) must be initialized prior to calling
	 * UEFI code.  (Oddly the spec does not require that the FPU stack
	 * be empty.)
	 */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
}

static inline void efi_fpu_end(void)
{
	kernel_fpu_end();
}

#ifdef CONFIG_X86_32
#define arch_efi_call_virt_setup()					\
({									\
	efi_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
})

#define arch_efi_call_virt_teardown()					\
({									\
	firmware_restrict_branch_speculation_end();			\
	efi_fpu_end();							\
})

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern asmlinkage u64 __efi_call(void *fp, ...);

extern bool efi_disable_ibt_for_runtime;

#define efi_call(...) ({						\
	__efi_nargs_check(efi_call, 7, __VA_ARGS__);			\
	__efi_call(__VA_ARGS__);					\
})

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	efi_fpu_begin();						\
	firmware_restrict_branch_speculation_start();			\
	efi_enter_mm();							\
})

#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({				\
	u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime);		\
	ret = efi_call((void *)p->f, args);				\
	ibt_restore(ibt);						\
	ret;								\
})
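
/*
 * Rough sketch of the intended call pattern ('rt' stands for a pointer to
 * the firmware's runtime services table):
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(rt, get_time, tm, tc);
 *	arch_efi_call_virt_teardown();
 *
 * The setup/teardown pair brackets each runtime service invocation so that
 * the EFI page tables, FPU state and speculation controls are handled around
 * the firmware call, while arch_efi_call_virt() itself saves and restores
 * the IBT state.
 */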

#define arch_efi_call_virt_teardown()					\
({									\
	efi_leave_mm();							\
	firmware_restrict_branch_speculation_end();			\
	efi_fpu_end();							\
})

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset.  The __memset function is
 * present only in the kernel binary.  Since the EFI stub is linked into a
 * separate binary, it doesn't have __memset().  So we should use the standard
 * memset from arch/x86/boot/compressed/string.c.  The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern int __init efi_memblock_x86_reserve_range(void);
extern void __init efi_print_memmap(void);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);

void efi_enter_mm(void);
void efi_leave_mm(void);

/* kexec external ABI */
struct efi_setup_data {
	u64 fw_vendor;
	u64 __unused;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;

#ifdef CONFIG_EFI
extern u64 __efi64_thunk(u32, ...);

#define efi64_thunk(...) ({						\
	u64 __pad[3]; /* must have space for 3 args on the stack */	\
	__efi_nargs_check(efi64_thunk, 9, __VA_ARGS__);			\
	__efi64_thunk(__VA_ARGS__, __pad);				\
})
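
/*
 * A call looks like efi64_thunk(fn, arg1, ..., argN) with at most nine
 * arguments. The trailing __pad argument only guarantees that a few extra
 * stack slots exist, so the thunk can copy a fixed number of stack-passed
 * argument words without reading past the caller's frame.
 */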

static inline bool efi_is_mixed(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return false;
	return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
		return true;

	return IS_ENABLED(CONFIG_EFI_MIXED);
}

extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
					 unsigned long descriptor_size,
					 u32 descriptor_version,
					 efi_memory_desc_t *virtual_map,
					 unsigned long systab_phys);

/* arch specific definitions used by the stub code */

#ifdef CONFIG_EFI_MIXED

#define ARCH_HAS_EFISTUB_WRAPPERS

static inline bool efi_is_64bit(void)
{
	extern const bool efi_is64;

	return efi_is64;
}

static inline bool efi_is_native(void)
{
	return efi_is_64bit();
}

#define efi_table_attr(inst, attr)					\
	(efi_is_native() ? (inst)->attr					\
			 : efi_mixed_table_attr((inst), attr))

#define efi_mixed_table_attr(inst, attr)				\
	(__typeof__(inst->attr))					\
		_Generic(inst->mixed_mode.attr,				\
		u32:		(unsigned long)(inst->mixed_mode.attr),	\
		default:	(inst->mixed_mode.attr))
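
/*
 * When a table member is stored as a u32 in the 32-bit (mixed_mode) layout,
 * typically a pointer, it is widened to unsigned long first so that the cast
 * back to the native member type leaves the upper bits well defined; members
 * of any other type are passed through unchanged.
 */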

/*
 * The following macros allow translating arguments if necessary from native to
 * mixed mode. There are two use cases for this: initializing the upper 32 bits
 * of output parameters, and splitting a 64-bit argument into the two 32-bit
 * halves that a 32-bit method expects, so it can be thunked properly.
 *
 * As examples, the AllocatePool boot service returns the address of the
 * allocation, but it will not set the high 32 bits of the address. To ensure
 * that the full 64-bit address is initialized, we zero-init the address before
 * calling the thunk.
 *
 * The FreePages boot service takes a 64-bit physical address even in 32-bit
 * mode. For the thunk to work correctly, a native 64-bit call of
 * 	free_pages(addr, size)
 * must be translated to
 * 	efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
 * so that the two 32-bit halves of addr get pushed onto the stack separately.
 */

static inline void *efi64_zero_upper(void *p)
{
	((u32 *)p)[1] = 0;
	return p;
}
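
/*
 * efi64_zero_upper() pre-clears bits 32..63 of the 64-bit object that an
 * output pointer refers to; the argument maps below wrap output pointers
 * such as 'key' and 'size' with it, since the 32-bit firmware will only
 * write the low word.
 */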

static inline u32 efi64_convert_status(efi_status_t status)
{
	return (u32)(status | (u64)status >> 32);
}
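
/*
 * efi64_convert_status() produces the 32-bit representation of a native
 * efi_status_t by folding the error flag from bit 63 down into bit 31 and
 * truncating; e.g. the 64-bit EFI_UNSUPPORTED (0x8000000000000003) becomes
 * 0x80000003, and EFI_SUCCESS stays 0.
 */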

#define __efi64_split(val)		(val) & U32_MAX, (u64)(val) >> 32
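
/*
 * __efi64_split() expands a 64-bit value into two comma-separated 32-bit
 * arguments, low half first, so a single 64-bit parameter occupies two
 * argument slots of the thunk, matching the FreePages translation described
 * above.
 */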

#define __efi64_argmap_free_pages(addr, size)				\
	((addr), 0, (size))

#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver)	\
	((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))

#define __efi64_argmap_allocate_pool(type, size, buffer)		\
	((type), (size), efi64_zero_upper(buffer))

#define __efi64_argmap_create_event(type, tpl, f, c, event)		\
	((type), (tpl), (f), (c), efi64_zero_upper(event))

#define __efi64_argmap_set_timer(event, type, time)			\
	((event), (type), lower_32_bits(time), upper_32_bits(time))

#define __efi64_argmap_wait_for_event(num, event, index)		\
	((num), (event), efi64_zero_upper(index))

#define __efi64_argmap_handle_protocol(handle, protocol, interface)	\
	((handle), (protocol), efi64_zero_upper(interface))

#define __efi64_argmap_locate_protocol(protocol, reg, interface)	\
	((protocol), (reg), efi64_zero_upper(interface))

#define __efi64_argmap_locate_device_path(protocol, path, handle)	\
	((protocol), (path), efi64_zero_upper(handle))

#define __efi64_argmap_exit(handle, status, size, data)			\
	((handle), efi64_convert_status(status), (size), (data))

/* PCI I/O */
#define __efi64_argmap_get_location(protocol, seg, bus, dev, func)	\
	((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus),	\
	 efi64_zero_upper(dev), efi64_zero_upper(func))

/* LoadFile */
#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf)	\
	((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))

/* Graphics Output Protocol */
#define __efi64_argmap_query_mode(gop, mode, size, info)		\
	((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))

/* TCG2 protocol */
#define __efi64_argmap_hash_log_extend_event(prot, fl, addr, size, ev)	\
	((prot), (fl), 0ULL, (u64)(addr), 0ULL, (u64)(size), 0ULL, ev)

/* DXE services */
#define __efi64_argmap_get_memory_space_descriptor(phys, desc) \
	(__efi64_split(phys), (desc))

#define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \
	(__efi64_split(phys), __efi64_split(size), __efi64_split(flags))

/* file protocol */
#define __efi64_argmap_open(prot, newh, fname, mode, attr) \
	((prot), efi64_zero_upper(newh), (fname), __efi64_split(mode), \
	 __efi64_split(attr))

#define __efi64_argmap_set_position(pos) (__efi64_split(pos))

/* file system protocol */
#define __efi64_argmap_open_volume(prot, file) \
	((prot), efi64_zero_upper(file))

/* Memory Attribute Protocol */
#define __efi64_argmap_get_memory_attributes(protocol, phys, size, flags) \
	((protocol), __efi64_split(phys), __efi64_split(size), (flags))

#define __efi64_argmap_set_memory_attributes(protocol, phys, size, flags) \
	((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))

#define __efi64_argmap_clear_memory_attributes(protocol, phys, size, flags) \
	((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))

/*
 * The macros below handle the plumbing for the argument mapping. To add a
 * mapping for a specific EFI method, simply define a macro
 * __efi64_argmap_<method name>, following the examples above.
 */

#define __efi64_thunk_map(inst, func, ...)				\
	efi64_thunk(inst->mixed_mode.func,				\
		__efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__),	\
			       (__VA_ARGS__)))

#define __efi64_argmap(mapped, args)					\
	__PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
#define __efi64_argmap__0(mapped, args) __efi_eval mapped
#define __efi64_argmap__1(mapped, args) __efi_eval args

#define __efi_eat(...)
#define __efi_eval(...) __VA_ARGS__
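
/*
 * How the selection works: if an __efi64_argmap_<func> macro exists, 'mapped'
 * expands to a parenthesized argument list, __efi_eat swallows it, and
 * __efi_nargs() sees zero arguments, so __efi64_argmap__0 emits the mapped
 * arguments. If no mapping macro is defined, the unexpanded
 * __efi64_argmap_<func> token remains in front of the parentheses,
 * __efi_nargs() sees a single argument, and __efi64_argmap__1 passes the
 * original arguments through unchanged.
 */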

static inline efi_status_t __efi64_widen_efi_status(u64 status)
{
	/* use rotate to move the value of bit #31 into position #63 */
	return ror64(rol32(status, 1), 1);
}
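
/*
 * For example, the 32-bit status 0x80000003 (EFI_UNSUPPORTED) widens to
 * 0x8000000000000003, while EFI_SUCCESS (0) is returned unchanged.
 */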

/* The macro below handles dispatching via the thunk if needed */

#define efi_fn_call(inst, func, ...)					\
	(efi_is_native() ? (inst)->func(__VA_ARGS__)			\
			 : efi_mixed_call((inst), func, ##__VA_ARGS__))

#define efi_mixed_call(inst, func, ...)					\
	_Generic(inst->func(__VA_ARGS__),				\
	efi_status_t:							\
		__efi64_widen_efi_status(				\
			__efi64_thunk_map(inst, func, ##__VA_ARGS__)),	\
	u64: ({ BUILD_BUG(); ULONG_MAX; }),				\
	default:							\
		(__typeof__(inst->func(__VA_ARGS__)))			\
			__efi64_thunk_map(inst, func, ##__VA_ARGS__))
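
/*
 * Note that efi_status_t and u64 are distinct types to _Generic even though
 * they have the same width on x86_64: status returns are widened from their
 * 32-bit encoding, any other 64-bit return is rejected with BUILD_BUG()
 * (presumably because a 32-bit service cannot produce a meaningful upper
 * half), and remaining return types are simply cast from the thunk's result.
 */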

#else /* CONFIG_EFI_MIXED */

static inline bool efi_is_64bit(void)
{
	return IS_ENABLED(CONFIG_X86_64);
}

#endif /* CONFIG_EFI_MIXED */

extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);

extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
static inline bool efi_is_table_address(unsigned long phys_addr)
{
	return false;
}
static inline void efi_reserve_boot_services(void)
{
}
#endif /* CONFIG_EFI */

#ifdef CONFIG_EFI_FAKE_MEMMAP
extern void __init efi_fake_memmap_early(void);
extern void __init efi_fake_memmap(void);
#else
static inline void efi_fake_memmap_early(void)
{
}

static inline void efi_fake_memmap(void)
{
}
#endif

extern int __init efi_memmap_alloc(unsigned int num_entries,
				   struct efi_memory_map_data *data);
extern void __efi_memmap_free(u64 phys, unsigned long size,
			      unsigned long flags);
#define __efi_memmap_free __efi_memmap_free

extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
					 struct range *range);
extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
				     void *buf, struct efi_mem_range *mem);

#define arch_ima_efi_boot_mode	\
	({ extern struct boot_params boot_params; boot_params.secure_boot; })

#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_get_runtime_map_size(void);
int efi_get_runtime_map_desc_size(void);
int efi_runtime_map_copy(void *buf, size_t bufsz);
#else
static inline int efi_get_runtime_map_size(void)
{
	return 0;
}

static inline int efi_get_runtime_map_desc_size(void)
{
	return 0;
}

static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
{
	return 0;
}

#endif

#endif /* _ASM_X86_EFI_H */