xref: /linux/arch/x86/kernel/machine_kexec_64.c (revision 593043d35ddff8ab033546c2a89bb1d4080d03e1)
/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>
#include <asm/set_memory.h>

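/*
 * Table of image loaders tried in turn by
 * arch_kexec_kernel_image_probe(); on x86-64 only the bzImage64
 * loader is currently wired up.
 */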
#ifdef CONFIG_KEXEC_FILE
static struct kexec_file_ops *kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
};
#endif

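/*
 * Free the page-table pages allocated by init_transition_pgtable().
 * free_page() ignores a zero (NULL) address, so this is safe to call
 * on a partially initialized image->arch, as the error path below does.
 */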
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.p4d);
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

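/*
 * Map the control code page at the kernel virtual address of
 * relocate_kernel() inside the page table rooted at @pgd, so that
 * execution can continue at the same virtual address after CR3 is
 * switched to the identity-mapped tables built by init_pgtable().
 */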
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			goto err;
		image->arch.p4d = p4d;
		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}
	p4d = p4d_offset(pgd, vaddr);
	if (!p4d_present(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(p4d, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}

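/*
 * Page-table allocation callback handed to kernel_ident_mapping_init()
 * via struct x86_mapping_info.  Control pages are used because the
 * kimage tracks them and keeps them clear of the destination ranges of
 * the segments being loaded.
 */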
static void *alloc_pgt_page(void *data)
{
	struct kimage *image = (struct kimage *)data;
	struct page *page;
	void *p = NULL;

	page = kimage_alloc_control_pages(image, 0);
	if (page) {
		p = page_address(page);
		clear_page(p);
	}

	return p;
}

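/*
 * Build identity mappings (virtual == physical) covering every region
 * of RAM recorded in pfn_mapped[] plus each segment's destination
 * range, then add the transition mapping for relocate_kernel().
 */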
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);
	clear_page(level4p);
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info,
						 level4p, mstart, mend);
		if (result)
			return result;
	}

	/*
	 * A segment's memory range could lie outside 0 ~ max_pfn, for
	 * example when jumping back to the original kernel from a kexeced
	 * kernel, or when the first kernel was booted with a user-supplied
	 * memory map and the second kernel is loaded outside that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = kernel_ident_mapping_init(&info,
						 level4p, mstart, mend);

		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}

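/*
 * Helpers for loading new descriptor-table pointers; machine_kexec()
 * uses them to invalidate the current GDT and IDT before jumping to
 * the relocation code.
 */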
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

#ifdef CONFIG_KEXEC_FILE
/* Update purgatory as needed after various image segments have been prepared */
static int arch_update_purgatory(struct kimage *image)
{
	int ret = 0;

	if (!image->file_mode)
		return 0;

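	/*
	 * Purgatory runs between the two kernels.  For a crash (kdump)
	 * image it first copies a backup region (conventionally the low
	 * 640 KiB on x86) out of the way; the symbols patched below tell
	 * it the destination, source and size of that copy.
	 */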
	/* Setup copying of backup region */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_dest",
				&image->arch.backup_load_addr,
				sizeof(image->arch.backup_load_addr), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_src",
				&image->arch.backup_src_start,
				sizeof(image->arch.backup_src_start), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_sz",
				&image->arch.backup_src_sz,
				sizeof(image->arch.backup_src_sz), 0);
		if (ret)
			return ret;
	}

	return ret;
}
#else /* !CONFIG_KEXEC_FILE */
static inline int arch_update_purgatory(struct kimage *image)
{
	return 0;
}
#endif /* CONFIG_KEXEC_FILE */

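/*
 * The control pages are laid out with the new top-level page table in
 * the first page and a copy of the relocation code in the second; see
 * init_transition_pgtable() above and machine_kexec() below.
 */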
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	/* update purgatory as needed */
	result = arch_update_purgatory(image);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. The
		 * kexec/kdump paths already have calls to
		 * disable_IO_APIC() in one form or another; the kexec
		 * jump path needs one as well.
		 */
		disable_IO_APIC();
#endif
	}

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

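	/*
	 * page_list[] is consumed by the relocate_kernel() assembly: the
	 * PAGES_NR-indexed slots pass the physical and virtual addresses
	 * of the control page, the physical address of the new top-level
	 * page table and, for a regular (non-crash) kexec, the swap page
	 * used while pages are moved into place.
	 */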
	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things: they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* Jump to the copied relocation code; with kexec jump this returns. */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

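/*
 * Export the details that dump tools (e.g. makedumpfile or the crash
 * utility) need to make sense of a vmcore: the physical load offset,
 * the top-level page table, the NUMA node array and the KASLR offset.
 */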
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_NUMBER(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
			      kaslr_offset());
	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
}

/* arch-dependent functionality related to kexec file-based syscall */

#ifdef CONFIG_KEXEC_FILE
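/*
 * Walk the registered loaders and let the first probe() that
 * recognizes the buffer claim the image; its ops are then used for
 * load, cleanup and signature verification below.
 */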
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int i, ret = -ENOEXEC;
	struct kexec_file_ops *fops;

	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
		fops = kexec_file_loaders[i];
		if (!fops || !fops->probe)
			continue;

		ret = fops->probe(buf, buf_len);
		if (!ret) {
			image->fops = fops;
			return ret;
		}
	}

	return ret;
}

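/*
 * Any crash ELF headers left over from a previous load are freed
 * first so a reload does not leak them; the actual loading is
 * delegated to the loader selected at probe time.
 */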
void *arch_kexec_kernel_image_load(struct kimage *image)
{
	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;

	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

#ifdef CONFIG_KEXEC_VERIFY_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
				 unsigned long kernel_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(kernel, kernel_len);
}
#endif

/*
 * Apply purgatory relocations.
 *
 * ehdr: Pointer to ELF headers.
 * sechdrs: Pointer to section headers.
 * relsec: Section index of the SHT_RELA section.
 *
 * TODO: Some of this code belongs in generic code. Move it to kexec.c.
 */
int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
				     Elf64_Shdr *sechdrs, unsigned int relsec)
{
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;

	/*
	 * ->sh_offset has been modified to keep the pointer to the
	 * section contents in memory.
	 */
	rel = (void *)sechdrs[relsec].sh_offset;

	/* Section to which relocations apply */
	section = &sechdrs[sechdrs[relsec].sh_info];

	pr_debug("Applying relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* Associated symbol table */
	symtabsec = &sechdrs[sechdrs[relsec].sh_link];

	/* String table */
	if (symtabsec->sh_link >= ehdr->e_shnum) {
		/* Invalid strtab section number */
		pr_err("Invalid string table section index %d\n",
		       symtabsec->sh_link);
		return -ENOEXEC;
	}

	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;

	/* section header string table */
	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains the byte offset from the
		 * beginning of the section to the storage unit affected.
		 *
		 * This is the location to update (->sh_offset): the
		 * temporary buffer where the section is currently loaded.
		 * The section will finally be copied to a different
		 * address, pointed to by ->sh_addr; kexec takes care of
		 * moving it there (kexec_load_segment()).
		 */
		location = (void *)(section->sh_offset + rel[i].r_offset);

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;

		/*
		 * rel[i].r_info encodes both the symbol-table index the
		 * relocation is made against and the type of relocation
		 * to apply; the ELF64_R_SYM() and ELF64_R_TYPE() macros
		 * extract these respectively.
		 */
		sym = (Elf64_Sym *)symtabsec->sh_offset +
				ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
			 name, sym->st_info, sym->st_shndx, sym->st_value,
			 sym->st_size);

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
		else if (sym->st_shndx >= ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
			sec_base = sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;
		value += rel[i].r_addend;

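		/*
		 * value now holds S + A (symbol value plus addend) in the
		 * usual ELF relocation notation; R_X86_64_PC32 additionally
		 * subtracts the place P below, and the 32-bit absolute
		 * types are checked for truncation.
		 */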
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)location = value;
			break;
		case R_X86_64_32:
			*(u32 *)location = value;
			if (value != *(u32 *)location)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)location = value;
			if ((s64)value != *(s32 *)location)
				goto overflow;
			break;
		case R_X86_64_PC32:
			value -= (u64)address;
			*(u32 *)location = value;
			break;
		default:
			pr_err("Unknown rela relocation: %llu\n",
			       ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("Overflow in relocation type %d value 0x%lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), value);
	return -ENOEXEC;
}
#endif /* CONFIG_KEXEC_FILE */

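/*
 * Flip the protection of the linear-map pages backing the physical
 * range [start, end] between read-only and read-write.
 */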
static int
kexec_mark_range(unsigned long start, unsigned long end, bool protect)
{
	struct page *page;
	unsigned int nr_pages;

	/*
	 * For the physical range [start, end]: we must skip an unassigned
	 * crashk resource, whose "end" member is zero-valued.
	 */
	if (!end || start > end)
		return 0;

	page = pfn_to_page(start >> PAGE_SHIFT);
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	if (protect)
		return set_pages_ro(page, nr_pages);
	else
		return set_pages_rw(page, nr_pages);
}

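/*
 * (Un)protect the crash kernel region so stray writes cannot corrupt
 * the loaded crash image.  The page holding the copied relocation code
 * is skipped: crash_kexec() must still be able to memcpy() into it
 * (see machine_kexec()) after the region has been made read-only.
 */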
static void kexec_mark_crashkres(bool protect)
{
	unsigned long control;

	kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect);

	/* Don't touch the control code page used in crash_kexec(). */
	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
	/* Control code page is located in the 2nd page. */
	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
	control += KEXEC_CONTROL_PAGE_SIZE;
	kexec_mark_range(control, crashk_res.end, protect);
}

void arch_kexec_protect_crashkres(void)
{
	kexec_mark_crashkres(true);
}

void arch_kexec_unprotect_crashkres(void)
{
	kexec_mark_crashkres(false);
}