crash.c — diff from b23c83ad2c638420ec0608a9de354507c41bec29 to ea53ad9cf73b6b48608a69e626caeae87e5ddd11 (unchanged lines have no prefix; lines added in the newer version are prefixed "+", removed lines "-")
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.

--- 34 unchanged lines hidden ---

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. And when loading kvm_intel module, the
+ * callback function pointer will be assigned.
+ *
+ * protected by rcu.
+ */
+crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+	crash_vmclear_fn *do_vmclear_operation = NULL;
+
+	rcu_read_lock();
+	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+	if (do_vmclear_operation)
+		do_vmclear_operation();
+	rcu_read_unlock();
+}
+
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

+	/*
+	 * VMCLEAR VMCSs loaded on all cpus if needed.
+	 */
+	cpu_crash_vmclear_loaded_vmcss();
+
	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)

--- 36 unchanged lines hidden ---

	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

+	/*
+	 * VMCLEAR VMCSs loaded on this cpu if needed.
+	 */
+	cpu_crash_vmclear_loaded_vmcss();
+
	cpu_emergency_disable_virtualization();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC

--- 4 unchanged lines hidden ---

	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

-#ifdef CONFIG_KEXEC_FILE
-
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_HOTPLUG)
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

--- 55 unchanged lines hidden ---

	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
-			       unsigned long *sz)
+			       unsigned long *sz, unsigned long *nr_mem_ranges)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

+	/* Return the computed number of memory ranges, for hotplug usage */
+	*nr_mem_ranges = cmem->nr_ranges;
+
	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
	vfree(cmem);
	return ret;
}
+#endif

+#ifdef CONFIG_KEXEC_FILE
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

--- 98 unchanged lines hidden ---

out:
	vfree(cmem);
	return ret;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
+	unsigned long pnum = 0;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/* Prepare elf headers and add a segment */
-	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
+	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz, &pnum);
	if (ret)
		return ret;

	image->elf_headers = kbuf.buffer;
	image->elf_headers_sz = kbuf.bufsz;
+	kbuf.memsz = kbuf.bufsz;

-	kbuf.memsz = kbuf.bufsz;
+#ifdef CONFIG_CRASH_HOTPLUG
+	/*
+	 * The elfcorehdr segment size accounts for VMCOREINFO, kernel_map,
+	 * maximum CPUs and maximum memory ranges.
+	 */
+	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
+		pnum = 2 + CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES;
+	else
+		pnum += 2 + CONFIG_NR_CPUS_DEFAULT;
+
+	if (pnum < (unsigned long)PN_XNUM) {
+		kbuf.memsz = pnum * sizeof(Elf64_Phdr);
+		kbuf.memsz += sizeof(Elf64_Ehdr);
+
+		image->elfcorehdr_index = image->nr_segments;
+
+		/* Mark as usable to crash kernel, else crash kernel fails on boot */
+		image->elf_headers_sz = kbuf.memsz;
+	} else {
+		pr_err("number of Phdrs %lu exceeds max\n", pnum);
+	}
+#endif
+
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;
	image->elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */
+
+#ifdef CONFIG_CRASH_HOTPLUG
+
+#undef pr_fmt
+#define pr_fmt(fmt) "crash hp: " fmt
+
+/**
+ * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes
+ * @image: a pointer to kexec_crash_image
+ *
+ * Prepare the new elfcorehdr and replace the existing elfcorehdr.
+ */
+void arch_crash_handle_hotplug_event(struct kimage *image)
+{
+	void *elfbuf = NULL, *old_elfcorehdr;
+	unsigned long nr_mem_ranges;
+	unsigned long mem, memsz;
+	unsigned long elfsz = 0;
+
+	/*
+	 * Create the new elfcorehdr reflecting the changes to CPU and/or
+	 * memory resources.
+	 */
+	if (prepare_elf_headers(image, &elfbuf, &elfsz, &nr_mem_ranges)) {
+		pr_err("unable to create new elfcorehdr");
+		goto out;
+	}
+
+	/*
+	 * Obtain address and size of the elfcorehdr segment, and
+	 * check it against the new elfcorehdr buffer.
+	 */
+	mem = image->segment[image->elfcorehdr_index].mem;
+	memsz = image->segment[image->elfcorehdr_index].memsz;
+	if (elfsz > memsz) {
+		pr_err("update elfcorehdr elfsz %lu > memsz %lu",
+		       elfsz, memsz);
+		goto out;
+	}
+
+	/*
+	 * Copy new elfcorehdr over the old elfcorehdr at destination.
+	 */
+	old_elfcorehdr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
+	if (!old_elfcorehdr) {
+		pr_err("mapping elfcorehdr segment failed\n");
+		goto out;
+	}
+
+	/*
+	 * Temporarily invalidate the crash image while the
+	 * elfcorehdr is updated.
+	 */
+	xchg(&kexec_crash_image, NULL);
+	memcpy_flushcache(old_elfcorehdr, elfbuf, elfsz);
+	xchg(&kexec_crash_image, image);
+	kunmap_local(old_elfcorehdr);
+	pr_debug("updated elfcorehdr\n");
+
+out:
+	vfree(elfbuf);
+}
+#endif
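
Note: the crash_vmclear_loaded_vmcss hook added above is only dereferenced in this file; per its comment, a VMX module assigns the callback at load time and the pointer is protected by RCU. A minimal sketch of that registration pattern follows, assuming a hypothetical module (the example_* names and the include that declares crash_vmclear_fn are assumptions for illustration, not the actual kvm_intel code):

/*
 * Illustrative sketch only: a hypothetical VMX module publishing its
 * VMCLEAR callback for the crash/kdump path.
 */
#include <linux/module.h>
#include <linux/rcupdate.h>
/* assumption: include whichever header declares crash_vmclear_fn and
 * crash_vmclear_loaded_vmcss in this tree */

static void example_vmclear_local_vmcss(void)
{
	/* VMCLEAR whatever VMCSs this hypothetical module has loaded. */
}

static int __init example_init(void)
{
	/* Publish the callback; the crash path reads it under rcu_read_lock(). */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   example_vmclear_local_vmcss);
	return 0;
}

static void __exit example_exit(void)
{
	/* Unpublish, then wait for in-flight readers before the code goes away. */
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");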