/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add two extra slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(sizeof(struct crash_mem) +
		       sizeof(struct crash_mem_range) * nr_ranges);
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges in cmem and remove them. This may split
 * existing ranges; the resulting split ranges are put in the
 * cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				  prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
					  IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
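	 *
	 * The backup region is the first 640K of RAM, which purgatory
	 * copies into the backup segment after a crash (see
	 * crash_load_segments() below); pointing the PT_LOAD file offset
	 * at that copy lets the dump read the preserved contents.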
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
		    phdr->p_paddr == image->arch.backup_src_start &&
		    phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
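	/* Free the scratch range list on both the success and error paths. */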
	vfree(cmem);
	return ret;
}

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */

	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */