/*
 * arch/s390/kernel/machine_kexec.c
 *
 * Copyright IBM Corp. 2005,2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/asm-offsets.h>

typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len;

#ifdef CONFIG_CRASH_DUMP

void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);

/*
 * Create ELF notes for one CPU
 */
static void add_elf_notes(int cpu)
{
	struct save_area *sa = (void *) 4608 + store_prefix();
	void *ptr;

	memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa));
	ptr = (u64 *) per_cpu_ptr(crash_notes, cpu);
	ptr = fill_cpu_elf_notes(ptr, sa);
	memset(ptr, 0, sizeof(struct elf_note));
}

/*
 * Store status of next available physical CPU
 */
static int store_status_next(int start_cpu, int this_cpu)
{
	struct save_area *sa = (void *) 4608 + store_prefix();
	int cpu, rc;

	for (cpu = start_cpu; cpu < 65536; cpu++) {
		if (cpu == this_cpu)
			continue;
		do {
			rc = raw_sigp(cpu, sigp_stop_and_store_status);
		} while (rc == sigp_busy);
		if (rc != sigp_order_code_accepted)
			continue;
		if (sa->pref_reg)
			return cpu;
	}
	return -1;
}

/*
 * Initialize CPU ELF notes
 */
void setup_regs(void)
{
	unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE;
	int cpu, this_cpu, phys_cpu = 0, first = 1;

	this_cpu = stap();

	if (!S390_lowcore.prefixreg_save_area)
		first = 0;
	for_each_online_cpu(cpu) {
		if (first) {
			add_elf_notes(cpu);
			first = 0;
			continue;
		}
		phys_cpu = store_status_next(phys_cpu, this_cpu);
		if (phys_cpu == -1)
			break;
		add_elf_notes(cpu);
		phys_cpu++;
	}
	/* Copy dump CPU store status info to absolute zero */
	memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area));
}

#endif

/*
 * Start kdump: We expect here that a store status has been done on our CPU
 */
static void __do_machine_kdump(void *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;

	__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
	setup_regs();
	start_kdump(1);
#endif
}

/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
	int (*start_kdump)(int) = (void *)image->start;
	int rc;

	__arch_local_irq_stnsm(0xfb); /* disable DAT */
	rc = start_kdump(0);
	__arch_local_irq_stosm(0x04); /* enable DAT */
	return rc ? 0 : -EINVAL;
#else
	return -EINVAL;
#endif
}

/*
 * Map or unmap crashkernel memory
 */
static void crash_map_pages(int enable)
{
	unsigned long size = resource_size(&crashk_res);

	BUG_ON(crashk_res.start % KEXEC_CRASH_MEM_ALIGN ||
	       size % KEXEC_CRASH_MEM_ALIGN);
	if (enable)
		vmem_add_mapping(crashk_res.start, size);
	else
		vmem_remove_mapping(crashk_res.start, size);
}

/*
 * Map crashkernel memory
 */
void crash_map_reserved_pages(void)
{
	crash_map_pages(1);
}

/*
 * Unmap crashkernel memory
 */
void crash_unmap_reserved_pages(void)
{
	crash_map_pages(0);
}

/*
 * Give back memory to hypervisor before new kdump is loaded
 */
static int machine_kexec_prepare_kdump(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crashk_res.start),
			     PFN_DOWN(crashk_res.end - crashk_res.start + 1));
	return 0;
#else
	return -EINVAL;
#endif
}

int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	/* Can't replace kernel image since it is read-only. */
	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	if (image->type == KEXEC_TYPE_CRASH)
		return machine_kexec_prepare_kdump();

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to. */
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(lowcore_ptr);
	VMCOREINFO_SYMBOL(high_memory);
	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
}

void machine_shutdown(void)
{
}

/*
 * Do normal kexec
 */
static void __do_machine_kexec(void *data)
{
	relocate_kernel_t data_mover;
	struct kimage *image = data;

	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);
}

/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	struct kimage *image = data;

	pfault_fini();
	if (image->type == KEXEC_TYPE_CRASH)
		s390_reset_system(__do_machine_kdump, data);
	else
		s390_reset_system(__do_machine_kexec, data);
	disabled_wait((unsigned long) __builtin_return_address(0));
}

/*
 * Do either kdump or normal kexec. In case of kdump we first ask
 * purgatory, if kdump checksums are valid.
 */
void machine_kexec(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
		return;
	tracer_disable();
	smp_send_stop();
	smp_switch_to_ipl_cpu(__machine_kexec, image);
}