// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;
	int ret;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	ret = verify_pkcs7_signature(kernel, kernel_len,
				     kernel + kernel_len, sig_len,
				     VERIFY_USE_SECONDARY_KEYRING,
				     VERIFYING_MODULE_SIGNATURE,
				     NULL, NULL);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
		ret = verify_pkcs7_signature(kernel, kernel_len,
					     kernel + kernel_len, sig_len,
					     VERIFY_USE_PLATFORM_KEYRING,
					     VERIFYING_MODULE_SIGNATURE,
					     NULL, NULL);
	return ret;
}
#endif /* CONFIG_KEXEC_SIG */

/*
 * Pass the kernel entry point and image type (and, for kdump, the crash
 * memory region) to the purgatory code via its symbols.
 */
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

#ifdef CONFIG_CRASH_DUMP
	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
#endif
	return ret;
}

static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf = {};
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
#ifdef CONFIG_CRASH_DUMP
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
#endif

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf = {};
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
#ifdef CONFIG_CRASH_DUMP
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
#endif
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf = {};
	unsigned long addr;
	void *ptr, *end;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;

	/* Count the certificates in the IPL certificate list. */
	ptr = __va(ipl_cert_list_addr);
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

	/* Add each certificate to the IPL report. */
	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = __va(ipl_cert_list_addr);
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	ret = -ENOMEM;
	buf.buffer = ipl_report_finish(data->report);
	if (!buf.buffer)
		goto out;
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;
	image->arch.ipl_buf = buf.buffer;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

#ifdef CONFIG_CRASH_DUMP
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
#endif

	ret = kexec_add_buffer(&buf);
out:
	return ret;
}

void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
	struct s390_load_data data = {0};
	unsigned long minsize;
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	ret = -EINVAL;
	minsize = PARMAREA + offsetof(struct parmarea, command_line);
	if (image->kernel_buf_len < minsize)
		goto out;

	if (data.parm->max_command_line_size)
		max_command_line_size = data.parm->max_command_line_size;

	if (minsize + max_command_line_size < minsize)
		goto out;

	if (image->kernel_buf_len < minsize + max_command_line_size)
		goto out;

	if (image->cmdline_buf_len >= max_command_line_size) {
		pr_err("Kernel command line exceeds supported limit of %lu\n",
		       max_command_line_size);
		goto out;
	}

	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

#ifdef CONFIG_CRASH_DUMP
	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}
#endif

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

	if (data.kernel_mem == 0) {
		/*
		 * Kernel is loaded at address 0: store a PSW pointing to
		 * the entry point at the very start of the image and start
		 * from there.
		 */
		unsigned long restart_psw = 0x0008000080000000UL;

		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	const char *strtab, *name, *shstrtab;
	const Elf_Shdr *sechdrs;
	Elf_Rela *relas;
	int i, r_type;
	int ret;

	/* String & section header string table */
	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		}

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);

		if (r_type == R_390_PLT32DBL)
			r_type = R_390_PC32DBL;

		ret = arch_kexec_do_relocs(r_type, loc, val, addr);
		if (ret) {
			pr_err("Unknown rela relocation: %d\n", r_type);
			return -ENOEXEC;
		}
	}
	return 0;
}

int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	vfree(image->arch.ipl_buf);
	image->arch.ipl_buf = NULL;

	return kexec_image_post_load_cleanup_default(image);
}