/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004-2006,2008 Kip Macy
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * Copyright (c) 2013 Roger Pau Monné <roger.pau@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/boot.h>
#include <sys/ctype.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/efi.h>
#include <sys/tslog.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <machine/_inttypes.h>
#include <machine/intr_machdep.h>
#include <x86/acpica_machdep.h>
#include <x86/apicvar.h>
#include <x86/init.h>
#include <machine/pc/bios.h>
#include <machine/smp.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/cpu.h>

#include <xen/xen-os.h>
#include <xen/hvm.h>
#include <xen/hypervisor.h>
#include <xen/xenstore/xenstorevar.h>
#include <xen/xen_pv.h>

#include <contrib/xen/arch-x86/cpuid.h>
#include <contrib/xen/arch-x86/hvm/start_info.h>
#include <contrib/xen/vcpu.h>

#include <dev/xen/timer/timer.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/* Native initial function */
extern u_int64_t hammer_time(u_int64_t, u_int64_t);
/* Xen initial function */
uint64_t hammer_time_xen(vm_paddr_t);

#define	MAX_E820_ENTRIES	128

/*--------------------------- Forward Declarations ---------------------------*/
static caddr_t xen_pvh_parse_preload_data(uint64_t);
static void pvh_parse_memmap(caddr_t, vm_paddr_t *, int *);

/*---------------------------- Extern Declarations ---------------------------*/
/*
 * Placed by the linker at the end of the bss section, which is the last
 * section loaded by Xen before loading the symtab and strtab.
 */
extern uint32_t end;

/*-------------------------------- Global Data -------------------------------*/
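/*
 * Early boot hooks installed in place of the bare-metal defaults by
 * hammer_time_xen() below.
 */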
struct init_ops xen_pvh_init_ops = {
	.parse_preload_data = xen_pvh_parse_preload_data,
	.early_clock_source_init = xen_clock_init,
	.early_delay = xen_delay,
	.parse_memmap = pvh_parse_memmap,
};

static struct bios_smap xen_smap[MAX_E820_ENTRIES];

static struct hvm_start_info *start_info;

/*-------------------------------- Xen PV init -------------------------------*/

static int
isxen(void)
{
	static int xen = -1;
	uint32_t base;
	u_int regs[4];

	if (xen != -1)
		return (xen);

	/*
	 * The full code for identifying which hypervisor we're running under
	 * is in sys/x86/x86/identcpu.c and runs later in the boot process;
	 * this check is only meant to tell a Xen PVH boot apart from a
	 * non-Xen one, so that some very early Xen-specific code can be
	 * skipped in the non-Xen case.
	 */
	xen = 0;
	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		do_cpuid(base, regs);
		if (regs[1] == XEN_CPUID_SIGNATURE_EBX &&
		    regs[2] == XEN_CPUID_SIGNATURE_ECX &&
		    regs[3] == XEN_CPUID_SIGNATURE_EDX) {
			xen = 1;
			break;
		}
	}
	return (xen);
}

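/*
 * Report a fatal error during very early boot: print via the Xen console
 * when running under Xen and halt.
 */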
#define CRASH(...) do {					\
	if (isxen())					\
		xc_printf(__VA_ARGS__);			\
	halt();						\
} while (0)

uint64_t
hammer_time_xen(vm_paddr_t start_info_paddr)
{
	struct hvm_modlist_entry *mod;
	uint64_t physfree;

	start_info = (struct hvm_start_info *)(start_info_paddr + KERNBASE);
	if (start_info->magic != XEN_HVM_START_MAGIC_VALUE) {
		CRASH("Unknown magic value in start_info struct: %#x\n",
		    start_info->magic);
	}

	/*
	 * Select the higher address to use as physfree: either after
	 * start_info, after the kernel, after the memory map or after any of
	 * the modules.  We assume enough memory to be available after the
	 * selected address for the needs of very early memory allocations.
	 */
	physfree = roundup2(start_info_paddr + sizeof(struct hvm_start_info),
	    PAGE_SIZE);
	physfree = MAX(roundup2((vm_paddr_t)_end - KERNBASE, PAGE_SIZE),
	    physfree);

	if (start_info->memmap_paddr != 0)
		physfree = MAX(roundup2(start_info->memmap_paddr +
		    start_info->memmap_entries *
		    sizeof(struct hvm_memmap_table_entry), PAGE_SIZE),
		    physfree);

	if (start_info->modlist_paddr != 0) {
		unsigned int i;

		if (start_info->nr_modules == 0) {
			CRASH(
			    "ERROR: modlist_paddr != 0 but nr_modules == 0\n");
		}
		mod = (struct hvm_modlist_entry *)
		    (start_info->modlist_paddr + KERNBASE);
		for (i = 0; i < start_info->nr_modules; i++)
			physfree = MAX(roundup2(mod[i].paddr + mod[i].size,
			    PAGE_SIZE), physfree);
	}

	/* Set the hooks for early functions that diverge from bare metal */
	init_ops = xen_pvh_init_ops;
	hvm_start_flags = start_info->flags;

	/* Now we can jump into the native init function */
	return (hammer_time(0, physfree));
}

/*-------------------------------- PV specific -------------------------------*/

#ifdef DDB
/*
 * The way Xen loads the symtab is different from the native boot loader,
 * because it's tailored for NetBSD.  So we have to adapt and use the same
 * method as NetBSD.  Portions of the code below have been picked from NetBSD:
 * sys/kern/kern_ksyms.c CVS Revision 1.71.
 */
static void
xen_pvh_parse_symtab(void)
{
	Elf_Ehdr *ehdr;
	Elf_Shdr *shdr;
	int i, j;

	ehdr = (Elf_Ehdr *)(&end + 1);
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) ||
	    ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    ehdr->e_version > 1) {
		if (isxen())
			xc_printf("Unable to load ELF symtab: invalid symbol table\n");
		return;
	}

	shdr = (Elf_Shdr *)((uint8_t *)ehdr + ehdr->e_shoff);
	/* Find the symbol table and the corresponding string table. */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (shdr[i].sh_type != SHT_SYMTAB)
			continue;
		if (shdr[i].sh_offset == 0)
			continue;
		ksymtab = (uintptr_t)((uint8_t *)ehdr + shdr[i].sh_offset);
		ksymtab_size = shdr[i].sh_size;
		j = shdr[i].sh_link;
		if (shdr[j].sh_offset == 0)
			continue;	/* Can this happen? */
		kstrtab = (uintptr_t)((uint8_t *)ehdr + shdr[j].sh_offset);
		break;
	}

	if ((ksymtab == 0 || kstrtab == 0) && isxen())
		xc_printf(
		    "Unable to load ELF symtab: could not find symtab or strtab\n");
}
#endif

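/*
 * Locate the preload metadata supplied by the boot loader (if any), relocate
 * it to account for Xen having moved the modules, and set up the static
 * kernel environment and boot flags.
 */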
static caddr_t
xen_pvh_parse_preload_data(uint64_t modulep)
{
	caddr_t kmdp;
	vm_ooffset_t off;
	vm_paddr_t metadata;
	char *envp;

	TSENTER();
	if (start_info->modlist_paddr != 0) {
		struct hvm_modlist_entry *mod;
		const char *cmdline;

		mod = (struct hvm_modlist_entry *)
		    (start_info->modlist_paddr + KERNBASE);
		cmdline = mod[0].cmdline_paddr ?
		    (const char *)(mod[0].cmdline_paddr + KERNBASE) : NULL;

		if (cmdline != NULL && strcmp(cmdline, "header") == 0) {
			struct xen_header *header;

			header = (struct xen_header *)(mod[0].paddr + KERNBASE);

			if ((header->flags & XENHEADER_HAS_MODULEP_OFFSET) !=
			    XENHEADER_HAS_MODULEP_OFFSET) {
				xc_printf("Unable to load module metadata\n");
				HYPERVISOR_shutdown(SHUTDOWN_crash);
			}

			preload_metadata = (caddr_t)(mod[0].paddr +
			    header->modulep_offset + KERNBASE);

			kmdp = preload_search_by_type("elf kernel");
			if (kmdp == NULL)
				kmdp = preload_search_by_type("elf64 kernel");
			if (kmdp == NULL) {
				xc_printf("Unable to find kernel\n");
				HYPERVISOR_shutdown(SHUTDOWN_crash);
			}

			/*
			 * Xen has relocated the metadata and the modules, so
			 * we need to recalculate its position.  This is done
			 * by saving the original modulep address and then
			 * calculating the offset from the real modulep
			 * position.
			 */
			metadata = MD_FETCH(kmdp, MODINFOMD_MODULEP,
			    vm_paddr_t);
			off = mod[0].paddr + header->modulep_offset - metadata +
			    KERNBASE;
		} else {
			preload_metadata = (caddr_t)(mod[0].paddr + KERNBASE);

			kmdp = preload_search_by_type("elf kernel");
			if (kmdp == NULL)
				kmdp = preload_search_by_type("elf64 kernel");
			if (kmdp == NULL) {
				xc_printf("Unable to find kernel\n");
				HYPERVISOR_shutdown(SHUTDOWN_crash);
			}

			metadata = MD_FETCH(kmdp, MODINFOMD_MODULEP,
			    vm_paddr_t);
			off = mod[0].paddr + KERNBASE - metadata;
		}

		preload_bootstrap_relocate(off);

		boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
		envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
		if (envp != NULL)
			envp += off;
		init_static_kenv(envp, 0);

		if (MD_FETCH(kmdp, MODINFOMD_EFI_MAP, void *) != NULL)
			strlcpy(bootmethod, "UEFI", sizeof(bootmethod));
		else
			strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
	} else {
		static char kenv_buffer[PAGE_SIZE];

		/* Provide a static kenv so the command line can be parsed. */
		init_static_kenv(kenv_buffer, sizeof(kenv_buffer));

		/* Parse the extra boot information given by Xen */
		if (start_info->cmdline_paddr != 0)
			boot_parse_cmdline_delim(
			    (char *)(start_info->cmdline_paddr + KERNBASE),
			    ", \t\n");
		kmdp = NULL;
		strlcpy(bootmethod, "PVH", sizeof(bootmethod));
	}

	boothowto |= boot_env_to_howto();

	/*
	 * When booted as a PVH guest, FreeBSD must not use the RSDP address
	 * hint provided by the loader because it points to the native set of
	 * ACPI tables instead of the ones crafted by Xen.
	 */
	acpi_set_root(start_info->rsdp_paddr);

#ifdef DDB
	xen_pvh_parse_symtab();
#endif
	TSEXIT();
	return (kmdp);
}

static void
pvh_parse_memmap_start_info(caddr_t kmdp, vm_paddr_t *physmap,
    int *physmap_idx)
{
	const struct hvm_memmap_table_entry *entries;
	size_t nentries;
	size_t i;

	/* Extract from HVM start_info. */
	entries = (struct hvm_memmap_table_entry *)
	    (start_info->memmap_paddr + KERNBASE);
	nentries = start_info->memmap_entries;

	/* Convert into E820 format and handle one by one. */
	for (i = 0; i < nentries; i++) {
		struct bios_smap entry;

		entry.base = entries[i].addr;
		entry.length = entries[i].size;

		/*
		 * Luckily for us, the XEN_HVM_MEMMAP_TYPE_* values exactly
		 * match the SMAP_TYPE_* values so we don't need to translate
		 * anything here.
		 */
		entry.type = entries[i].type;

		bios_add_smap_entries(&entry, 1, physmap, physmap_idx);
	}
}

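/*
 * Fetch the E820 memory map from the hypervisor with the XENMEM_memory_map
 * hypercall; used when start_info does not provide a memory map.
 */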
static void
xen_pvh_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx)
{
	struct xen_memory_map memmap;
	u_int32_t size;
	int rc;

	/* We should only reach here if we're running under Xen. */
	KASSERT(isxen(), ("xen_pvh_parse_memmap reached when !Xen"));

	/* Fetch the E820 map from Xen */
	memmap.nr_entries = MAX_E820_ENTRIES;
	set_xen_guest_handle(memmap.buffer, xen_smap);
	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc) {
		xc_printf("ERROR: unable to fetch Xen E820 memory map: %d\n",
		    rc);
		HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	size = memmap.nr_entries * sizeof(xen_smap[0]);

	bios_add_smap_entries(xen_smap, size, physmap, physmap_idx);
}

static void
pvh_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx)
{

	/*
	 * If version >= 1 and memmap_paddr != 0, use the memory map provided
	 * in the start_info structure; if not, we're running under legacy
	 * Xen and need to use the Xen hypercall.
	 */
	if ((start_info->version >= 1) && (start_info->memmap_paddr != 0))
		pvh_parse_memmap_start_info(kmdp, physmap, physmap_idx);
	else
		xen_pvh_parse_memmap(kmdp, physmap, physmap_idx);
}