/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * RISC-V kernel entry: the boot image header, early trap/MMU setup, and
 * the bring-up paths for the boot hart and secondary harts.
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
#include <asm/usercfi.h>
#include "efi-header.S"

__HEAD
SYM_CODE_START(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
	.global relocate_enable_mmu
	/*
	 * Turn on paging and continue execution at the kernel's virtual
	 * address.
	 * In:  a0 = physical address of the page directory to switch to
	 *      ra = physical return address (fixed up to its virtual alias)
	 * Clobbers: a0, a1, a2, stvec.
	 */
relocate_enable_mmu:
	/* Relocate return address: ra += (kernel virt base - phys base) */
	la a1, kernel_map
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2			/* a1 = virt - phys offset */
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
	load_global_pointer

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifndef CONFIG_RISCV_M_MODE
	/* Enable time CSR */
	li t0, 0x2
	csrw CSR_SCOUNTEREN, t0
#endif

	/* Load the global pointer */
	load_global_pointer

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	add a2, a2, a1
	REG_L tp, (a2)			/* tp = this hart's idle task */
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	add a3, a3, a1
	REG_L sp, (a3)			/* sp = this hart's boot stack */

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate_enable_mmu
#endif
	call .Lsetup_trap_vector
#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_USER_CFI)
	/*
	 * Ask the SBI firmware (FWFT extension) to enable, and lock, the
	 * shadow-stack feature. If the call fails, record that user CFI
	 * backward-edge protection must stay disabled.
	 */
	li a7, SBI_EXT_FWFT
	li a6, SBI_EXT_FWFT_SET
	li a0, SBI_FWFT_SHADOW_STACK
	li a1, 1 /* enable supervisor access to the shadow stack */
	li a2, SBI_FWFT_SET_FLAG_LOCK
	ecall
	beqz a0, 1f			/* a0 == 0: SBI call succeeded */
	la a1, riscv_nousercfi
	li a0, CMDLINE_DISABLE_RISCV_USERCFI_BCFI
	REG_S a0, (a1)
1:
#endif
	scs_load_current
	call smp_callin
#endif /* CONFIG_SMP */

.align 2
.Lsecondary_park:
	/*
	 * Park this hart if we:
	 * - have too many harts on CONFIG_RISCV_BOOT_SPINWAIT
	 * - receive an early trap, before setup_trap_vector finished
	 * - fail in smp_callin(), as a successful one wouldn't return
	 */
	wfi
	j .Lsecondary_park

.align 2
.Lsetup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

SYM_CODE_END(_start)

SYM_CODE_START(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, .Lpmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
.Lpmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#else
	/* Enable time CSR */
	li t0, 0x2
	csrw CSR_SCOUNTEREN, t0
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
	load_global_pointer

	/*
	 * Disable FPU & VECTOR to detect illegal usage of
	 * floating point or vector in kernel space
	 */
	li t0, SR_FS_VS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	/* Park harts whose hartid is out of range for this kernel build */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for spinwait booting method */
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)		/* first hart reads back 0 and wins */
	bnez a3, .Lsecondary_start
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, .Lclear_bss_done
.Lclear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, .Lclear_bss
.Lclear_bss_done:
	/* Publish the boot hart's id (a0) for the rest of the kernel */
	la a2, boot_cpu_hartid
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	addi sp, sp, -PT_SIZE_ON_STACK
	scs_load_init_stack
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
#else
	mv a0, a1			/* a1 = DTB address handed over by firmware */
#endif /* CONFIG_BUILTIN_DTB */
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	call relocate_enable_mmu
#endif /* CONFIG_MMU */

	call .Lsetup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE
	addi sp, sp, -PT_SIZE_ON_STACK
#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_USER_CFI)
	/*
	 * Same SBI FWFT call as the secondary-hart path: enable and lock
	 * the shadow-stack feature, else mark user CFI as unavailable.
	 */
	li a7, SBI_EXT_FWFT
	li a6, SBI_EXT_FWFT_SET
	li a0, SBI_FWFT_SHADOW_STACK
	li a1, 1 /* enable supervisor access to the shadow stack */
	li a2, SBI_FWFT_SET_FLAG_LOCK
	ecall
	beqz a0, 1f			/* a0 == 0: SBI call succeeded */
	la a1, riscv_nousercfi
	li a0, CMDLINE_DISABLE_RISCV_USERCFI_BCFI
	REG_S a0, (a1)
1:
#endif
	scs_load_current

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* Index the per-hart spinwait pointer arrays by hartid (a0) */
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	la a2, __cpu_spinwait_task_pointer
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

SYM_CODE_END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
/*
 * Zero every integer register except ra, a0 and a1, clear the machine
 * scratch CSR, and (when the hart implements them, per misa) zero the FP
 * and vector register state, so no stale firmware/reset values leak into
 * the kernel. M-mode boot only.
 */
SYM_CODE_START_LOCAL(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Skip FP state unless misa reports the F or D extension */
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done_fpu

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
.Lreset_regs_done_fpu:
#endif /* CONFIG_FPU */

#ifdef CONFIG_RISCV_ISA_V
	/* Skip vector state unless misa reports the V extension */
	csrr t0, CSR_MISA
	li t1, COMPAT_HWCAP_ISA_V
	and t0, t0, t1
	beqz t0, .Lreset_regs_done_vector

	/*
	 * Clear vector registers and reset vcsr
	 * VLMAX has a defined value, VLEN is a constant,
	 * and this form of vsetvli is defined to set vl to VLMAX.
	 */
	li t1, SR_VS
	csrs CSR_STATUS, t1
	/*
	 * NOTE(review): per the Zicsr spec, csrs with rs1=x0 performs no
	 * CSR write — confirm this line actually resets vcsr as intended.
	 */
	csrs CSR_VCSR, x0
	vsetvli t1, x0, e8, m8, ta, ma
	vmv.v.i v0, 0
	vmv.v.i v8, 0
	vmv.v.i v16, 0
	vmv.v.i v24, 0
	/* note that the caller must clear SR_VS */
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
	ret
SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */