/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * Low-level boot entry for RISC-V. Boot protocol: a0 = hartid,
 * a1 = physical address of the devicetree blob (see the image
 * header below and Documentation/riscv/boot-image-header.rst).
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 * (c.li s4,-13 encodes as 0x5a4d, i.e. the little-endian bytes
	 * 'M' 'Z' of the PE/COFF DOS magic, while remaining a harmless
	 * instruction for non-EFI loaders that just jump to _start.)
	 */
	c.li s4,-13
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset(2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset(4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
	/*
	 * relocate_enable_mmu: turn on paging and relocate execution to the
	 * kernel's virtual address range.
	 * In:  a0 = physical address of the page directory to switch to
	 *      ra = physical return address (relocated to virtual on return)
	 * Clobbers a1, a2; returns at the virtual alias of the call site.
	 */
	.global relocate_enable_mmu
relocate_enable_mmu:
	/* Relocate return address: ra += (kernel virtual base - _start) */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	la a1, satp_mode
	REG_L a1, 0(a1)
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Now running at the virtual alias; set trap vector to spin forever
	 * to help debug any early fault from here on. */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer (norelax: gp itself is being set) */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	/*
	 * secondary_start_sbi: entry point for harts started via the SBI
	 * HSM extension. In: a0 = hartid, a1 = per-hart boot data (struct
	 * sbi_hart_boot_data, see asm/cpu_ops_sbi.h).
	 */
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* a0 contains the hartid & a1 contains boot data */
	li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
	XIP_FIXUP_OFFSET a2
	add a2, a2, a1
	REG_L tp, (a2)
	li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
	XIP_FIXUP_OFFSET a3
	add a3, a3, a1
	REG_L sp, (a3)

.Lsecondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
	/* Install the real exception handler and mark "in kernel" state. */
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory.  Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
	/* Park any hart whose id is beyond what the kernel was built for. */
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:

	/* The lottery system is only required for spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence: the first hart to
	 * atomically bump hart_lottery from 0 wins; all others see a
	 * non-zero prior value and become secondaries. */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start

#else
	/* hart_lottery in flash contains a magic number; the winner is the
	 * hart that swaps it into the (initially clear) RAM copy first. */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	XIP_FIXUP_FLASH_OFFSET a3
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start

#endif /* CONFIG_XIP_KERNEL */
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

#ifdef CONFIG_XIP_KERNEL
	/* Temporary stack past _end while .data is copied out of flash. */
	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
	XIP_FIXUP_OFFSET a0
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate_enable_mmu
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	/* Index the per-hart spinwait pointer arrays by hartid (a0). */
	slli a3, a0, LGREG
	la a1, __cpu_spinwait_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_spinwait_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence

	tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: zero the architectural register state, leaving only ra
 * (return address), a0 (hartid) and a1 (DTB pointer) intact.  Only used
 * when there is no firmware to have established a clean state for us.
 */
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	/* Only touch the FP register file if the hart implements F or D. */
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */