/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Mark Brinicombe
 *    for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created : 17/09/94
 * Updated : 18/04/01 updated for new wscons
 */

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/efi_map.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/physmem.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/asm.h>
#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/sysarch.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <machine/ofw_machdep.h>
#endif

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
    defined(COMPAT_FREEBSD9)
#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
#endif

#if __ARM_ARCH < 6
#error FreeBSD requires ARMv6 or later
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

/* The address at which the kernel was loaded. Set early in initarm(). */
vm_paddr_t arm_physmem_kernaddr;

extern int *end;

#ifdef FDT
vm_paddr_t pmap_pa;
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#endif /* FDT */

#ifdef PLATFORM
static delay_func *delay_impl;
static void *delay_arg;
#endif

#if defined(SOCDEV_PA)
#if !defined(SOCDEV_VA)
#error SOCDEV_PA defined, but not SOCDEV_VA
#endif
uintptr_t socdev_va = SOCDEV_VA;
#endif

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *    Initialize the vector page, and select whether or not to
 *    relocate the vectors.
 *
 *    NOTE: We expect the vector page to be mapped at its expected
 *    destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
        unsigned int *vectors = (int *) va;
        unsigned int *vectors_data = vectors + (page0_data - page0);
        int vec;

        /*
         * Loop through the vectors we're taking over, and copy the
         * vector's insn and data word.
         */
        for (vec = 0; vec < ARM_NVEC; vec++) {
                if ((which & (1 << vec)) == 0) {
                        /* Don't want to take over this vector. */
                        continue;
                }
                vectors[vec] = page0[vec];
                vectors_data[vec] = page0_data[vec];
        }

        /* Now sync the vectors. */
        icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));

        vector_page = va;
}

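/*
 * cpu_startup() runs as a SYSINIT at SI_SUB_CPU: it identifies the CPU,
 * initializes the kernel VM submaps and buffer cache, prints the real and
 * available memory sizes, and finishes thread0's PCB (kernel stack pointer
 * and pmap page directory).
 */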
static void
cpu_startup(void *dummy)
{
        struct pcb *pcb = thread0.td_pcb;
        const unsigned int mbyte = 1024 * 1024;

        identify_arm_cpu();

        vm_ksubmap_init(&kmi);

        /*
         * Display the RAM layout.
         */
        printf("real memory = %ju (%ju MB)\n",
            (uintmax_t)arm32_ptob(realmem),
            (uintmax_t)arm32_ptob(realmem) / mbyte);
        printf("avail memory = %ju (%ju MB)\n",
            (uintmax_t)arm32_ptob(vm_free_count()),
            (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
        if (bootverbose) {
                physmem_print_tables();
                devmap_print_table();
        }

        bufinit();
        vm_pager_bufferinit();
        pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
            USPACE_SVC_STACK_TOP;
        pmap_set_pcb_pagedir(kernel_pmap, pcb);
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

        dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
        struct pcpu *pc;

        pc = pcpu_find(cpu_id);
        if (pc == NULL || rate == NULL)
                return (EINVAL);

        if (pc->pc_clock == 0)
                return (EOPNOTSUPP);

        *rate = pc->pc_clock;

        return (0);
}

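/*
 * Idle the CPU.  Interrupts stay disabled (spinlock_enter()) across the
 * runnable check and cpu_sleep() so a wakeup cannot slip in between them;
 * cpu_idleclock()/cpu_activeclock() manage the idle tick when the CPU is
 * not "busy".
 */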
void
cpu_idle(int busy)
{

        CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
        spinlock_enter();
        if (!busy)
                cpu_idleclock();
        if (!sched_runnable())
                cpu_sleep(0);
        if (!busy)
                cpu_activeclock();
        spinlock_exit();
        CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

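/*
 * Wake a CPU sitting in cpu_idle().  There is no cheap way to nudge an
 * idle core from here; returning 0 tells the caller this hook did not wake
 * the CPU, so an interrupt (IPI) is used instead.
 */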
int
cpu_idle_wakeup(int cpu)
{

        return (0);
}

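/*
 * Start the clocks on this CPU.  The boot CPU performs the full event-timer
 * setup; on SMP kernels the application processors do only their per-CPU
 * portion.
 */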
void
cpu_initclocks(void)
{

#ifdef SMP
        if (PCPU_GET(cpuid) == 0)
                cpu_initclocks_bsp();
        else
                cpu_initclocks_ap();
#else
        cpu_initclocks_bsp();
#endif
}

#ifdef PLATFORM
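/*
 * Register the platform's DELAY() implementation.  A timer driver calls
 * this once its hardware is running, e.g. (hypothetical driver code):
 *
 *      arm_set_delay(my_timer_delay, my_timer_softc);
 *
 * where my_timer_delay(int usec, void *arg) spins on the timer counter.
 * Until a driver registers an implementation, DELAY() must not be used on
 * PLATFORM kernels.
 */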
void
arm_set_delay(delay_func *impl, void *arg)
{

        KASSERT(impl != NULL, ("No DELAY implementation"));
        delay_impl = impl;
        delay_arg = arg;
}

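/*
 * Busy-wait for roughly 'usec' microseconds using the function registered
 * via arm_set_delay(); e.g. DELAY(1000) waits about one millisecond.  The
 * TSENTER()/TSEXIT() markers record the call in the boot-time TSLOG trace
 * when that option is enabled.
 */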
void
DELAY(int usec)
{

        TSENTER();
        delay_impl(usec, delay_arg);
        TSEXIT();
}
#endif

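/*
 * Early per-CPU structure setup.  The MPIDR is not known yet, so mark it
 * invalid here; it is filled in with the real value once the CPU comes up
 * (see pcpu0_init() for the boot CPU).
 */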
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_mpidr = 0xffffffff;
}

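/*
 * Spinlock entry/exit for this architecture: disable IRQs (PSR_I) and enter
 * a critical section on the first acquisition, keeping a per-thread nesting
 * count so that recursive use saves and restores the interrupt state only
 * once.
 */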
void
spinlock_enter(void)
{
        struct thread *td;
        register_t cspr;

        td = curthread;
        if (td->td_md.md_spinlock_count == 0) {
                cspr = disable_interrupts(PSR_I);
                td->td_md.md_spinlock_count = 1;
                td->td_md.md_saved_cspr = cspr;
                critical_enter();
        } else
                td->td_md.md_spinlock_count++;
}

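/*
 * Drop one level of spinlock nesting; the saved CPSR (and thus the
 * interrupt enable state) is only restored when the outermost spinlock is
 * released.
 */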
void
spinlock_exit(void)
{
        struct thread *td;
        register_t cspr;

        td = curthread;
        cspr = td->td_md.md_saved_cspr;
        td->td_md.md_spinlock_count--;
        if (td->td_md.md_spinlock_count == 0) {
                critical_exit();
                restore_interrupts(cspr);
        }
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
        pcb->pcb_regs.sf_r4 = tf->tf_r4;
        pcb->pcb_regs.sf_r5 = tf->tf_r5;
        pcb->pcb_regs.sf_r6 = tf->tf_r6;
        pcb->pcb_regs.sf_r7 = tf->tf_r7;
        pcb->pcb_regs.sf_r8 = tf->tf_r8;
        pcb->pcb_regs.sf_r9 = tf->tf_r9;
        pcb->pcb_regs.sf_r10 = tf->tf_r10;
        pcb->pcb_regs.sf_r11 = tf->tf_r11;
        pcb->pcb_regs.sf_r12 = tf->tf_r12;
        pcb->pcb_regs.sf_pc = tf->tf_pc;
        pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
        pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

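/*
 * Set up the boot CPU's pcpu data: make thread0 the current thread and
 * record the affinity bits of this core's MPIDR so the boot core can later
 * be identified by its hardware ID.
 */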
void
pcpu0_init(void)
{
        set_curthread(&thread0);
        pcpu_init(pcpup, 0, sizeof(struct pcpu));
        pcpup->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
        PCPU_SET(curthread, &thread0);
}

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
        proc_linkup0(&proc0, &thread0);
        thread0.td_kstack = kstack;
        thread0.td_kstack_pages = kstack_pages;
        thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
            thread0.td_kstack_pages * PAGE_SIZE) - 1;
        thread0.td_pcb->pcb_flags = 0;
        thread0.td_pcb->pcb_fpflags = 0;
        thread0.td_pcb->pcb_vfpcpu = -1;
        thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
        thread0.td_pcb->pcb_vfpsaved = &thread0.td_pcb->pcb_vfpstate;
        thread0.td_frame = &proc0_tf;
        pcpup->pc_curpcb = thread0.td_pcb;
}

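/*
 * Point the banked IRQ/ABT/UND-mode stack pointers (banked r13) at this
 * CPU's slice of each mode's stack area.  ARM stacks are full descending,
 * so "(cpu + 1)" selects the top end of the slice.
 */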
void
set_stackptrs(int cpu)
{

        set_stackptr(PSR_IRQ32_MODE,
            irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
        set_stackptr(PSR_ABT32_MODE,
            abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
        set_stackptr(PSR_UND32_MODE,
            undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}

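/*
 * Initialize the kernel debugger backends and, if the boot flags (boot -d)
 * request it and KDB is compiled in, drop into the debugger right away.
 */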
static void
arm_kdb_init(void)
{

        kdb_init();
#ifdef KDB
        if (boothowto & RB_KDB)
                kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

#ifdef FDT
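/*
 * Machine-dependent early setup for FDT-based kernels, called from the
 * locore startup code.  It parses the boot parameters and device tree,
 * registers physical memory, builds the kernel page tables, sets up the
 * exception vectors and per-mode stacks, initializes the console and
 * proc0, and returns the value used as the initial kernel stack pointer.
 */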
void *
initarm(struct arm_boot_params *abp)
{
        struct mem_region mem_regions[FDT_MEM_REGIONS];
        vm_paddr_t lastaddr;
        vm_offset_t dtbp, kernelstack, dpcpu;
        char *env;
        int err_devmap, mem_regions_sz;
        phandle_t root;
        char dts_version[255];
#ifdef EFI
        struct efi_map_header *efihdr;
#endif

        /* get last allocated physical address */
        arm_physmem_kernaddr = abp->abp_physaddr;
        lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

        set_cpufuncs();
        cpuinfo_init();

        /*
         * Find the dtb passed in by the boot loader.
         */
        dtbp = MD_FETCH(preload_kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
        /*
         * In case the device tree blob was not retrieved (from metadata) try
         * to use the statically embedded one.
         */
        if (dtbp == (vm_offset_t)NULL)
                dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

        if (OF_install(OFW_FDT, 0) == FALSE)
                panic("Cannot install FDT");

        if (OF_init((void *)dtbp) != 0)
                panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
        arm_parse_fdt_bootargs();
#endif

#ifdef EFI
        efihdr = (struct efi_map_header *)preload_search_info(preload_kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_MAP);
        if (efihdr != NULL) {
                efi_map_add_entries(efihdr);
                efi_map_exclude_entries(efihdr);
        } else
#endif
        {
                /* Grab physical memory regions information from device tree. */
                if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
                        panic("Cannot get physical memory regions");

                physmem_hardware_regions(mem_regions, mem_regions_sz);

                /* Grab reserved memory regions information from device tree. */
                if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
                        physmem_exclude_regions(mem_regions, mem_regions_sz,
                            EXFLAG_NODUMP | EXFLAG_NOALLOC);
        }

        /*
         * Set TEX remapping registers.
         * Set up kernel page tables and switch to the kernel L1 page table.
         */
        pmap_set_tex();
        pmap_bootstrap_prepare(lastaddr);

        /*
         * If EARLY_PRINTF support is enabled, we need to re-establish the
         * mapping after pmap_bootstrap_prepare() switches to new page tables.
         * Note that we can only do the remapping if the VA is outside the
         * kernel, now that we have real virtual (not VA=PA) mappings in effect.
         * Early printf does not work between the time pmap_set_tex() does
         * cp15_prrr_set() and this code remaps the VA.
         */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
        pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024,
            VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#endif

        /*
         * Now that proper page tables are installed, call cpu_setup() to enable
         * instruction and data caches and other chip-specific features.
         */
        cpu_setup();

        /* Platform-specific initialisation */
        platform_probe_and_attach();
        pcpu0_init();

        /* Do basic tuning, hz etc */
        init_param1();

        /*
         * Allocate a page for the system page mapped to 0xffff0000.
         * This page will just contain the system vectors and can be
         * shared by all processes.
         */
        systempage = pmap_preboot_get_pages(1);

        /* Map the vector page. */
        pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
        if (virtual_end >= ARM_VECTORS_HIGH)
                virtual_end = ARM_VECTORS_HIGH - 1;

        /* Allocate dynamic per-cpu area. */
        dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
        dpcpu_init((void *)dpcpu, 0);

        /* Allocate stacks for all modes */
        irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
        abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
        undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
        kernelstack = pmap_preboot_get_vpages(kstack_pages);

        /* Allocate message buffer. */
        msgbufp = (void *)pmap_preboot_get_vpages(
            round_page(msgbufsize) / PAGE_SIZE);

        /*
         * Pages were allocated during the secondary bootstrap for the
         * stacks for different CPU modes.
         * We must now set the r13 registers in the different CPU modes to
         * point to these stacks.
         * Since the ARM stacks use STMFD etc. we must set r13 to the top end
         * of the stack memory.
         */
        set_stackptrs(0);
        mutex_init();

        /* Establish static device mappings. */
        err_devmap = platform_devmap_init();
        devmap_bootstrap();
        vm_max_kernel_address = platform_lastaddr();

        /*
         * Only after the SOC registers block is mapped can we perform device
         * tree fixups, as they may attempt to read parameters from hardware.
         */
        OF_interpret("perform-fixup", 0);
        platform_gpio_init();
        cninit();

        /*
         * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(),
         * undo it now that the normal console printf works.
         */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
        pmap_kremove(SOCDEV_VA);
#endif

        debugf("initarm: console initialized\n");
        debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)preload_kmdp);
        debugf(" boothowto = 0x%08x\n", boothowto);
        debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
        debugf(" lastaddr1: 0x%08x\n", lastaddr);
        arm_print_kenv();

        env = kern_getenv("kernelname");
        if (env != NULL)
                strlcpy(kernelname, env, sizeof(kernelname));

        if (err_devmap != 0)
                printf("WARNING: could not fully configure devmap, error=%d\n",
                    err_devmap);

        platform_late_init();

        root = OF_finddevice("/");
        if (OF_getprop(root, "freebsd,dts-version", dts_version,
            sizeof(dts_version)) > 0) {
                if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
                        printf("WARNING: DTB version is %s while kernel expects %s, "
                            "please update the DTB in the ESP\n",
                            dts_version,
                            LINUX_DTS_VERSION);
        } else {
                printf("WARNING: Cannot find freebsd,dts-version property, "
                    "cannot check DTB compliance\n");
        }

        /*
         * We must now clean the cache again.
         * Cleaning may be done by reading new data to displace any
         * dirty data in the cache. This will have happened in cpu_setttb()
         * but since we are bootstrapping the addresses used for the read
         * may have just been remapped and thus the cache could be out
         * of sync. A re-clean after the switch will cure this.
         * After booting there are no gross relocations of the kernel thus
         * this problem will not occur after initarm().
         */
        /* Set stack for exception handlers */
        undefined_init();
        init_proc0(kernelstack);
        arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
        enable_interrupts(PSR_A);
        pmap_bootstrap(0);

        /*
         * Exclude the kernel (and all the things we allocated which
         * immediately follow the kernel) from the VM allocation pool but not
         * from crash dumps. virtual_avail is a global variable which tracks
         * the kva we've "allocated" while setting up pmaps.
         *
         * Prepare the list of physical memory available to the vm subsystem.
         */
        physmem_exclude_region(abp->abp_physaddr,
            pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
        physmem_init_kernel_globals();

        init_param2(physmem);
        /* Init message buffer. */
        msgbufinit(msgbufp, msgbufsize);
        dbg_monitor_init();
        arm_kdb_init();
        /* Apply possible BP hardening. */
        cpuinfo_init_bp_hardening();

#ifdef EFI
        if (boothowto & RB_VERBOSE) {
                if (efihdr != NULL)
                        efi_map_print_entries(efihdr);
        }
#endif

        return ((void *)STACKALIGN(thread0.td_pcb));
}
#endif /* FDT */