/* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created : 17/09/94
 * Updated : 18/04/01 updated for new wscons
 */

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/physmem.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/asm.h>
#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/sysarch.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <machine/ofw_machdep.h>
#endif

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
    defined(COMPAT_FREEBSD9)
#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
#endif

#if __ARM_ARCH < 6
#error FreeBSD requires ARMv6 or later
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

/* The address at which the kernel was loaded.  Set early in initarm(). */
vm_paddr_t arm_physmem_kernaddr;

extern int *end;

#ifdef FDT
vm_paddr_t pmap_pa;
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#endif /* FDT */

#ifdef PLATFORM
static delay_func *delay_impl;
static void *delay_arg;
#endif

#if defined(SOCDEV_PA)
#if !defined(SOCDEV_VA)
#error SOCDEV_PA defined, but not SOCDEV_VA
#endif
uintptr_t socdev_va = SOCDEV_VA;
#endif

struct kva_md_info kmi;

/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
extern unsigned int page0[], page0_data[];

void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (int *) va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;
}
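
/*
 * Usage sketch: initarm() below takes over every vector slot in the high
 * vector page with
 *
 *	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 *
 * The "which" argument is a bitmask in which bit N selects vector slot N,
 * so a hypothetical caller could relocate only the first two of the
 * ARM_NVEC slots by passing (1 << 0) | (1 << 1) instead of ARM_VEC_ALL.
 */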

static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_free_count()),
	    (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
	if (bootverbose) {
		physmem_print_tables();
		devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}
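
/*
 * Illustrative (hypothetical) caller, not taken from this file: code that
 * has written instructions into a buffer through the D-cache would call
 *
 *	cpu_flush_dcache(trampoline_buf, trampoline_len);
 *
 * so the data reaches the point of coherency before the corresponding
 * I-cache lines are invalidated and the new instructions are executed.
 */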

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);

	if (pc->pc_clock == 0)
		return (EOPNOTSUPP);

	*rate = pc->pc_clock;

	return (0);
}
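
/*
 * Minimal usage sketch (illustrative only): the call fails with EINVAL for
 * an unknown CPU id and EOPNOTSUPP when the per-CPU clock rate has not
 * been filled in.
 *
 *	uint64_t rate;
 *
 *	if (cpu_est_clockrate(PCPU_GET(cpuid), &rate) == 0)
 *		printf("CPU clock: %ju Hz\n", (uintmax_t)rate);
 */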

void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		cpu_sleep(0);
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

void
cpu_initclocks(void)
{

#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
}

#ifdef PLATFORM
void
arm_set_delay(delay_func *impl, void *arg)
{

	KASSERT(impl != NULL, ("No DELAY implementation"));
	delay_impl = impl;
	delay_arg = arg;
}

void
DELAY(int usec)
{

	TSENTER();
	delay_impl(usec, delay_arg);
	TSEXIT();
}
#endif
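
/*
 * Sketch of how platform code is expected to hook DELAY(); the driver and
 * softc names are hypothetical, only arm_set_delay() and the
 * (int usec, void *arg) callback shape come from this file:
 *
 *	static void
 *	my_timer_delay(int usec, void *arg)
 *	{
 *		struct my_timer_softc *sc = arg;
 *
 *		// spin on the timer counter for 'usec' microseconds
 *	}
 *
 *	arm_set_delay(my_timer_delay, sc);	// e.g. from the timer attach
 *
 * Until some platform registers an implementation, delay_impl is NULL and
 * DELAY() must not be used.
 */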

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_mpidr = 0xffffffff;
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		restore_interrupts(cspr);
	}
}
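
/*
 * Nesting sketch (illustrative, not from this file): interrupts are
 * disabled only by the outermost spinlock_enter() and restored only by
 * the matching outermost spinlock_exit():
 *
 *	spinlock_enter();	// disables PSR_I, saves old CPSR, count = 1
 *	spinlock_enter();	// count = 2, no further hardware change
 *	spinlock_exit();	// count = 1, interrupts still disabled
 *	spinlock_exit();	// count = 0, critical_exit() + CPSR restored
 */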

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}
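
/*
 * Minimal sketch of the intended use from a debugger entry path; the local
 * pcb and the stack_trace_pcb() consumer are hypothetical names, only
 * makectx() itself is defined here:
 *
 *	struct pcb pcb;
 *
 *	makectx(tf, &pcb);
 *	stack_trace_pcb(&pcb);	// walk frames starting at pcb_regs.sf_pc
 */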

void
pcpu0_init(void)
{
	set_curthread(&thread0);
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	pcpup->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	PCPU_SET(curthread, &thread0);
}

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = kstack_pages;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_pcb->pcb_vfpsaved = &thread0.td_pcb->pcb_vfpstate;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}
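
/*
 * Worked example, assuming the per-mode stacks allocated in initarm()
 * below (IRQ_STACK_SIZE pages per CPU, MAXCPU CPUs): CPU 0's IRQ stack
 * occupies [irqstack, irqstack + IRQ_STACK_SIZE * PAGE_SIZE).  ARM uses a
 * full-descending stack (STMFD), so the mode's initial r13 must be the
 * top of that region, irqstack + IRQ_STACK_SIZE * PAGE_SIZE, which is the
 * (cpu + 1) factor used above.
 */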

static void
arm_kdb_init(void)
{

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

#ifdef FDT
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
	phandle_t root;
	char dts_version[255];
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
	arm_parse_fdt_bootargs();
#endif

#ifdef EFI
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
			panic("Cannot get physical memory regions");
	}
	physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Setup kernel page tables and switch to kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * If EARLY_PRINTF support is enabled, we need to re-establish the
	 * mapping after pmap_bootstrap_prepare() switches to new page tables.
	 * Note that we can only do the remapping if the VA is outside the
	 * kernel, now that we have real virtual (not VA=PA) mappings in effect.
	 * Early printf does not work between the time pmap_set_tex() does
	 * cp15_prrr_set() and this code remaps the VA.
	 */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
	pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024,
	    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#endif

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap();
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	/*
	 * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(),
	 * undo it now that the normal console printf works.
	 */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
	pmap_kremove(SOCDEV_VA);
#endif

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	arm_print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	root = OF_finddevice("/");
	if (OF_getprop(root, "freebsd,dts-version", dts_version,
	    sizeof(dts_version)) > 0) {
		if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
			printf("WARNING: DTB version is %s while kernel expects %s, "
			    "please update the DTB in the ESP\n",
			    dts_version, LINUX_DTS_VERSION);
	} else {
		printf("WARNING: Cannot find freebsd,dts-version property, "
		    "cannot check DTB compliance\n");
	}

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/*
	 * Exclude the kernel (and all the things we allocated which
	 * immediately follow the kernel) from the VM allocation pool but not
	 * from crash dumps.  virtual_avail is a global variable which tracks
	 * the kva we've "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	arm_kdb_init();
	/* Apply possible BP hardening. */
	cpuinfo_init_bp_hardening();
	return ((void *)STACKALIGN(thread0.td_pcb));
}
#endif /* FDT */