/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
 */

#include "opt_ddb.h"
#include "opt_platform.h"
#include "opt_global.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/cons.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kdb.h>
#include <sys/msgbuf.h>
#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/fdt.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <machine/pte.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/armreg.h>
#include <machine/bus.h>
#include <sys/reboot.h>

#include <arm/ti/omap4/omap4_reg.h>

#define DEBUG
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif
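
/*
 * Note: debugf() expands to printf() whenever DEBUG is defined, and DEBUG
 * is defined unconditionally above, so the early-boot tracing below is
 * always compiled in.  The "args..." / ##args form is the GNU named
 * variadic-macro extension, accepted by the compilers used to build this
 * kernel.
 */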

/* Start of address space used for bootstrap map */
#define DEVMAP_BOOTSTRAP_MAP_START	0xE0000000

/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks etc.), rounded up to a multiple of 4.
 */
#define KERNEL_PT_MAX	78

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

extern unsigned char kernbase[];
extern unsigned char _etext[];
extern unsigned char _edata[];
extern unsigned char __bss_start[];
extern unsigned char _end[];

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

extern vm_offset_t pmap_bootstrap_lastaddr;
extern int *end;

struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];
vm_paddr_t dump_avail[4];
vm_offset_t physical_pages;
vm_offset_t pmap_bootstrap_lastaddr;
vm_paddr_t pmap_pa;

const struct pmap_devmap *pmap_devmap_bootstrap_table;
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

void set_stackptrs(int cpu);

static struct mem_region availmem_regions[FDT_MEM_REGIONS];
static int availmem_regions_sz;

static void print_kenv(void);
static void print_kernel_section_addr(void);

static void physmap_init(void);
static int platform_devmap_init(void);
void (*ti_cpu_reset)(void);

static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

static void
print_kernel_section_addr(void)
{

	debugf("kernel image addresses:\n");
	debugf(" kernbase       = 0x%08x\n", (uint32_t)kernbase);
	debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext);
	debugf(" _edata         = 0x%08x\n", (uint32_t)_edata);
	debugf(" __bss_start    = 0x%08x\n", (uint32_t)__bss_start);
	debugf(" _end           = 0x%08x\n", (uint32_t)_end);
}

static void
physmap_init(void)
{
	int i, j, cnt;
	vm_offset_t phys_kernelend, kernload;
	uint32_t s, e, sz;
	struct mem_region *mp, *mp1;

	phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
	kernload = KERNPHYSADDR;
	ti_cpu_reset = NULL;

	/*
	 * Remove kernel physical address range from avail
	 * regions list. Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
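	/*
	 * A hypothetical example of the trimming below: with
	 * kernload = 0x80200000 and phys_kernelend = 0x80600000, a region
	 * 0x80000000-0x90000000 that covers the whole kernel is split in
	 * two: its tail 0x80600000-0x90000000 is appended as a new entry
	 * and the original entry is clipped to end at kernload.  Regions
	 * that merely start or end inside the kernel are clipped on one
	 * side, and regions entirely inside the kernel are dropped.
	 */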
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
empty:
			debugf("skipping\n");
			bcopy(mp + 1, mp,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/* Fill in phys_avail table, based on availmem_regions */
	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
	}
	phys_avail[j] = 0;
	phys_avail[j + 1] = 0;
}

void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	void *kmdp;
	u_int l1pagetable;
	int i = 0, j = 0, err_devmap = 0;

	lastaddr = parse_boot_param(abp);
	memsize = 0;
	set_cpufuncs();

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);

	if (OF_init((void *)dtbp) != 0)
		while (1);
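
	/*
	 * There is no console at this point (cninit() runs later), so on
	 * OFW/FDT setup failure the only option is to spin; an attached
	 * hardware debugger can still inspect the hang.
	 */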

	/* Grab physical memory regions information from the device tree. */
	if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
	    &memsize) != 0)
		while (1);

	/* Platform-specific initialization */
	pmap_bootstrap_lastaddr = DEVMAP_BOOTSTRAP_MAP_START -
	    ARM_NOCACHE_KVA_SIZE;

	pcpu0_init();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

#define KERNEL_TEXT_BASE (KERNBASE)

	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * Define macros to simplify memory allocation: pages are carved
	 * from freemempos, and the physical address is derived from the
	 * constant KVA-to-PA offset of the kernel image.
	 */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += ((np) * PAGE_SIZE);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* The L1 page table must be L1_TABLE_SIZE (16 KB) aligned. */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	/*
	 * Allocate L2 page tables; a page holds several hardware L2 tables
	 * (PAGE_SIZE / L2_TABLE_SIZE_REAL of them), so a fresh page is
	 * allocated only when the previous one is fully carved up.
	 */
	for (i = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;
		}
	}

	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, (IRQ_STACK_SIZE * MAXCPU));
	valloc_pages(abtstack, (ABT_STACK_SIZE * MAXCPU));
	valloc_pages(undstack, (UND_STACK_SIZE * MAXCPU));
	valloc_pages(kernelstack, (KSTACK_PAGES * MAXCPU));

	init_param1();

	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of the kernel text and data using
	 * 1MB section mappings, and use L2 coarse tables for the rest of
	 * the initial kernel address space.
	 */
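	/*
	 * Sketch of the resulting layout (assuming 4 KB pages and 1 MB L1
	 * sections): the kernel image gets section mappings, the first
	 * l2size - 1 coarse tables cover the KVA from lastaddr (rounded
	 * down to a section boundary) upward, and the last table is kept
	 * for the high vectors page at ARM_VECTORS_HIGH.
	 */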
	/*
	 * Link L2 tables for mapping the remainder of the kernel
	 * (modulo 1MB) and kernel structures.
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Map pmap_devmap[] entries */
	err_devmap = platform_devmap_init();
	pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Only after the SoC register block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	cninit();

	physmem = memsize / PAGE_SIZE;

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kernel_section_addr();
	print_kenv();

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are bootstrapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
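	/*
	 * cpu_idcache_wbinv_all() is the cpufuncs hook that writes the
	 * D-cache back and invalidates both the I- and D-caches; a full
	 * flush is the simple, safe choice for a one-shot operation at
	 * boot time.
	 */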
	cpu_idcache_wbinv_all();

	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	init_proc0(kernelstack.pv_va);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	arm_dump_avail_init(memsize, sizeof(dump_avail) /
	    sizeof(dump_avail[0]));
	pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Prepare map of physical memory regions available to vm subsystem.
	 */
	physmap_init();

	/* Do basic tuning, hz etc */
	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}

void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}

#define FDT_DEVMAP_MAX	(2)		/* FIXME */
static struct pmap_devmap fdt_devmap[FDT_DEVMAP_MAX] = {
	{ 0, 0, 0, 0, 0, }
};

/*
 * Construct pmap_devmap[] with DT-derived config data.
 */
static int
platform_devmap_init(void)
{
	int i = 0;

#if defined(SOC_OMAP4)
	fdt_devmap[i].pd_va = 0xE8000000;
	fdt_devmap[i].pd_pa = 0x48000000;
	fdt_devmap[i].pd_size = 0x1000000;
	fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
	fdt_devmap[i].pd_cache = PTE_DEVICE;
	i++;
#elif defined(SOC_TI_AM335X)
	fdt_devmap[i].pd_va = 0xE4C00000;
	fdt_devmap[i].pd_pa = 0x44C00000;	/* L4_WKUP */
	fdt_devmap[i].pd_size = 0x400000;	/* 4 MB */
	fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
	fdt_devmap[i].pd_cache = PTE_DEVICE;
	i++;
#else
#error "Unknown SoC"
#endif

	pmap_devmap_bootstrap_table = &fdt_devmap[0];
	return (0);
}

struct arm32_dma_range *
bus_dma_get_range(void)
{

	return (NULL);
}

int
bus_dma_get_range_nb(void)
{

	return (0);
}

void
cpu_reset(void)
{

	if (ti_cpu_reset)
		(*ti_cpu_reset)();
	else
		printf("no cpu_reset implementation\n");
	printf("Reset failed!\n");
	while (1);
}