1 /*- 2 * Copyright (c) 1994-1998 Mark Brinicombe. 3 * Copyright (c) 1994 Brini. 4 * All rights reserved. 5 * 6 * This code is derived from software written for Brini by Mark Brinicombe 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Brini. 19 * 4. The name of the company nor the name of the author may be used to 20 * endorse or promote products derived from this software without specific 21 * prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED 24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45 36 */ 37 38 #include "opt_ddb.h" 39 #include "opt_platform.h" 40 #include "opt_global.h" 41 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #define _ARM32_BUS_DMA_PRIVATE 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/sysproto.h> 49 #include <sys/signalvar.h> 50 #include <sys/imgact.h> 51 #include <sys/kernel.h> 52 #include <sys/ktr.h> 53 #include <sys/linker.h> 54 #include <sys/lock.h> 55 #include <sys/malloc.h> 56 #include <sys/mutex.h> 57 #include <sys/pcpu.h> 58 #include <sys/proc.h> 59 #include <sys/ptrace.h> 60 #include <sys/cons.h> 61 #include <sys/bio.h> 62 #include <sys/bus.h> 63 #include <sys/buf.h> 64 #include <sys/exec.h> 65 #include <sys/kdb.h> 66 #include <sys/msgbuf.h> 67 #include <machine/reg.h> 68 #include <machine/cpu.h> 69 #include <machine/fdt.h> 70 71 #include <dev/fdt/fdt_common.h> 72 #include <dev/ofw/openfirm.h> 73 74 #include <vm/vm.h> 75 #include <vm/pmap.h> 76 #include <vm/vm_object.h> 77 #include <vm/vm_page.h> 78 #include <vm/vm_pager.h> 79 #include <vm/vm_map.h> 80 #include <machine/pte.h> 81 #include <machine/pmap.h> 82 #include <machine/vmparam.h> 83 #include <machine/pcb.h> 84 #include <machine/undefined.h> 85 #include <machine/machdep.h> 86 #include <machine/metadata.h> 87 #include <machine/armreg.h> 88 #include <machine/bus.h> 89 #include <sys/reboot.h> 90 91 #include <arm/ti/omap4/omap4_reg.h> 92 93 #define DEBUG 94 #ifdef DEBUG 95 #define debugf(fmt, args...) printf(fmt, ##args) 96 #else 97 #define debugf(fmt, args...) 98 #endif 99 100 /* Start of address space used for bootstrap map */ 101 #define DEVMAP_BOOTSTRAP_MAP_START 0xE0000000 102 103 /* 104 * This is the number of L2 page tables required for covering max 105 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, 106 * stacks etc.), uprounded to be divisible by 4. 107 */ 108 #define KERNEL_PT_MAX 78 109 110 extern unsigned char kernbase[]; 111 extern unsigned char _etext[]; 112 extern unsigned char _edata[]; 113 extern unsigned char __bss_start[]; 114 extern unsigned char _end[]; 115 116 #ifdef DDB 117 extern vm_offset_t ksym_start, ksym_end; 118 #endif 119 120 extern u_int data_abort_handler_address; 121 extern u_int prefetch_abort_handler_address; 122 extern u_int undefined_handler_address; 123 124 extern vm_offset_t pmap_bootstrap_lastaddr; 125 extern int *end; 126 127 struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; 128 129 /* Physical and virtual addresses for some global pages */ 130 vm_paddr_t phys_avail[10]; 131 vm_paddr_t dump_avail[4]; 132 vm_offset_t physical_pages; 133 vm_offset_t pmap_bootstrap_lastaddr; 134 vm_paddr_t pmap_pa; 135 136 const struct pmap_devmap *pmap_devmap_bootstrap_table; 137 struct pv_addr systempage; 138 struct pv_addr msgbufpv; 139 struct pv_addr irqstack; 140 struct pv_addr undstack; 141 struct pv_addr abtstack; 142 struct pv_addr kernelstack; 143 144 static struct mem_region availmem_regions[FDT_MEM_REGIONS]; 145 static int availmem_regions_sz; 146 147 static void print_kenv(void); 148 static void print_kernel_section_addr(void); 149 150 static void physmap_init(void); 151 static int platform_devmap_init(void); 152 void (*ti_cpu_reset)(void); 153 154 static char * 155 kenv_next(char *cp) 156 { 157 158 if (cp != NULL) { 159 while (*cp != 0) 160 cp++; 161 cp++; 162 if (*cp == 0) 163 cp = NULL; 164 } 165 return (cp); 166 } 167 168 static void 169 print_kenv(void) 170 { 171 int len; 172 char *cp; 173 174 debugf("loader passed (static) kenv:\n"); 175 if (kern_envp == NULL) { 176 debugf(" no env, null ptr\n"); 177 return; 178 } 179 debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp); 180 181 len = 0; 182 for (cp = kern_envp; cp != NULL; cp = kenv_next(cp)) 183 debugf(" %x %s\n", (uint32_t)cp, cp); 184 } 185 186 static void 187 print_kernel_section_addr(void) 188 { 189 190 debugf("kernel image addresses:\n"); 191 debugf(" kernbase = 0x%08x\n", (uint32_t)kernbase); 192 debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext); 193 debugf(" _edata = 0x%08x\n", (uint32_t)_edata); 194 debugf(" __bss_start = 0x%08x\n", (uint32_t)__bss_start); 195 debugf(" _end = 0x%08x\n", (uint32_t)_end); 196 } 197 198 static void 199 physmap_init(void) 200 { 201 int i, j, cnt; 202 vm_offset_t phys_kernelend, kernload; 203 uint32_t s, e, sz; 204 struct mem_region *mp, *mp1; 205 206 phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR); 207 kernload = KERNPHYSADDR; 208 209 /* 210 * Remove kernel physical address range from avail 211 * regions list. Page align all regions. 212 * Non-page aligned memory isn't very interesting to us. 213 * Also, sort the entries for ascending addresses. 214 */ 215 sz = 0; 216 cnt = availmem_regions_sz; 217 debugf("processing avail regions:\n"); 218 for (mp = availmem_regions; mp->mr_size; mp++) { 219 s = mp->mr_start; 220 e = mp->mr_start + mp->mr_size; 221 debugf(" %08x-%08x -> ", s, e); 222 /* Check whether this region holds all of the kernel. */ 223 if (s < kernload && e > phys_kernelend) { 224 availmem_regions[cnt].mr_start = phys_kernelend; 225 availmem_regions[cnt++].mr_size = e - phys_kernelend; 226 e = kernload; 227 } 228 /* Look whether this regions starts within the kernel. */ 229 if (s >= kernload && s < phys_kernelend) { 230 if (e <= phys_kernelend) 231 goto empty; 232 s = phys_kernelend; 233 } 234 /* Now look whether this region ends within the kernel. */ 235 if (e > kernload && e <= phys_kernelend) { 236 if (s >= kernload) { 237 goto empty; 238 } 239 e = kernload; 240 } 241 /* Now page align the start and size of the region. */ 242 s = round_page(s); 243 e = trunc_page(e); 244 if (e < s) 245 e = s; 246 sz = e - s; 247 debugf("%08x-%08x = %x\n", s, e, sz); 248 249 /* Check whether some memory is left here. */ 250 if (sz == 0) { 251 empty: 252 printf("skipping\n"); 253 bcopy(mp + 1, mp, 254 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 255 cnt--; 256 mp--; 257 continue; 258 } 259 260 /* Do an insertion sort. */ 261 for (mp1 = availmem_regions; mp1 < mp; mp1++) 262 if (s < mp1->mr_start) 263 break; 264 if (mp1 < mp) { 265 bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1); 266 mp1->mr_start = s; 267 mp1->mr_size = sz; 268 } else { 269 mp->mr_start = s; 270 mp->mr_size = sz; 271 } 272 } 273 availmem_regions_sz = cnt; 274 275 /* Fill in phys_avail table, based on availmem_regions */ 276 debugf("fill in phys_avail:\n"); 277 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 278 279 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 280 availmem_regions[i].mr_start, 281 availmem_regions[i].mr_start + availmem_regions[i].mr_size, 282 availmem_regions[i].mr_size); 283 284 /* 285 * We should not map the page at PA 0x0000000, the VM can't 286 * handle it, as pmap_extract() == 0 means failure. 287 */ 288 if (availmem_regions[i].mr_start > 0 || 289 availmem_regions[i].mr_size > PAGE_SIZE) { 290 phys_avail[j] = availmem_regions[i].mr_start; 291 if (phys_avail[j] == 0) 292 phys_avail[j] += PAGE_SIZE; 293 phys_avail[j + 1] = availmem_regions[i].mr_start + 294 availmem_regions[i].mr_size; 295 } else 296 j -= 2; 297 } 298 phys_avail[j] = 0; 299 phys_avail[j + 1] = 0; 300 } 301 302 void * 303 initarm(struct arm_boot_params *abp) 304 { 305 struct pv_addr kernel_l1pt; 306 struct pv_addr dpcpu; 307 vm_offset_t dtbp, freemempos, l2_start, lastaddr; 308 uint32_t memsize, l2size; 309 char *env; 310 void *kmdp; 311 u_int l1pagetable; 312 int i = 0, j = 0, err_devmap = 0; 313 314 lastaddr = parse_boot_param(abp); 315 memsize = 0; 316 set_cpufuncs(); 317 318 /* 319 * Find the dtb passed in by the boot loader. 320 */ 321 kmdp = preload_search_by_type("elf kernel"); 322 if (kmdp != NULL) 323 dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); 324 else 325 dtbp = (vm_offset_t)NULL; 326 327 #if defined(FDT_DTB_STATIC) 328 /* 329 * In case the device tree blob was not retrieved (from metadata) try 330 * to use the statically embedded one. 331 */ 332 if (dtbp == (vm_offset_t)NULL) 333 dtbp = (vm_offset_t)&fdt_static_dtb; 334 #endif 335 336 if (OF_install(OFW_FDT, 0) == FALSE) 337 while (1); 338 339 if (OF_init((void *)dtbp) != 0) 340 while (1); 341 342 /* Grab physical memory regions information from device tree. */ 343 if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz, 344 &memsize) != 0) 345 while(1); 346 347 /* Platform-specific initialisation */ 348 pmap_bootstrap_lastaddr = initarm_lastaddr(); 349 350 pcpu0_init(); 351 352 /* Calculate number of L2 tables needed for mapping vm_page_array */ 353 l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); 354 l2size = (l2size >> L1_S_SHIFT) + 1; 355 356 /* 357 * Add one table for end of kernel map, one for stacks, msgbuf and 358 * L1 and L2 tables map and one for vectors map. 359 */ 360 l2size += 3; 361 362 /* Make it divisible by 4 */ 363 l2size = (l2size + 3) & ~3; 364 365 #define KERNEL_TEXT_BASE (KERNBASE) 366 freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; 367 368 /* Define a macro to simplify memory allocation */ 369 #define valloc_pages(var, np) \ 370 alloc_pages((var).pv_va, (np)); \ 371 (var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR); 372 373 #define alloc_pages(var, np) \ 374 (var) = freemempos; \ 375 freemempos += (np * PAGE_SIZE); \ 376 memset((char *)(var), 0, ((np) * PAGE_SIZE)); 377 378 while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) 379 freemempos += PAGE_SIZE; 380 valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); 381 382 for (i = 0; i < l2size; ++i) { 383 if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { 384 valloc_pages(kernel_pt_table[i], 385 L2_TABLE_SIZE / PAGE_SIZE); 386 j = i; 387 } else { 388 kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + 389 L2_TABLE_SIZE_REAL * (i - j); 390 kernel_pt_table[i].pv_pa = 391 kernel_pt_table[i].pv_va - KERNVIRTADDR + 392 KERNPHYSADDR; 393 394 } 395 } 396 /* 397 * Allocate a page for the system page mapped to 0x00000000 398 * or 0xffff0000. This page will just contain the system vectors 399 * and can be shared by all processes. 400 */ 401 valloc_pages(systempage, 1); 402 403 /* Allocate dynamic per-cpu area. */ 404 valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); 405 dpcpu_init((void *)dpcpu.pv_va, 0); 406 407 /* Allocate stacks for all modes */ 408 valloc_pages(irqstack, (IRQ_STACK_SIZE * MAXCPU)); 409 valloc_pages(abtstack, (ABT_STACK_SIZE * MAXCPU)); 410 valloc_pages(undstack, (UND_STACK_SIZE * MAXCPU)); 411 valloc_pages(kernelstack, (KSTACK_PAGES * MAXCPU)); 412 413 init_param1(); 414 415 valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); 416 417 /* 418 * Now we start construction of the L1 page table 419 * We start by mapping the L2 page tables into the L1. 420 * This means that we can replace L1 mappings later on if necessary 421 */ 422 l1pagetable = kernel_l1pt.pv_va; 423 424 /* 425 * Try to map as much as possible of kernel text and data using 426 * 1MB section mapping and for the rest of initial kernel address 427 * space use L2 coarse tables. 428 * 429 * Link L2 tables for mapping remainder of kernel (modulo 1MB) 430 * and kernel structures 431 */ 432 l2_start = lastaddr & ~(L1_S_OFFSET); 433 for (i = 0 ; i < l2size - 1; i++) 434 pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, 435 &kernel_pt_table[i]); 436 437 pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; 438 439 /* Map kernel code and data */ 440 pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR, 441 (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, 442 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 443 444 445 /* Map L1 directory and allocated L2 page tables */ 446 pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, 447 L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); 448 449 pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, 450 kernel_pt_table[0].pv_pa, 451 L2_TABLE_SIZE_REAL * l2size, 452 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); 453 454 /* Map allocated DPCPU, stacks and msgbuf */ 455 pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, 456 freemempos - dpcpu.pv_va, 457 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 458 459 /* Link and map the vector page */ 460 pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, 461 &kernel_pt_table[l2size - 1]); 462 pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, 463 VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); 464 465 /* Map pmap_devmap[] entries */ 466 err_devmap = platform_devmap_init(); 467 pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table); 468 469 cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 470 DOMAIN_CLIENT); 471 pmap_pa = kernel_l1pt.pv_pa; 472 setttb(kernel_l1pt.pv_pa); 473 cpu_tlb_flushID(); 474 cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); 475 476 /* 477 * Only after the SOC registers block is mapped we can perform device 478 * tree fixups, as they may attempt to read parameters from hardware. 479 */ 480 OF_interpret("perform-fixup", 0); 481 482 initarm_gpio_init(); 483 484 cninit(); 485 486 physmem = memsize / PAGE_SIZE; 487 488 debugf("initarm: console initialized\n"); 489 debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); 490 debugf(" boothowto = 0x%08x\n", boothowto); 491 debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); 492 print_kernel_section_addr(); 493 print_kenv(); 494 495 env = getenv("kernelname"); 496 if (env != NULL) 497 strlcpy(kernelname, env, sizeof(kernelname)); 498 499 if (err_devmap != 0) 500 printf("WARNING: could not fully configure devmap, error=%d\n", 501 err_devmap); 502 503 initarm_late_init(); 504 505 /* 506 * Pages were allocated during the secondary bootstrap for the 507 * stacks for different CPU modes. 508 * We must now set the r13 registers in the different CPU modes to 509 * point to these stacks. 510 * Since the ARM stacks use STMFD etc. we must set r13 to the top end 511 * of the stack memory. 512 */ 513 cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); 514 515 set_stackptrs(0); 516 517 /* 518 * We must now clean the cache again.... 519 * Cleaning may be done by reading new data to displace any 520 * dirty data in the cache. This will have happened in setttb() 521 * but since we are boot strapping the addresses used for the read 522 * may have just been remapped and thus the cache could be out 523 * of sync. A re-clean after the switch will cure this. 524 * After booting there are no gross relocations of the kernel thus 525 * this problem will not occur after initarm(). 526 */ 527 cpu_idcache_wbinv_all(); 528 529 /* Set stack for exception handlers */ 530 data_abort_handler_address = (u_int)data_abort_handler; 531 prefetch_abort_handler_address = (u_int)prefetch_abort_handler; 532 undefined_handler_address = (u_int)undefinedinstruction_bounce; 533 undefined_init(); 534 535 init_proc0(kernelstack.pv_va); 536 537 arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); 538 arm_dump_avail_init(memsize, sizeof(dump_avail) / sizeof(dump_avail[0])); 539 pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt); 540 msgbufp = (void *)msgbufpv.pv_va; 541 msgbufinit(msgbufp, msgbufsize); 542 mutex_init(); 543 544 /* 545 * Prepare map of physical memory regions available to vm subsystem. 546 */ 547 physmap_init(); 548 549 /* Do basic tuning, hz etc */ 550 init_param2(physmem); 551 kdb_init(); 552 553 return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - 554 sizeof(struct pcb))); 555 } 556 557 vm_offset_t 558 initarm_lastaddr(void) 559 { 560 561 ti_cpu_reset = NULL; 562 return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE); 563 } 564 565 void 566 initarm_gpio_init(void) 567 { 568 } 569 570 void 571 initarm_late_init(void) 572 { 573 } 574 575 #define FDT_DEVMAP_MAX (2) // FIXME 576 static struct pmap_devmap fdt_devmap[FDT_DEVMAP_MAX] = { 577 { 0, 0, 0, 0, 0, } 578 }; 579 580 581 /* 582 * Construct pmap_devmap[] with DT-derived config data. 583 */ 584 static int 585 platform_devmap_init(void) 586 { 587 int i = 0; 588 #if defined(SOC_OMAP4) 589 fdt_devmap[i].pd_va = 0xE8000000; 590 fdt_devmap[i].pd_pa = 0x48000000; 591 fdt_devmap[i].pd_size = 0x1000000; 592 fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE; 593 fdt_devmap[i].pd_cache = PTE_DEVICE; 594 i++; 595 #elif defined(SOC_TI_AM335X) 596 fdt_devmap[i].pd_va = 0xE4C00000; 597 fdt_devmap[i].pd_pa = 0x44C00000; /* L4_WKUP */ 598 fdt_devmap[i].pd_size = 0x400000; /* 4 MB */ 599 fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE; 600 fdt_devmap[i].pd_cache = PTE_DEVICE; 601 i++; 602 #else 603 #error "Unknown SoC" 604 #endif 605 606 pmap_devmap_bootstrap_table = &fdt_devmap[0]; 607 return (0); 608 } 609 610 struct arm32_dma_range * 611 bus_dma_get_range(void) 612 { 613 614 return (NULL); 615 } 616 617 int 618 bus_dma_get_range_nb(void) 619 { 620 621 return (0); 622 } 623 624 void 625 cpu_reset() 626 { 627 if (ti_cpu_reset) 628 (*ti_cpu_reset)(); 629 else 630 printf("no cpu_reset implementation\n"); 631 printf("Reset failed!\n"); 632 while (1); 633 } 634 635