/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
 */

#include "opt_ddb.h"
#include "opt_platform.h"
#include "opt_global.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/cons.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kdb.h>
#include <sys/msgbuf.h>
#include <sys/reboot.h>

#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/fdt.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>

#include <machine/pte.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/armreg.h>
#include <machine/bus.h>

#include <arm/ti/omap4/omap4_reg.h>

#define DEBUG
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

/* Start of address space used for bootstrap map */
#define DEVMAP_BOOTSTRAP_MAP_START	0xE0000000

/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to be divisible by 4.
 */
#define KERNEL_PT_MAX	78

extern unsigned char kernbase[];
extern unsigned char _etext[];
extern unsigned char _edata[];
extern unsigned char __bss_start[];
extern unsigned char _end[];

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

extern vm_offset_t pmap_bootstrap_lastaddr;
extern int *end;

struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

/* Physical and virtual addresses for some global pages */
vm_paddr_t phys_avail[10];
vm_paddr_t dump_avail[4];
vm_offset_t physical_pages;
vm_offset_t pmap_bootstrap_lastaddr;
vm_paddr_t pmap_pa;

const struct pmap_devmap *pmap_devmap_bootstrap_table;
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

static struct mem_region availmem_regions[FDT_MEM_REGIONS];
static int availmem_regions_sz;

static void print_kenv(void);
static void print_kernel_section_addr(void);

static void physmap_init(void);
static int platform_devmap_init(void);

void (*ti_cpu_reset)(void);
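
/*
 * The loader hands over a static kenv: a run of NUL-terminated "name=value"
 * strings ended by an empty string, e.g. (values here are purely
 * illustrative):
 *
 *	"boot_verbose=1\0kern.hz=100\0\0"
 *
 * kenv_next() steps over the current string and returns NULL once it
 * reaches the empty terminator.
 */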
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

static void
print_kernel_section_addr(void)
{

	debugf("kernel image addresses:\n");
	debugf(" kernbase = 0x%08x\n", (uint32_t)kernbase);
	debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext);
	debugf(" _edata = 0x%08x\n", (uint32_t)_edata);
	debugf(" __bss_start = 0x%08x\n", (uint32_t)__bss_start);
	debugf(" _end = 0x%08x\n", (uint32_t)_end);
}

static void
physmap_init(void)
{
	int i, j, cnt;
	vm_offset_t phys_kernelend, kernload;
	uint32_t s, e, sz;
	struct mem_region *mp, *mp1;

	phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
	kernload = KERNPHYSADDR;

	/*
	 * Remove the kernel physical address range from the list of
	 * available regions and page align all regions: non-page-aligned
	 * memory isn't very interesting to us.  Also sort the entries by
	 * ascending address.
	 */
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
empty:
			debugf("skipping\n");
			bcopy(mp + 1, mp,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/* Fill in the phys_avail table, based on availmem_regions. */
	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		/*
		 * We should not map the page at PA 0x00000000: the VM can't
		 * handle it, as pmap_extract() == 0 means failure.
		 */
		if (availmem_regions[i].mr_start > 0 ||
		    availmem_regions[i].mr_size > PAGE_SIZE) {
			phys_avail[j] = availmem_regions[i].mr_start;
			if (phys_avail[j] == 0)
				phys_avail[j] += PAGE_SIZE;
			phys_avail[j + 1] = availmem_regions[i].mr_start +
			    availmem_regions[i].mr_size;
		} else
			j -= 2;
	}
	phys_avail[j] = 0;
	phys_avail[j + 1] = 0;
}
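
/*
 * Early machine-dependent bootstrap.  In rough order, initarm():
 * parses the boot parameters, locates the DTB and brings up the
 * FDT/openfirm layer, sizes and allocates the kernel L1/L2 page tables,
 * maps the kernel image, stacks, msgbuf and vector page, installs the
 * static device mappings, switches the TTB to the new L1 table, and
 * hands the remaining free memory to pmap_bootstrap().  It returns the
 * initial stack pointer for thread0, just below its PCB at the top of
 * the kernel stack.
 */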
void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	void *kmdp;
	u_int l1pagetable;
	int i = 0, j = 0, err_devmap = 0;

	lastaddr = parse_boot_param(abp);
	memsize = 0;
	set_cpufuncs();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp != NULL)
		dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	else
		dtbp = (vm_offset_t)NULL;

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);

	if (OF_init((void *)dtbp) != 0)
		while (1);

	/* Grab physical memory regions information from the device tree. */
	if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
	    &memsize) != 0)
		while (1);

	/* Platform-specific initialisation */
	pmap_bootstrap_lastaddr = initarm_lastaddr();

	pcpu0_init();

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for the end of the kernel map, one for the stacks,
	 * msgbuf and L1/L2 table mappings, and one for the vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;
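
	/*
	 * For illustration (the exact numbers depend on the platform and on
	 * sizeof(struct vm_page)): with 512MB of RAM and 4KB pages there are
	 * 131072 vm_page structures; at roughly 80 bytes apiece that is
	 * about 10MB of vm_page_array, so l2size = 10 + 1 = 11 tables, plus
	 * the 3 extra above = 14, rounded up to 16.  Rounding to a multiple
	 * of 4 matters because four 1KB L2 tables share each 4KB page
	 * allocated below.
	 */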

#define KERNEL_TEXT_BASE (KERNBASE)
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)						\
	alloc_pages((var).pv_va, (np));					\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += ((np) * PAGE_SIZE);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	for (i = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000.  This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, (IRQ_STACK_SIZE * MAXCPU));
	valloc_pages(abtstack, (ABT_STACK_SIZE * MAXCPU));
	valloc_pages(undstack, (UND_STACK_SIZE * MAXCPU));
	valloc_pages(kernelstack, (KSTACK_PAGES * MAXCPU));

	init_param1();

	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
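
	/*
	 * Everything above was carved linearly out of freemempos, so at this
	 * point physical memory just past the kernel image looks roughly
	 * like (a sketch, not an exact map):
	 *
	 *	kernel image | L1 table | L2 tables | vector page |
	 *	dpcpu area | IRQ/ABT/UND/kernel stacks | msgbuf
	 *
	 * freemempos now marks the first byte still free for the pmap
	 * bootstrap to hand to the VM system.
	 */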

	/*
	 * Now we start construction of the L1 page table.  We begin by
	 * mapping the L2 page tables into the L1, which means we can
	 * replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of the kernel text and data using
	 * 1MB section mappings, and use L2 coarse tables for the rest of
	 * the initial kernel address space.
	 *
	 * Link L2 tables for mapping the remainder of the kernel (modulo
	 * 1MB) and the kernel structures.
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);

	/* Map pmap_devmap[] entries */
	err_devmap = platform_devmap_init();
	pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);
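
	/*
	 * Switch to the page tables built above: set the domain access
	 * control register for the kernel pmap domain, point the TTB at
	 * the new L1 table, and flush stale TLB entries before bringing
	 * up the console and the pmap layer.
	 */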
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	pmap_pa = kernel_l1pt.pv_pa;
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Only after the SOC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	initarm_gpio_init();

	cninit();

	physmem = memsize / PAGE_SIZE;

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kernel_section_addr();
	print_kenv();

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	initarm_late_init();

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for the different CPU modes.  We must now set the r13
	 * registers in the different CPU modes to point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top
	 * end of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);

	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache.  This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync.  A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	arm_dump_avail_init(memsize,
	    sizeof(dump_avail) / sizeof(dump_avail[0]));
	pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Prepare map of physical memory regions available to vm subsystem.
	 */
	physmap_init();

	/* Do basic tuning, hz etc */
	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}

vm_offset_t
initarm_lastaddr(void)
{

	ti_cpu_reset = NULL;
	return (DEVMAP_BOOTSTRAP_MAP_START - ARM_NOCACHE_KVA_SIZE);
}

void
initarm_gpio_init(void)
{
}

void
initarm_late_init(void)
{
}

#define FDT_DEVMAP_MAX	(2)	/* FIXME */
static struct pmap_devmap fdt_devmap[FDT_DEVMAP_MAX] = {
	{ 0, 0, 0, 0, 0, }
};

/*
 * Construct pmap_devmap[] with DT-derived config data.
 */
static int
platform_devmap_init(void)
{
	int i = 0;

#if defined(SOC_OMAP4)
	fdt_devmap[i].pd_va = 0xE8000000;
	fdt_devmap[i].pd_pa = 0x48000000;
	fdt_devmap[i].pd_size = 0x1000000;
	fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
	fdt_devmap[i].pd_cache = PTE_DEVICE;
	i++;
#elif defined(SOC_TI_AM335X)
	fdt_devmap[i].pd_va = 0xE4C00000;
	fdt_devmap[i].pd_pa = 0x44C00000;	/* L4_WKUP */
	fdt_devmap[i].pd_size = 0x400000;	/* 4 MB */
	fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
	fdt_devmap[i].pd_cache = PTE_DEVICE;
	i++;
#else
#error "Unknown SoC"
#endif

	pmap_devmap_bootstrap_table = &fdt_devmap[0];
	return (0);
}

struct arm32_dma_range *
bus_dma_get_range(void)
{

	return (NULL);
}

int
bus_dma_get_range_nb(void)
{

	return (0);
}

void
cpu_reset(void)
{

	if (ti_cpu_reset)
		(*ti_cpu_reset)();
	else
		printf("no cpu_reset implementation\n");
	printf("Reset failed!\n");
	while (1);
}