// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif
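/*
 * The MULTI_* function tables above are filled in at boot by
 * setup_processor() from the proc_info_list entry matched against the
 * CPU ID register, so one kernel image can support several CPU variants.
 */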
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
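/*
 * proc_arch is indexed by the CPU_ARCH_* value returned by
 * cpu_architecture(); the "?(12)".."?(17)" entries are placeholders for
 * ID register encodings this kernel does not know how to name.
 */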
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
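/*
 * early_print() works before a console is registered: with
 * CONFIG_DEBUG_LL the formatted buffer is also pushed straight to the
 * low-level debug UART via printascii(); otherwise it only reaches the
 * printk log buffer, to be flushed once a console comes up.
 */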
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif
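/*
 * Decode the ARMv7 ID_ISAR0/ID_MMFR0/ID_ISAR5 feature registers into
 * HWCAP/HWCAP2 bits so userspace (via AT_HWCAP and /proc/cpuinfo) can
 * discover hardware divide, LPAE and v8 Crypto Extension support.
 */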
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l	"l"
#define PLC_r	"r"
#else
#define PLC_l	"I"
#define PLC_r	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
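/*
 * mpidr_hash compresses the significant MPIDR affinity bits into a
 * dense index; smp_build_mpidr_hash() below computes the per-level
 * shifts. Note the swap in smp_setup_processor_id() above: booting on
 * physical CPU 2 of a 4-CPU system, for example, leaves
 * cpu_logical_map() as {2, 1, 0, 3}, so logical CPU 0 is always the
 * booting CPU.
 */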
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
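	/*
	 * Worked example: two clusters of two CPUs (MPIDRs 0x000, 0x001,
	 * 0x100, 0x101) give mask = 0x101 with one significant bit at
	 * Aff0 and one at Aff1, so shift_aff = {0, 7, 14} and a 2-bit
	 * index; MPIDR 0x101 hashes to (0x001 >> 0) | (0x100 >> 7) = 3.
	 */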
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types.  The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
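/*
 * Example of the alignment below: arm_add_memory(0x80000400, 0x1000)
 * rounds the start up to 0x80001000, shrinks the size to 0x400, then
 * rounds the size down to whole pages, leaving nothing to register,
 * so it returns -EINVAL.
 */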
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
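/*
 * For example, "mem=512M@0x80000000" on the command line discards the
 * firmware-provided memory map on first use and registers exactly that
 * 512 MiB range instead; a bare "mem=512M" starts the range at
 * PHYS_OFFSET. Multiple mem= parameters accumulate.
 */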
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
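/*
 * For example, booting with "crashkernel=64M" reserves a 64 MiB region
 * placed automatically (128 MiB aligned, within lowmem), while
 * "crashkernel=64M@0x90000000" requests a fixed physical base and fails
 * if that range is already in use.
 */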
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
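/*
 * Illustrative /proc/cpuinfo output produced by c_show() below
 * (values vary by SoC):
 *
 *	processor	: 0
 *	model name	: ARMv7 Processor rev 5 (v7l)
 *	BogoMIPS	: 48.00
 *	Features	: half thumb fastmult vfp edsp neon vfpv3 tls ...
 *	CPU implementer	: 0x41
 *	CPU architecture: 7
 */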
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};