Lines Matching "pdc-ranges" in arch/parisc/mm/init.c

1 // SPDX-License-Identifier: GPL-2.0
10 * Copyright 2006-2007 Helge Deller (deller@gmx.de)
36 #include <asm/asm-offsets.h>
60 .name = "PDC data (Page Zero)",
116 physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
124 * Sort the ranges. Since the number of ranges is typically
132 for (j = i; j > 0; j--) {
133 if (pmem_ranges[j-1].start_pfn <
138 swap(pmem_ranges[j-1], pmem_ranges[j]);
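
Lines 132-138 are the inner loop of an insertion sort over pmem_ranges[], ordered ascending by start_pfn. A minimal standalone sketch of that pass, assuming the physmem_range_t field names visible elsewhere in this listing (start_pfn, pages) and open-coding the kernel's swap() helper:

#include <stddef.h>

typedef struct {
	unsigned long start_pfn;	/* first page frame of the range */
	unsigned long pages;		/* length of the range in pages */
} physmem_range_t;			/* field names from the listing; a sketch */

/* Insertion sort by start_pfn, ascending: walk each element left
 * until its predecessor starts below it, as in the loop above. */
static void sort_ranges(physmem_range_t *r, size_t n)
{
	for (size_t i = 1; i < n; i++) {
		for (size_t j = i; j > 0; j--) {
			if (r[j - 1].start_pfn < r[j].start_pfn)
				break;
			physmem_range_t tmp = r[j - 1];	/* open-coded swap() */
			r[j - 1] = r[j];
			r[j] = tmp;
		}
	}
}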
144 * Throw out ranges that are too far apart (controlled by
149 if (pmem_ranges[i].start_pfn -
150 (pmem_ranges[i-1].start_pfn +
151 pmem_ranges[i-1].pages) > MAX_GAP) {
155 pmem_ranges[i].start_pfn -
156 (pmem_ranges[i-1].start_pfn +
157 pmem_ranges[i-1].pages));
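
Lines 144-157 cut the range list off at the first hole wider than MAX_GAP, warning about the discarded tail. The same policy as a standalone function, reusing the physmem_range_t sketch above (the function name and returned count are illustrative; only the gap test comes from the fragment):

/* Requires ranges sorted by start_pfn. Returns how many leading ranges
 * to keep; everything after the first oversized hole is dropped. */
static size_t trim_after_large_gap(const physmem_range_t *r, size_t n,
				   unsigned long max_gap_pages)
{
	for (size_t i = 1; i < n; i++) {
		unsigned long prev_end = r[i - 1].start_pfn + r[i - 1].pages;
		if (r[i].start_pfn - prev_end > max_gap_pages)
			return i;	/* keep 0..i-1, discard the rest */
	}
	return n;
}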
163 /* Print the memory ranges */
164 pr_info("Memory Ranges:\n");
174 i, start, start + (size - 1), size >> 20);
177 res->name = "System RAM";
178 res->start = start;
179 res->end = start + size - 1;
180 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
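
Lines 177-180 fill in one struct resource per RAM range and mark it as busy system RAM. A hedged reconstruction of the enclosing loop; the sysram_resources[] storage, the pr_info() format string, and the request_resource(&iomem_resource, ...) registration are assumed from the usual kernel idiom and are not visible in the matched lines:

for (i = 0; i < npmem_ranges; i++) {
	struct resource *res = &sysram_resources[i];	/* hypothetical storage */
	unsigned long start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
	unsigned long size = pmem_ranges[i].pages << PAGE_SHIFT;

	pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",	/* format assumed */
		i, start, start + (size - 1), size >> 20);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	request_resource(&iomem_resource, res);	/* hang it off the iomem root */
}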
192 * to work with multiple memory ranges).
208 - (mem_max >> PAGE_SHIFT);
220 /* Merge the ranges, keeping track of the holes */
229 hole_pages = pmem_ranges[i].start_pfn - end_pfn;
238 pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
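
Lines 220-238 collapse the sorted, gap-trimmed ranges into a single spanning range while recording every hole, so those pages can be excluded later. The same merge as a standalone sketch, again reusing the physmem_range_t type from above (names are illustrative):

/* Merge ranges[0..*nranges) into ranges[0], appending each gap to
 * holes[]. Returns the number of holes recorded. */
static size_t merge_ranges(physmem_range_t *ranges, size_t *nranges,
			   physmem_range_t *holes)
{
	size_t nholes = 0;
	unsigned long end_pfn = ranges[0].start_pfn + ranges[0].pages;

	for (size_t i = 1; i < *nranges; i++) {
		unsigned long hole_pages = ranges[i].start_pfn - end_pfn;
		if (hole_pages) {
			holes[nholes].start_pfn = end_pfn;
			holes[nholes++].pages = hole_pages;
			end_pfn += hole_pages;
		}
		end_pfn += ranges[i].pages;
	}

	/* range 0 now spans up to the end of the last input range */
	ranges[0].pages = end_pfn - ranges[0].start_pfn;
	*nranges = 1;
	return nholes;
}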
268 * We can't use memblock top-down allocations because we only
281 /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
285 memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
288 (unsigned long)(_end - KERNEL_BINARY_TEXT_START));
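
Lines 281-288 are pieces of two memblock_reserve() calls: one covering the PDC data and console IODC scratch in low memory starting at physical address 0, and one covering the kernel image itself. A hedged reconstruction, with the argument grouping inferred from the fragments:

/* keep memblock allocations away from PDC/IODC low memory and
 * away from the kernel text/data/bss */
memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
			PDC_CONSOLE_IO_IODC_SIZE));
memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
		(unsigned long)(_end - KERNEL_BINARY_TEXT_START));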
302 printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
307 initrd_reserve = mem_max - __pa(initrd_start);
309 initrd_reserve = initrd_end - initrd_start;
312 printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
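
Lines 307-309 clamp the initrd reservation so it never extends past mem_max: if the image tail lies above usable memory, only the part below mem_max is reserved. The branch modeled as a standalone helper on physical addresses (the name and the zero-length case are illustrative):

/* Length to reserve for an initrd spanning [pa_start, pa_end),
 * clipped against the top of usable memory. */
static unsigned long initrd_reserve_len(unsigned long pa_start,
					unsigned long pa_end,
					unsigned long mem_max)
{
	if (pa_start >= mem_max)
		return 0;			/* wholly above usable RAM */
	if (pa_end > mem_max)
		return mem_max - pa_start;	/* clip the tail */
	return pa_end - pa_start;		/* fits entirely */
}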
320 data_resource.end = virt_to_phys(_end) - 1;
322 code_resource.end = virt_to_phys(&data_start) - 1;
367 /* for a 2-level configuration PTRS_PER_PMD is 1, so the mask below is 0 and start_pmd will be 0 */
368 start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
369 start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
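
Lines 368-369 derive the PMD and PTE indices from a virtual address by shifting out the lower bits and masking with the table fan-out. A runnable illustration with assumed example constants (PAGE_SHIFT 12, PMD_SHIFT 22, 1024-entry tables; the real values depend on the configured page-table layout):

#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12, pmd_shift = 22;
	const unsigned long ptrs_per_pmd = 1024, ptrs_per_pte = 1024;
	unsigned long start_vaddr = 0x10400000UL;

	unsigned long start_pmd = (start_vaddr >> pmd_shift) & (ptrs_per_pmd - 1);
	unsigned long start_pte = (start_vaddr >> page_shift) & (ptrs_per_pte - 1);

	/* with a folded PMD (PTRS_PER_PMD == 1) the mask is 0, so
	 * start_pmd is forced to 0, as the comment at line 367 notes */
	printf("pmd index %lu, pte index %lu\n", start_pmd, start_pte);
	return 0;
}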
443 map_pages(start, __pa(start), end - start,
458 map_pages(init_end, __pa(init_end), kernel_end - init_end,
461 /* The init text pages are marked R-X. We have to
462 * flush the icache and mark them RW-
467 map_pages(init_begin, __pa(init_begin), init_end - init_begin,
469 /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
471 map_pages(init_begin, __pa(init_begin), init_end - init_begin,
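
Lines 461-471 flip the init text from R-X to plain PAGE_KERNEL in two map_pages() passes because the remapping code itself lives in pages whose permissions are changing: the first pass keeps the region executable and pre-primes the TLB so the second pass can still run. A loose userspace analogy of the two-step permission change using mprotect(); begin must be page-aligned, and the kernel path of course edits its own page tables instead:

#include <stddef.h>
#include <sys/mman.h>

/* step 1: keep the region executable while it is being reworked;
 * step 2: drop execute once nothing needs to run from it */
int demote_init_text(void *begin, size_t len)
{
	if (mprotect(begin, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
		return -1;
	return mprotect(begin, len, PROT_READ | PROT_WRITE);
}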
478 * pages are no longer executable */
494 pr_info("Write protecting the kernel read-only data: %luk\n",
495 (end - start) >> 10);
498 map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0);
513 * between mapping areas. That means that any out-of-bounds memory
526 & ~(VM_MAP_OFFSET-1)))
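
Line 526 is the tail of a macro that rounds a mapping base up past the next VM_MAP_OFFSET boundary, leaving the unmapped guard hole between areas that the comment at line 513 describes. A standalone sketch; the 32 KiB stride is an assumption for illustration:

#include <stdint.h>

#define VM_MAP_OFFSET	(32 * 1024)	/* stride assumed */
#define SET_MAP_OFFSET(x) ((void *)(((uintptr_t)(x) + VM_MAP_OFFSET) \
				     & ~(uintptr_t)(VM_MAP_OFFSET - 1)))

/* e.g. SET_MAP_OFFSET((void *)0x40001234) == (void *)0x40008000; an
 * already-aligned base advances a full stride, which creates the hole. */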
560 /* keep ldil_%L() asm statements from sign-extending into the upper 32 bits */
584 " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
585 " fixmap : 0x%px - 0x%px (%4ld kB)\n"
586 " memory : 0x%px - 0x%px (%4ld MB)\n"
587 " .init : 0x%px - 0x%px (%4ld kB)\n"
588 " .data : 0x%px - 0x%px (%4ld kB)\n"
589 " .text : 0x%px - 0x%px (%4ld kB)\n",
592 (VMALLOC_END - VMALLOC_START) >> 20,
598 ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
601 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
604 ((unsigned long)_edata - (unsigned long)_etext) >> 10,
607 ((unsigned long)_etext - (unsigned long)_text) >> 10);
619 * side effect of trapping those pesky NULL-reference errors in the
641 printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
643 initrd_end - initrd_start, PAGE_KERNEL, 0);
655 into not treating it as DP-relative data. */
737 if (((start & (2 * size - 1)) == 0) &&
738 (end - start) >= (2 * size)) {
743 if ((start & (size - 1)) != 0) {
747 if ((end - start) >= size) {
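
Lines 737-747 greedily carve an address range into naturally aligned power-of-two blocks: grow the block while the start stays aligned and two of the current size still fit, shrink when alignment or the remaining tail forbids the current size, and emit an entry when both tests pass. The same walk as a standalone sketch in page units, where the printf stands in for the real BTLB insertion:

#include <stdio.h>

/* start/end in pages; initial size must be a power of two (1 is safe) */
static void map_btlb_blocks(unsigned long start, unsigned long end,
			    unsigned long size)
{
	while (start < end) {
		if ((start & (2 * size - 1)) == 0 && end - start >= 2 * size)
			size <<= 1;	/* still aligned and fits: grow */
		else if ((start & (size - 1)) != 0)
			size >>= 1;	/* misaligned for this size: shrink */
		else if (end - start >= size) {
			printf("block at %#lx, %#lx pages\n", start, size);
			start += size;	/* emit one aligned block */
		} else
			size >>= 1;	/* tail too short: shrink */
	}
}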
765 /* BTLBs are not available on 64-bit CPUs */
805 * Currently we have a one-to-one relationship between space IDs and
822 static unsigned long free_space_ids = NR_SPACE_IDS - 1;
842 free_space_ids--;
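
Lines 822-842 manage the protection/space IDs with a bitmap plus a free counter: ID 0 is reserved up front, and when the counter hits zero the real allocator recycles dirty IDs via a TLB flush before retrying. A minimal userspace model of the allocation path, with locking and that recycling step omitted:

#include <limits.h>

#define NR_IDS		4096		/* pool size assumed for the model */
#define BITS_PER_WORD	(CHAR_BIT * sizeof(unsigned long))
#define NWORDS		(NR_IDS / BITS_PER_WORD)

static unsigned long id_map[NWORDS] = { 1 };	/* bit 0 set: ID 0 reserved */
static unsigned long free_ids = NR_IDS - 1;

static long alloc_id(void)
{
	if (free_ids == 0)
		return -1;		/* real code recycles dirty IDs here */
	for (unsigned long i = 1; i < NR_IDS; i++) {
		if (!(id_map[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD)))) {
			id_map[i / BITS_PER_WORD] |= 1UL << (i % BITS_PER_WORD);
			free_ids--;
			return (long)i;
		}
	}
	return -1;
}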
999 .ranges = {