Lines Matching +full:bottom +full:- +full:speed in arch/x86/mm/init.c
28 #include <asm/text-patching.h>
44 * WC and WT fall back to UC-. pat_init() updates these values to support
46 * for the details. Note, __early_ioremap() used during early boot-time
83 * Check that the write-protect PAT entry is set for write-protect.
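The boot-time fallback described at lines 44-46 and the round-trip check at line 83 can be modeled in plain C. A minimal userspace sketch follows; the enum values and the PWT/PCD bit encodings are simplified stand-ins for the kernel's _PAGE_CACHE_MODE_* and _PAGE_PWT/_PAGE_PCD constants, not the real definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for _PAGE_CACHE_MODE_*; illustrative values only. */
enum cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP, CM_NUM };

#define PWT 0x1 /* stand-ins for the _PAGE_PWT/_PAGE_PCD pte bits */
#define PCD 0x2

/* Boot-time defaults: WC, WT and WP all fall back to the UC- encoding. */
static uint8_t cachemode2pte[CM_NUM] = {
        [CM_WB]       = 0,
        [CM_WC]       = PCD,
        [CM_UC_MINUS] = PCD,
        [CM_UC]       = PWT | PCD,
        [CM_WT]       = PCD,
        [CM_WP]       = PCD,
};

/* Reverse table, indexed by the pte attribute bits. */
static uint8_t pte2cachemode[4] = {
        [0]         = CM_WB,
        [PWT]       = CM_UC_MINUS,
        [PCD]       = CM_UC_MINUS,
        [PWT | PCD] = CM_UC,
};

/* Round-trip check in the style of the line-83 write-protect test. */
static bool has_pat_wp(void)
{
        return pte2cachemode[cachemode2pte[CM_WP]] == CM_WP;
}

int main(void)
{
        /* Prints "no": WP round-trips to UC- until pat_init() runs. */
        printf("WP usable at boot defaults: %s\n", has_pat_wp() ? "yes" : "no");
        return 0;
}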
170 * the 0-ISA_END_ADDRESS range and secondly for the initial PMD_SIZE mapping.
252 /* Except when PTI is on, where the kernel is mostly non-Global: */ in probe_page_size_mask()
292 boot_cpu_data.microcode < invlpg_miss_match->driver_data) { in setup_pcid()
300 * This can't be cr4_set_bits_and_update_boot() -- the in setup_pcid()
307 * Instead, we brute-force it and set CR4.PCIDE manually in in setup_pcid()
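Line 292 compares the running microcode revision against a quirk table whose driver_data holds the first fixed revision. A userspace sketch of that lookup pattern; the struct, table contents and revision numbers are invented, the real list being the INVLPG-miss table consulted by setup_pcid():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct x86_cpu_id: a model plus a payload. */
struct cpu_id {
        unsigned int model;
        uintptr_t driver_data;  /* first microcode revision with the fix */
};

static const struct cpu_id invlpg_miss_ids[] = {
        { .model = 0xbe, .driver_data = 0x12 }, /* invented entry */
        { 0 }
};

static const struct cpu_id *match_cpu(unsigned int model)
{
        for (const struct cpu_id *id = invlpg_miss_ids; id->model; id++)
                if (id->model == model)
                        return id;
        return NULL;
}

int main(void)
{
        unsigned int model = 0xbe, microcode = 0x10;    /* pretend CPU */
        const struct cpu_id *m = match_cpu(model);

        if (m && microcode < m->driver_data)
                puts("erratum not fixed yet: disable PCID");
        else
                puts("PCID usable: set CR4.PCIDE on each CPU");
        return 0;
}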
365 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
373 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
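Lines 365 and 373 gate 2M and 1G pages on whether the PMD- or PUD-rounded region is entirely RAM, so a range misaligned at its edges can still use large pages when its neighbours are memory too. A standalone model of the 2M case, with a fake memblock that treats everything below 4G as RAM:

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE    (2UL << 20)
#define PG_LEVEL_2M 1           /* stand-in bit position */

/* Fake memblock_is_region_memory(): pretend RAM is exactly [0, 4G). */
static bool is_region_memory(unsigned long start, unsigned long size)
{
        return start + size <= (4UL << 30);
}

static unsigned long rdown(unsigned long x, unsigned long a) { return x & ~(a - 1); }
static unsigned long rup(unsigned long x, unsigned long a) { return (x + a - 1) & ~(a - 1); }

int main(void)
{
        /* A range misaligned at its start... */
        unsigned long start = 0x100000, end = 0x3fe00000, mask = 0;
        unsigned long s = rdown(start, PMD_SIZE), e = rup(end, PMD_SIZE);

        /* ...still gets 2M pages because the rounded-out span is all RAM. */
        if (is_region_memory(s, e - s))
                mask |= 1UL << PG_LEVEL_2M;

        printf("page_size_mask = %#lx\n", mask);
        return 0;
}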
386 if (mr->page_size_mask & (1<<PG_LEVEL_1G)) in page_size_string()
389 * 32-bit without PAE has a 4M large page size. in page_size_string()
395 mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
398 if (mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
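page_size_string() reduces to a small pure function over the mask bits. A sketch with stand-in PG_LEVEL_* bit positions and compile-time booleans in place of the CONFIG_X86_32/CONFIG_X86_PAE checks:

#include <stdbool.h>
#include <stdio.h>

#define PG_LEVEL_2M 1   /* stand-in bit positions, not the kernel's enum */
#define PG_LEVEL_1G 2

static const bool x86_32 = false, x86_pae = false;

static const char *page_size_string(unsigned long page_size_mask)
{
        if (page_size_mask & (1UL << PG_LEVEL_1G))
                return "1G";
        /* 32-bit non-PAE large pages are 4M even though the level is
         * still named PG_LEVEL_2M; report the true size. */
        if (x86_32 && !x86_pae && (page_size_mask & (1UL << PG_LEVEL_2M)))
                return "4M";
        if (page_size_mask & (1UL << PG_LEVEL_2M))
                return "2M";
        return "4k";
}

int main(void)
{
        printf("%s\n", page_size_string(1UL << PG_LEVEL_2M)); /* prints 2M */
        return 0;
}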
483 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { in split_mem_range()
491 (nr_range - 1 - i) * sizeof(struct map_range)); in split_mem_range()
492 mr[i--].start = old_start; in split_mem_range()
493 nr_range--; in split_mem_range()
497 pr_debug(" [mem %#010lx-%#010lx] page %s\n", in split_mem_range()
498 mr[i].start, mr[i].end - 1, in split_mem_range()
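The merge loop at lines 483-493 collapses adjacent map_ranges that are contiguous and share a page size mask; the mr[i--] trick re-examines the merged slot on the next pass so chains of ranges fold into one. The same loop lifted into a runnable test with invented ranges:

#include <stdio.h>
#include <string.h>

struct map_range {
        unsigned long start, end, page_size_mask;
};

int main(void)
{
        struct map_range mr[] = {
                { 0x000000, 0x200000, 0 },
                { 0x200000, 0x400000, 0 },      /* contiguous, same mask */
                { 0x400000, 0x500000, 2 },      /* different mask: kept */
        };
        int i, nr_range = 3;

        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;

                if (mr[i].end != mr[i + 1].start ||
                    mr[i].page_size_mask != mr[i + 1].page_size_mask)
                        continue;
                /* Merge: shift the tail down, keep the earlier start. */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i + 1],
                        (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i--].start = old_start;
                nr_range--;
        }

        for (i = 0; i < nr_range; i++)
                printf(" [mem %#010lx-%#010lx] mask %#lx\n",
                       mr[i].start, mr[i].end - 1, mr[i].page_size_mask);
        return 0;
}

Running it prints two ranges: the first pair merged into [0, 0x400000), the third survives untouched.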
515 if (start_pfn < (1UL<<(32-PAGE_SHIFT))) in add_pfn_range_mapped()
517 min(end_pfn, 1UL<<(32-PAGE_SHIFT))); in add_pfn_range_mapped()
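Lines 515-517 keep a separate below-4G high-water mark, clamped at pfn 1UL << (32 - PAGE_SHIFT), the first pfn above 4GiB. A toy version with a mapping that straddles the boundary:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_4G (1UL << (32 - PAGE_SHIFT))       /* first pfn above 4GiB */

static unsigned long max_pfn_mapped, max_low_pfn_mapped;

static unsigned long max_(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_(unsigned long a, unsigned long b) { return a < b ? a : b; }

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
        max_pfn_mapped = max_(max_pfn_mapped, end_pfn);
        /* Track below-4G coverage separately, clamped at the boundary. */
        if (start_pfn < PFN_4G)
                max_low_pfn_mapped = max_(max_low_pfn_mapped,
                                          min_(end_pfn, PFN_4G));
}

int main(void)
{
        add_pfn_range_mapped(0x80000, 0x180000);        /* 2G..6G */
        printf("max_pfn_mapped=%#lx max_low_pfn_mapped=%#lx\n",
               max_pfn_mapped, max_low_pfn_mapped);
        return 0;
}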
544 pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", in init_memory_mapping()
545 start, end - 1); in init_memory_mapping()
594 mapped_ram_size += end - start; in init_range_memory_mapping()
611 * Don't need to worry about overflow in the top-down case, on 32bit, in get_new_step_size()
614 * In the bottom-up case, round_up(x, 0) returns 0 though too, which in get_new_step_size()
617 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); in get_new_step_size()
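get_new_step_size() grows the step by PMD_SHIFT - PAGE_SHIFT - 1 bits (8 bits with 4k pages), one less than the full level-shift difference so that, per the comment above it, the worst-case PTE-only mapping still fits in the memory mapped so far. A sketch printing the first few steps:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_SIZE   (1UL << PMD_SHIFT)

static unsigned long get_new_step_size(unsigned long step_size)
{
        return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

int main(void)
{
        unsigned long step = PMD_SIZE;

        /* 2M, then 512M, then 128G: coverage explodes in a few rounds. */
        for (int i = 0; i < 3; i++) {
                printf("step %d: %#lx\n", i, step);
                step = get_new_step_size(step);
        }
        return 0;
}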
621 * memory_map_top_down - Map [map_start, map_end) top down
626 * [map_start, map_end) in top-down. That said, the page tables
628 * memory in top-down.
642 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure in memory_map_top_down()
658 * We start from the top (end of memory) and go to the bottom. in memory_map_top_down()
667 start = round_down(last_start - 1, step_size); in memory_map_top_down()
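The top-down walk can be exercised outside the kernel. In this sketch map_chunk() stands in for init_range_memory_mapping(), the PMD_SIZE bootstrap probe near line 642 is skipped, and the span is an invented 1M-4G range:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_SIZE   (1UL << PMD_SHIFT)

static unsigned long rdown(unsigned long x, unsigned long a) { return x & ~(a - 1); }
static unsigned long grow(unsigned long s) { return s << (PMD_SHIFT - PAGE_SHIFT - 1); }

/* Stand-in for init_range_memory_mapping(): just report the chunk. */
static unsigned long map_chunk(unsigned long start, unsigned long end)
{
        printf("map [%#012lx-%#012lx)\n", start, end);
        return end - start;
}

int main(void)
{
        unsigned long map_start = 0x100000, map_end = 0x100000000UL;
        unsigned long last_start = map_end, step_size = PMD_SIZE;
        unsigned long mapped_ram_size = 0;

        /* Walk down from the top in growing steps (lines 658-667). */
        while (last_start > map_start) {
                unsigned long start;

                if (last_start > step_size) {
                        start = rdown(last_start - 1, step_size);
                        if (start < map_start)
                                start = map_start;
                } else {
                        start = map_start;
                }
                mapped_ram_size += map_chunk(start, last_start);
                last_start = start;
                if (mapped_ram_size >= step_size)
                        step_size = grow(step_size);
        }
        return 0;
}

With these numbers it maps just three chunks: a 2M sliver at the top, then 510M, then the remaining ~3.5G in one go.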
685 * memory_map_bottom_up - Map [map_start, map_end) bottom up
690 * [map_start, map_end) in bottom-up. Since we have limited the
691 * bottom-up allocation above the kernel, the page tables will
693 * in [map_start, map_end) in bottom-up.
707 * We start from the bottom (@map_start) and go to the top (@map_end). in memory_map_bottom_up()
713 if (step_size && map_end - start > step_size) { in memory_map_bottom_up()
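The bottom-up twin, with the same stand-ins. Note round_up(start + 1, step_size) at line 713: the +1 forces progress when start is already aligned, and the rounding keeps later chunk boundaries step-aligned even when map_start is not:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_SIZE   (1UL << PMD_SHIFT)

static unsigned long rup(unsigned long x, unsigned long a) { return (x + a - 1) & ~(a - 1); }
static unsigned long grow(unsigned long s) { return s << (PMD_SHIFT - PAGE_SHIFT - 1); }

static unsigned long map_chunk(unsigned long start, unsigned long end)
{
        printf("map [%#012lx-%#012lx)\n", start, end);
        return end - start;
}

int main(void)
{
        unsigned long map_start = 0x100000, map_end = 0x100000000UL;
        unsigned long start = map_start, step_size = PMD_SIZE;
        unsigned long mapped_ram_size = 0;

        /* Walk up from the bottom in growing steps (lines 707-713). */
        while (start < map_end) {
                unsigned long next;

                if (step_size && map_end - start > step_size) {
                        next = rup(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
                } else {
                        next = map_end;
                }
                mapped_ram_size += map_chunk(start, next);
                start = next;
                if (mapped_ram_size >= step_size)
                        step_size = grow(step_size);
        }
        return 0;
}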
738 * area. This limits the randomization granularity to 1GB for both 4-level
739 * and 5-level paging.
745 * The code below will alias kernel page-tables in the user-range of the in init_trampoline()
747 * be created when using the trampoline page-table. in init_trampoline()
777 * If the allocation is in bottom-up direction, we setup direct mapping in init_mem_mapping()
778 * in bottom-up, otherwise we setup direct mapping in top-down. in init_mem_mapping()
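The direction choice itself is one branch. A stub model of init_mem_mapping()'s dispatch, paraphrased, with invented addresses; in the bottom-up case the region above the kernel is mapped first so that subsequent page tables can be allocated there:

#include <stdio.h>

#define ISA_END_ADDRESS 0x100000UL

static void memory_map_bottom_up(unsigned long s, unsigned long e)
{
        printf("bottom-up [%#lx-%#lx)\n", s, e);
}

static void memory_map_top_down(unsigned long s, unsigned long e)
{
        printf("top-down  [%#lx-%#lx)\n", s, e);
}

int main(int argc, char **argv)
{
        unsigned long end = 0x100000000UL;      /* pretend 4G of RAM */
        unsigned long kernel_end = 0x2000000UL; /* pretend _end at 32M */
        int bottom_up = argc > 1;               /* any argument: bottom-up */

        (void)argv;
        if (bottom_up) {
                /* Above the kernel first, then the gap below it. */
                memory_map_bottom_up(kernel_end, end);
                memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
        } else {
                memory_map_top_down(ISA_END_ADDRESS, end);
        }
        return 0;
}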
836 (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE); in poking_init()
842 * We need to trigger the allocation of the page-tables that will be in poking_init()
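Line 836 sizes the window for the randomized text-poking address: a page-aligned offset within [TASK_UNMAPPED_BASE, TASK_SIZE), keeping three pages of headroom so the two poking pages always fit. A userspace sketch; the base and size constants are invented and rand() merely stands in for kaslr_get_random_long():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Invented layout constants, not the real x86 values. */
#define TASK_UNMAPPED_BASE 0x7f0000000000UL
#define TASK_SIZE          0x7ffffffff000UL

int main(void)
{
        unsigned long poking_addr = TASK_UNMAPPED_BASE;
        unsigned long rnd;

        srand((unsigned)time(NULL));
        rnd = ((unsigned long)rand() << 32) ^ (unsigned long)rand();

        /* Page-aligned offset, 3 pages of headroom below TASK_SIZE. */
        poking_addr += (rnd & PAGE_MASK) %
                       (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

        printf("poking_addr = %#lx\n", poking_addr);
        return 0;
}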
860 * Access has to be given to non-kernel-ram areas as well; these contain the in devmem_is_allowed()
911 * mark them not present - any buggy init-section access will in free_init_pages()
915 pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", in free_init_pages()
916 begin, end - 1); in free_init_pages()
921 kmemleak_free_part((void *)begin, end - begin); in free_init_pages()
922 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
927 * writeable and non-executable first. in free_init_pages()
929 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
930 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
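set_memory_np/nx/rw have no userspace equivalent, but the ordering at lines 922-930 (make the area writable and non-executable before handing it back) can be loosely mimicked with mprotect() on an anonymous mapping. An analogy only, not the kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2 * 4096;

        /* Stand-in for an init section: starts read+exec like kernel text. */
        void *sect = mmap(NULL, len, PROT_READ | PROT_EXEC,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (sect == MAP_FAILED)
                return 1;

        /* Non-debug path: writable and non-executable first... */
        mprotect(sect, len, PROT_READ | PROT_WRITE);

        /* ...then release it. The debug_pagealloc path instead unmaps
         * (set_memory_np) so buggy late accesses fault immediately. */
        munmap(sect, len);
        printf("freed %zu bytes of fake init section\n", len);
        return 0;
}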
946 unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT; in free_kernel_image_pages()
986 * - i386_start_kernel() in free_initrd_mem()
987 * - x86_64_start_kernel() in free_initrd_mem()
988 * - relocate_initrd() in free_initrd_mem()
1028 /* entry 0 MUST be WB (hardwired to speed up translations) */ in update_cache_mode_entry()
1050 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; in arch_max_swapfile_size()
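Line 1050 widens the L1TF pfn limit into a limit in swap pages: swap offsets are encoded PAGE_SHIFT - SWP_OFFSET_FIRST_BIT bits below where a pfn would sit in the pte, which buys that many extra bits of safe range. A worked computation, assuming 4k pages, SWP_OFFSET_FIRST_BIT == 9, and an invented 36-bit physical address space:

#include <stdio.h>

#define PAGE_SHIFT           12
#define SWP_OFFSET_FIRST_BIT 9  /* assumed; gives 3 extra bits */

int main(void)
{
        /* With 36 physical bits, l1tf_pfn_limit() would be
         * (2^36 / 2) >> PAGE_SHIFT = 2^23 pfns. */
        unsigned long long l1tf_limit = 1ULL << 23;

        /* Offset bits sit below the pfn bits, so the usable swap
         * limit is higher, exactly as at line 1050. */
        l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;

        printf("swap limit: %llu pages (%llu GiB)\n",
               l1tf_limit, l1tf_limit >> (30 - PAGE_SHIFT));
        return 0;
}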