/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/module.h>

#include "numa_internal.h"

#ifdef CONFIG_DISCONTIGMEM
/*
 * physnode_map - the mapping between a pfn and its owning node.
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at a 64MB granularity: each element of the array covers
 * 64MB of memory and holds the id of the node that owns it.  So, if
 * the first gig is on node 0 and the second gig is on node 1,
 * physnode_map will contain:
 *
 *     physnode_map[0-15] = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);

void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
	       nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		physnode_map[pfn / PAGES_PER_SECTION] = nid;
		printk(KERN_CONT "%lx ", pfn);
	}
	printk(KERN_CONT "\n");
}

unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long nr_pages = end_pfn - start_pfn;

	if (!nr_pages)
		return 0;

	return (nr_pages + 1) * sizeof(struct page);
}
#endif
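/*
 * Example (illustrative, not part of this file): with the layout above,
 * mapping a pfn back to its node is a single array lookup.  The helper
 * below is a hypothetical sketch of such a lookup; the kernel's actual
 * version lives in the mmzone headers as pfn_to_nid().
 *
 *	static inline int example_pfn_to_nid(unsigned long pfn)
 *	{
 *		return physnode_map[pfn / PAGES_PER_SECTION];
 *	}
 *
 * With 4K pages and 64MB sections (PAGES_PER_SECTION == 16384), a pfn
 * at 896MB (0x38000) indexes element 14 and, in the two-node layout
 * sketched above, resolves to node 0; a pfn past populated memory
 * resolves to -1.
 */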
extern unsigned long highend_pfn, highstart_pfn;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

/*
 * Remap memory allocator
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];

/**
 * alloc_remap - Allocate remapped memory
 * @nid: NUMA node to allocate memory from
 * @size: The size of allocation
 *
 * Allocate @size bytes from the remap area of NUMA node @nid.  The
 * size of the remap area is predetermined by init_alloc_remap() and
 * only the callers considered there should call this function.  For
 * more info, please read the comment on top of init_alloc_remap().
 *
 * The caller must be ready to handle allocation failure from this
 * function and fall back to the regular memory allocator in such cases.
 *
 * CONTEXT:
 * Single CPU early boot context.
 *
 * RETURNS:
 * Pointer to the allocated memory on success, %NULL on failure.
 */
void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	if (!allocation || (allocation + size) > node_remap_end_vaddr[nid])
		return NULL;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}

#ifdef CONFIG_HIBERNATION
/**
 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
 *                       during resume from hibernation
 * @pgd_base: temporary resume page directory
 */
void resume_map_numa_kva(pgd_t *pgd_base)
{
	int node;

	for_each_online_node(node) {
		unsigned long start_va, start_pfn, nr_pages, pfn;

		start_va = (unsigned long)node_remap_start_vaddr[node];
		start_pfn = node_remap_start_pfn[node];
		nr_pages = (node_remap_end_vaddr[node] -
			    node_remap_start_vaddr[node]) >> PAGE_SHIFT;

		printk(KERN_DEBUG "%s: node %d\n", __func__, node);

		for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) {
			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
			pgd_t *pgd = pgd_base + pgd_index(vaddr);
			pud_t *pud = pud_offset(pgd, vaddr);
			pmd_t *pmd = pmd_offset(pud, vaddr);

			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
					     PAGE_KERNEL_LARGE_EXEC));

			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
			       __func__, vaddr, start_pfn + pfn);
		}
	}
}
#endif
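/*
 * Example (illustrative, not part of this file): alloc_remap() is meant
 * to be tried first and abandoned on failure.  A caller allocating a
 * node's pg_data_t would follow roughly this pattern, where
 * early_alloc_fallback() is hypothetical and stands in for whatever
 * early allocator the caller normally uses:
 *
 *	pg_data_t *pgdat = alloc_remap(nid, sizeof(pg_data_t));
 *	if (!pgdat)
 *		pgdat = early_alloc_fallback(nid, sizeof(pg_data_t));
 *
 * Because alloc_remap() zeroes what it hands out and rounds the size
 * up to the cache line, the two paths yield interchangeable memory.
 */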
/**
 * init_alloc_remap - Initialize remap allocator for a NUMA node
 * @nid: NUMA node to initialize remap allocator for
 *
 * NUMA nodes may end up without any lowmem.  As allocating pgdat and
 * memmap on a different node with lowmem is inefficient, a special
 * remap allocator is implemented which can be used by alloc_remap().
 *
 * For each node, the amount of memory which will be necessary for
 * pgdat and memmap is calculated and two memory areas of that size are
 * allocated - one in the node and the other in lowmem; then, the area
 * in the node is remapped to the lowmem area.
 *
 * As pgdat and memmap must be allocated in lowmem anyway, this
 * doesn't waste lowmem address space; however, the actual lowmem
 * which gets remapped over is wasted.  The amount shouldn't be
 * problematic on machines where this feature is used.
 *
 * Initialization failure isn't fatal.  alloc_remap() is used
 * opportunistically and the callers will fall back to other memory
 * allocation mechanisms on failure.
 */
void __init init_alloc_remap(int nid, u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = end >> PAGE_SHIFT;
	unsigned long size, pfn;
	u64 node_pa, remap_pa;
	void *remap_va;

	/*
	 * The acpi/srat node info can show hot-add memory zones where
	 * memory could be added but is not currently present.
	 */
	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
	       nid, start_pfn, end_pfn);

	/* calculate the necessary space aligned to large page size */
	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
	size = ALIGN(size, LARGE_PAGE_BYTES);

	/* allocate node memory and the lowmem remap area */
	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
	if (node_pa == MEMBLOCK_ERROR) {
		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
			   size, nid);
		return;
	}
	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");

	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					  max_low_pfn << PAGE_SHIFT,
					  size, LARGE_PAGE_BYTES);
	if (remap_pa == MEMBLOCK_ERROR) {
		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
			   size, nid);
		memblock_x86_free_range(node_pa, node_pa + size);
		return;
	}
	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
	remap_va = phys_to_virt(remap_pa);

	/* perform actual remap */
	for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
		set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
			    (node_pa >> PAGE_SHIFT) + pfn,
			    PAGE_KERNEL_LARGE);

	/* initialize remap allocator parameters */
	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
	node_remap_start_vaddr[nid] = remap_va;
	node_remap_end_vaddr[nid] = remap_va + size;
	node_remap_alloc_vaddr[nid] = remap_va;

	printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
	       nid, node_pa, node_pa + size, remap_va, remap_va + size);
}

void __init initmem_init(void)
{
	x86_numa_init();

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
	       pages_to_mb(max_low_pfn));
	printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
	       max_low_pfn, highstart_pfn);

	printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
	       (ulong) pfn_to_kaddr(max_low_pfn));

	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
	       (ulong) pfn_to_kaddr(highstart_pfn));

	setup_bootmem_allocator();
}
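/*
 * Worked sizing example for init_alloc_remap() (illustrative, with
 * assumed sizes): on a non-PAE 32-bit build, PTRS_PER_PTE == 1024 and
 * PAGE_SIZE == 4096, so LARGE_PAGE_BYTES is 4MB.  For a node with
 * 512MB of memory (131072 4K pages) and an assumed 32-byte
 * struct page:
 *
 *	memmap: (131072 + 1) * 32 bytes  = 4MB + 32 bytes
 *	pgdat:  ALIGN(sizeof(pg_data_t), PAGE_SIZE)  (a few pages)
 *	total:  ALIGN(4MB + 32 bytes + pgdat, 4MB)  = 8MB
 *
 * i.e. the remapped window is a small, large-page-aligned slice of the
 * node, which is why the lowmem sacrificed to the remap stays modest.
 */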