/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

/*
 * Routines for describing and initializing anything related to physical memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/physmem.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>
#include <machine/md_var.h>

/*
 * These structures are used internally to keep track of regions of physical
 * ram, and regions within the physical ram that need to be excluded.  An
 * exclusion region can be excluded from crash dumps, from the vm pool of pages
 * that can be allocated, or both, depending on the exclusion flags associated
 * with the region.
 */
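/*
 * For example (illustrative only): a region reserved by firmware might be
 * excluded with EXFLAG_NOALLOC so the VM never allocates its pages, and with
 * EXFLAG_NODUMP as well if its contents should also stay out of crash dumps.
 */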
#ifdef DEV_ACPI
#define	MAX_HWCNT	32	/* ACPI needs more regions */
#define	MAX_EXCNT	32
#else
#define	MAX_HWCNT	16
#define	MAX_EXCNT	16
#endif

#if defined(__arm__)
#define	MAX_PHYS_ADDR	0xFFFFFFFFull
#elif defined(__aarch64__) || defined(__riscv)
#define	MAX_PHYS_ADDR	0xFFFFFFFFFFFFFFFFull
#endif

struct region {
	vm_paddr_t	addr;
	vm_size_t	size;
	uint32_t	flags;
};

static struct region hwregions[MAX_HWCNT];
static struct region exregions[MAX_EXCNT];

static size_t hwcnt;
static size_t excnt;

/*
 * realmem is the total number of hardware pages, excluded or not.
 * Maxmem is one greater than the last physical page number.
 */
long realmem;
long Maxmem;

/*
 * Print the contents of the physical and excluded region tables using the
 * provided printf-like output function (which will be either printf or
 * db_printf).
 */
static void
physmem_dump_tables(int (*prfunc)(const char *, ...))
{
	size_t i;
	int flags;
	uintmax_t addr, size;
	const unsigned int mbyte = 1024 * 1024;

	prfunc("Physical memory chunk(s):\n");
	for (i = 0; i < hwcnt; ++i) {
		addr = hwregions[i].addr;
		size = hwregions[i].size;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages)\n", addr,
		    addr + size - 1, size / mbyte, size / PAGE_SIZE);
	}

	prfunc("Excluded memory regions:\n");
	for (i = 0; i < excnt; ++i) {
		addr  = exregions[i].addr;
		size  = exregions[i].size;
		flags = exregions[i].flags;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages) %s %s\n",
		    addr, addr + size - 1, size / mbyte, size / PAGE_SIZE,
		    (flags & EXFLAG_NOALLOC) ? "NoAlloc" : "",
		    (flags & EXFLAG_NODUMP)  ? "NoDump" : "");
	}

#ifdef DEBUG
	prfunc("Avail lists:\n");
	for (i = 0; phys_avail[i] != 0; ++i) {
		prfunc("  phys_avail[%zu] 0x%08jx\n", i,
		    (uintmax_t)phys_avail[i]);
	}
	for (i = 0; dump_avail[i] != 0; ++i) {
		prfunc("  dump_avail[%zu] 0x%08jx\n", i,
		    (uintmax_t)dump_avail[i]);
	}
#endif
}

/*
 * Print the contents of the physical and excluded region tables.  Used for
 * bootverbose.
 */
void
physmem_print_tables(void)
{

	physmem_dump_tables(printf);
}

/*
 * Walk the list of hardware regions, processing it against the list of
 * exclusions that contain the given exflags, and generating an "avail list".
 *
 * If maxphyssz is not zero it sets an upper limit, in bytes, on the total
 * "avail list" size.  The walk stops once the limit is reached and the last
 * region is cut short if necessary.
 *
 * Updates the value at *pavail with the number of pages added to the avail
 * list, and the value at *prealmem with the total number of pages in all hw
 * regions, excluded or not.
 *
 * Returns the number of entries in the avail list, which is filled in as
 * pairs of start/end addresses.
 */
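/*
 * A minimal worked example (illustrative addresses only): one hardware region
 * [0x40000000, 0x80000000) with a single matching exclusion
 * [0x48000000, 0x48100000) yields
 *
 *	avail[0] = 0x40000000	avail[1] = 0x48000000
 *	avail[2] = 0x48100000	avail[3] = 0x80000000
 *
 * and a return value of 4.
 */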
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, size_t maxavail,
    uint64_t maxphyssz, long *pavail, long *prealmem)
{
	size_t acnt, exi, hwi;
	uint64_t end, start, xend, xstart;
	long availmem, totalmem;
	const struct region *exp, *hwp;
	uint64_t availsz;

	totalmem = 0;
	availmem = 0;
	availsz = 0;
	acnt = 0;
	for (hwi = 0, hwp = hwregions; hwi < hwcnt; ++hwi, ++hwp) {
		start = hwp->addr;
		end   = hwp->size + start;
		totalmem += atop((vm_offset_t)(end - start));
		for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
			/*
			 * If the excluded region does not match given flags,
			 * continue checking with the next excluded region.
			 */
			if ((exp->flags & exflags) == 0)
				continue;
			xstart = exp->addr;
			xend   = exp->size + xstart;
			/*
			 * If the excluded region ends before this hw region,
			 * continue checking with the next excluded region.
			 */
			if (xend <= start)
				continue;
			/*
			 * If the excluded region begins after this hw region
			 * we're done because both lists are sorted.
			 */
			if (xstart >= end)
				break;
			/*
			 * If the excluded region completely covers this hw
			 * region, shrink this hw region to zero size.
			 */
			if ((start >= xstart) && (end <= xend)) {
				start = xend;
				end = xend;
				break;
			}
			/*
			 * If the excluded region falls wholly within this hw
			 * region without abutting or overlapping the beginning
			 * or end, create an available entry from the leading
			 * fragment, then adjust the start of this hw region to
			 * the end of the excluded region, and continue checking
			 * the next excluded region because another exclusion
			 * could affect the remainder of this hw region.
			 */
			if ((xstart > start) && (xend < end)) {
				if ((maxphyssz != 0) &&
				    (availsz + xstart - start > maxphyssz)) {
					xstart = maxphyssz + start - availsz;
				}
				if (xstart <= start)
					continue;
				if (acnt > 0 &&
				    avail[acnt - 1] == (vm_paddr_t)start) {
					avail[acnt - 1] = (vm_paddr_t)xstart;
				} else {
					avail[acnt++] = (vm_paddr_t)start;
					avail[acnt++] = (vm_paddr_t)xstart;
				}
				availsz += (xstart - start);
				availmem += atop((vm_offset_t)(xstart - start));
				start = xend;
				continue;
			}
			/*
			 * We know the excluded region overlaps either the start
			 * or end of this hardware region (but not both), trim
			 * the excluded portion off the appropriate end.
			 */
			if (xstart <= start)
				start = xend;
			else
				end = xstart;
		}
		/*
		 * If the trimming actions above left a non-zero size, create an
		 * available entry for it.
		 */
		if (end > start) {
			if ((maxphyssz != 0) &&
			    (availsz + end - start > maxphyssz)) {
				end = maxphyssz + start - availsz;
			}
			if (end <= start)
				break;

			if (acnt > 0 && avail[acnt - 1] == (vm_paddr_t)start) {
				avail[acnt - 1] = (vm_paddr_t)end;
			} else {
				avail[acnt++] = (vm_paddr_t)start;
				avail[acnt++] = (vm_paddr_t)end;
			}
			availsz += end - start;
			availmem += atop((vm_offset_t)(end - start));
		}
		if (acnt >= maxavail)
			panic("Not enough space in the dump/phys_avail arrays");
	}

	if (pavail != NULL)
		*pavail = availmem;
	if (prealmem != NULL)
		*prealmem = totalmem;
	return (acnt);
}

/*
 * Check if the region at idx can be merged with the region above it.
 */
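/*
 * For example (illustrative): if regions[idx] has just grown to
 * [0x1000, 0x6000) and regions[idx + 1] is [0x4000, 0x8000) with the same
 * flags, the two collapse into a single [0x1000, 0x8000) entry and any
 * entries above it are shifted down.
 */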
static size_t
merge_upper_regions(struct region *regions, size_t rcnt, size_t idx)
{
	struct region *lower, *upper;
	vm_paddr_t lend, uend;
	size_t i, mergecnt, movecnt;

	lower = &regions[idx];
	lend = lower->addr + lower->size;

	/*
	 * Continue merging in upper entries as long as we have entries to
	 * merge; the new block could have spanned more than one, although one
	 * is likely the common case.
	 */
	for (i = idx + 1; i < rcnt; i++) {
		upper = &regions[i];
		if (lend < upper->addr || lower->flags != upper->flags)
			break;

		uend = upper->addr + upper->size;
		if (uend > lend) {
			lower->size += uend - lend;
			lend = lower->addr + lower->size;
		}

		if (uend >= lend) {
			/*
			 * If we didn't move past the end of the upper region,
			 * then we don't need to bother checking for another
			 * merge because it would have been done already.  Just
			 * increment i once more to maintain the invariant that
			 * i is one past the last entry merged.
			 */
			i++;
			break;
		}
	}

	/*
	 * We merged in the entries from [idx + 1, i); physically move the tail
	 * end at [i, rcnt) if we need to.
	 */
	mergecnt = i - (idx + 1);
	if (mergecnt > 0) {
		movecnt = rcnt - i;
		if (movecnt == 0) {
			/* Merged all the way to the end, just decrease rcnt. */
			rcnt = idx + 1;
		} else {
			memmove(&regions[idx + 1], &regions[idx + mergecnt + 1],
			    movecnt * sizeof(*regions));
			rcnt -= mergecnt;
		}
	}
	return (rcnt);
}

/*
 * Insertion-sort a new entry into a regions list; sorted by start address.
 */
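/*
 * For example (illustrative): inserting [0x2000, 0x3000) into a list holding
 * [0x0000, 0x1000) and [0x4000, 0x5000) shifts the second entry up and puts
 * the new entry between them; inserting [0x0800, 0x1800) instead would simply
 * extend the first entry to [0x0000, 0x1800).
 */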
static size_t
insert_region(struct region *regions, size_t rcnt, vm_paddr_t addr,
    vm_size_t size, uint32_t flags)
{
	size_t i;
	vm_paddr_t nend, rend;
	struct region *ep, *rp;

	nend = addr + size;
	ep = regions + rcnt;
	for (i = 0, rp = regions; i < rcnt; ++i, ++rp) {
		if (flags == rp->flags) {
			rend = rp->addr + rp->size;
			if (addr <= rp->addr && nend >= rp->addr) {
				/*
				 * New mapping overlaps at the beginning, shift
				 * for any difference in the beginning then
				 * shift if the new mapping extends past.
				 */
				rp->size += rp->addr - addr;
				rp->addr = addr;
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			} else if (addr <= rend && nend > rp->addr) {
				/*
				 * New mapping is either entirely contained
				 * within or it's overlapping at the end.
				 */
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			}
		}
		if (addr < rp->addr) {
			bcopy(rp, rp + 1, (ep - rp) * sizeof(*rp));
			break;
		}
	}
	rp->addr  = addr;
	rp->size  = size;
	rp->flags = flags;
	rcnt++;

	return (rcnt);
}

/*
 * Add a hardware memory region.
 */
void
physmem_hardware_region(uint64_t pa, uint64_t sz)
{
	vm_offset_t adj;

	/*
	 * Filter out the page at PA 0x00000000.  The VM can't handle it, as
	 * pmap_extract() == 0 means failure.
	 */
	if (pa == 0) {
		if (sz <= PAGE_SIZE)
			return;
		pa  = PAGE_SIZE;
		sz -= PAGE_SIZE;
	} else if (pa > MAX_PHYS_ADDR) {
		/* This range is past usable memory, ignore it */
		return;
	}

	/*
	 * Also filter out the page at the end of the physical address space --
	 * if addr is non-zero and addr+size is zero we wrapped to the next byte
	 * beyond what vm_paddr_t can express.  That leads to a NULL pointer
	 * deref early in startup; work around it by leaving the last page out.
	 *
	 * XXX This just in:  subtract out a whole megabyte, not just 1 page.
	 * Reducing the size by anything less than 1MB results in the NULL
	 * pointer deref in _vm_map_lock_read().  Better to give up a megabyte
	 * than leave some folks with an unusable system while we investigate.
	 */
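	/*
	 * For example (illustrative, 32-bit arm where MAX_PHYS_ADDR is
	 * 0xFFFFFFFF): pa 0xC0000000 with sz 0x40000000 runs to the very top
	 * of the address space, so sz is trimmed to 0x3FF00000 and the region
	 * becomes [0xC0000000, 0xFFF00000).
	 */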
	if ((pa + sz) > (MAX_PHYS_ADDR - 1024 * 1024)) {
		sz = MAX_PHYS_ADDR - pa + 1;
		if (sz <= 1024 * 1024)
			return;
		sz -= 1024 * 1024;
	}

	/*
	 * Round the starting address up to a page boundary, and truncate the
	 * ending page down to a page boundary.
	 */
	adj = round_page(pa) - pa;
	pa  = round_page(pa);
	sz  = trunc_page(sz - adj);

	if (sz > 0 && hwcnt < nitems(hwregions))
		hwcnt = insert_region(hwregions, hwcnt, pa, sz, 0);
}

/*
 * Add an exclusion region.
 */
void
physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t exflags)
{
	vm_offset_t adj;

	/*
	 * Truncate the starting address down to a page boundary, and round the
	 * ending page up to a page boundary.
	 */
	adj = pa - trunc_page(pa);
	pa  = trunc_page(pa);
	sz  = round_page(sz + adj);

	if (excnt >= nitems(exregions))
		panic("failed to exclude region %#jx-%#jx", (uintmax_t)pa,
		    (uintmax_t)(pa + sz));
	excnt = insert_region(exregions, excnt, pa, sz, exflags);
}
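
/*
 * A hedged sketch of how platform startup code typically drives this API;
 * the actual call sites, names, and addresses are platform-specific:
 *
 *	physmem_hardware_region(dram_base, dram_size);
 *	physmem_exclude_region(kernel_phys_start, kernel_phys_size,
 *	    EXFLAG_NOALLOC);
 *	physmem_init_kernel_globals();
 */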

size_t
physmem_avail(vm_paddr_t *avail, size_t maxavail)
{

	return (regions_to_avail(avail, EXFLAG_NOALLOC, maxavail, 0, NULL, NULL));
}

/*
 * Process all the regions added earlier into the global avail lists.
 *
 * Updates the kernel global 'physmem' with the number of physical pages
 * available for use (all pages not in any exclusion region).
 *
 * Updates the kernel global 'Maxmem' with the page number one greater than the
 * last page of physical memory in the system.
 */
void
physmem_init_kernel_globals(void)
{
	size_t nextidx;
	u_long hwphyssz;

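	/*
	 * hw.physmem is a loader tunable (e.g. hw.physmem="1G" in
	 * loader.conf) that caps the amount of physical memory the kernel
	 * will use; zero means no limit.
	 */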
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz);

	regions_to_avail(dump_avail, EXFLAG_NODUMP, PHYS_AVAIL_ENTRIES,
	    hwphyssz, NULL, NULL);
	nextidx = regions_to_avail(phys_avail, EXFLAG_NOALLOC,
	    PHYS_AVAIL_ENTRIES, hwphyssz, &physmem, &realmem);
	if (nextidx == 0)
		panic("No memory entries in phys_avail");
	Maxmem = atop(phys_avail[nextidx - 1]);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(physmem, db_show_physmem)
{

	physmem_dump_tables(db_printf);
}

#endif /* DDB */