/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef _KERNEL
#include "opt_acpi.h"
#include "opt_ddb.h"
#endif

/*
 * Routines for describing and initializing anything related to physical memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/physmem.h>

#ifdef _KERNEL
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_dumpset.h>

#include <machine/md_var.h>
#include <machine/resource.h>
#else
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#endif

/*
 * These structures are used internally to keep track of regions of physical
 * ram, and regions within the physical ram that need to be excluded.  An
 * exclusion region can be excluded from crash dumps, from the vm pool of pages
 * that can be allocated, or both, depending on the exclusion flags associated
 * with the region.
 */
#ifdef DEV_ACPI
#define	MAX_HWCNT	32	/* ACPI needs more regions */
#define	MAX_EXCNT	32
#else
#define	MAX_HWCNT	16
#define	MAX_EXCNT	16
#endif

#if defined(__arm__)
#define	MAX_PHYS_ADDR	0xFFFFFFFFull
#elif defined(__aarch64__) || defined(__amd64__) || defined(__riscv)
#define	MAX_PHYS_ADDR	0xFFFFFFFFFFFFFFFFull
#endif

struct region {
	vm_paddr_t	addr;
	vm_size_t	size;
	uint32_t	flags;
};

static struct region hwregions[MAX_HWCNT];
static struct region exregions[MAX_EXCNT];

static size_t hwcnt;
static size_t excnt;

/*
 * realmem is the total number of hardware pages, excluded or not.
 * Maxmem is one greater than the last physical page number.
 */
long realmem;
long Maxmem;

#ifndef _KERNEL
static void
panic(const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	vfprintf(stderr, fmt, va);
	fprintf(stderr, "\n");
	va_end(va);
	__builtin_trap();
}
#endif

/*
 * Print the contents of the physical and excluded region tables using the
 * provided printf-like output function (which will be either printf or
 * db_printf).
 */
static void
physmem_dump_tables(int (*prfunc)(const char *, ...) __printflike(1, 2))
{
	size_t i;
	int flags;
	uintmax_t addr, size;
	const unsigned int mbyte = 1024 * 1024;

	prfunc("Physical memory chunk(s):\n");
	for (i = 0; i < hwcnt; ++i) {
		addr = hwregions[i].addr;
		size = hwregions[i].size;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages)\n", addr,
		    addr + size - 1, size / mbyte, size / PAGE_SIZE);
	}

	prfunc("Excluded memory regions:\n");
	for (i = 0; i < excnt; ++i) {
		addr  = exregions[i].addr;
		size  = exregions[i].size;
		flags = exregions[i].flags;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages) %s %s\n",
		    addr, addr + size - 1, size / mbyte, size / PAGE_SIZE,
		    (flags & EXFLAG_NOALLOC) ? "NoAlloc" : "",
		    (flags & EXFLAG_NODUMP)  ? "NoDump" : "");
	}

#ifdef DEBUG
	prfunc("Avail lists:\n");
	for (i = 0; phys_avail[i] != 0; ++i) {
		prfunc("  phys_avail[%zu] 0x%08jx\n", i,
		    (uintmax_t)phys_avail[i]);
	}
	for (i = 0; dump_avail[i] != 0; ++i) {
		prfunc("  dump_avail[%zu] 0x%08jx\n", i,
		    (uintmax_t)dump_avail[i]);
	}
#endif
}
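
/*
 * For reference, the output of physmem_dump_tables() looks roughly like
 * this (illustrative values for a hypothetical board with 1 GB of ram at
 * 0x80000000 and 1 MB excluded for the kernel, not any real machine):
 *
 *   Physical memory chunk(s):
 *     0x80000000 - 0xbfffffff,  1024 MB ( 262144 pages)
 *   Excluded memory regions:
 *     0x80000000 - 0x800fffff,     1 MB (    256 pages) NoAlloc
 */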

/*
 * Print the contents of the physical and excluded region tables.  Used for
 * bootverbose.
 */
void
physmem_print_tables(void)
{

	physmem_dump_tables(printf);
}

/*
 * Walk the list of hardware regions, processing it against the list of
 * exclusions that contain the given exflags, and generating an "avail list".
 *
 * If maxphyssz is not zero it sets an upper limit, in bytes, on the total
 * "avail list" size.  The walk stops once the limit is reached and the last
 * region is cut short if necessary.
 *
 * Updates the value at *pavail with the number of non-excluded pages and the
 * value at *prealmem with the total number of pages in all hw regions.
 *
 * Returns the number of entries in the resulting avail list.
 */
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, size_t maxavail,
    uint64_t maxphyssz, long *pavail, long *prealmem)
{
	size_t acnt, exi, hwi;
	uint64_t adj, end, start, xend, xstart;
	long availmem, totalmem;
	const struct region *exp, *hwp;
	uint64_t availsz;

	totalmem = 0;
	availmem = 0;
	availsz = 0;
	acnt = 0;
	for (hwi = 0, hwp = hwregions; hwi < hwcnt; ++hwi, ++hwp) {
		adj   = round_page(hwp->addr) - hwp->addr;
		start = round_page(hwp->addr);
		end   = trunc_page(hwp->size + adj) + start;
		totalmem += atop((vm_offset_t)(end - start));
		for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
			/*
			 * If the excluded region does not match given flags,
			 * continue checking with the next excluded region.
			 */
			if ((exp->flags & exflags) == 0)
				continue;
			xstart = exp->addr;
			xend   = exp->size + xstart;
			/*
			 * If the excluded region ends before this hw region,
			 * continue checking with the next excluded region.
			 */
			if (xend <= start)
				continue;
			/*
			 * If the excluded region begins after this hw region
			 * we're done because both lists are sorted.
			 */
			if (xstart >= end)
				break;
			/*
			 * If the excluded region completely covers this hw
			 * region, shrink this hw region to zero size.
			 */
			if ((start >= xstart) && (end <= xend)) {
				start = xend;
				end = xend;
				break;
			}
			/*
			 * If the excluded region falls wholly within this hw
			 * region without abutting or overlapping the beginning
			 * or end, create an available entry from the leading
			 * fragment, then adjust the start of this hw region to
			 * the end of the excluded region, and continue checking
			 * the next excluded region because another exclusion
			 * could affect the remainder of this hw region.
			 */
			if ((xstart > start) && (xend < end)) {
				if ((maxphyssz != 0) &&
				    (availsz + xstart - start > maxphyssz)) {
					xstart = maxphyssz + start - availsz;
				}
				if (xstart <= start)
					continue;
				if (acnt > 0 &&
				    avail[acnt - 1] == (vm_paddr_t)start) {
					avail[acnt - 1] = (vm_paddr_t)xstart;
				} else {
					avail[acnt++] = (vm_paddr_t)start;
					avail[acnt++] = (vm_paddr_t)xstart;
				}
				availsz += (xstart - start);
				availmem += atop((vm_offset_t)(xstart - start));
				start = xend;
				continue;
			}
			/*
			 * We know the excluded region overlaps either the start
			 * or end of this hardware region (but not both), trim
			 * the excluded portion off the appropriate end.
			 */
			if (xstart <= start)
				start = xend;
			else
				end = xstart;
		}
		/*
		 * If the trimming actions above left a non-zero size, create an
		 * available entry for it.
		 */
		if (end > start) {
			if ((maxphyssz != 0) &&
			    (availsz + end - start > maxphyssz)) {
				end = maxphyssz + start - availsz;
			}
			if (end <= start)
				break;

			if (acnt > 0 && avail[acnt - 1] == (vm_paddr_t)start) {
				avail[acnt - 1] = (vm_paddr_t)end;
			} else {
				avail[acnt++] = (vm_paddr_t)start;
				avail[acnt++] = (vm_paddr_t)end;
			}
			availsz += end - start;
			availmem += atop((vm_offset_t)(end - start));
		}
		if (acnt >= maxavail)
			panic("Not enough space in the dump/phys_avail arrays");
	}

	if (pavail != NULL)
		*pavail = availmem;
	if (prealmem != NULL)
		*prealmem = totalmem;
	return (acnt);
}
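
/*
 * A worked example of the clipping above (hypothetical values): given a
 * single hardware region [0x80000000, 0xc0000000) and a single exclusion
 * [0x80100000, 0x80200000) whose flags match exflags, the middle-overlap
 * case emits the leading fragment and the post-loop code emits the rest:
 *
 *   avail[0] = 0x80000000   avail[1] = 0x80100000
 *   avail[2] = 0x80200000   avail[3] = 0xc0000000
 *
 * The function returns acnt == 4, and *pavail counts the pages in both
 * fragments while *prealmem counts all pages in the hardware region.
 */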

/*
 * Check if the region at idx can be merged with the region above it.
 */
static size_t
merge_upper_regions(struct region *regions, size_t rcnt, size_t idx)
{
	struct region *lower, *upper;
	vm_paddr_t lend, uend;
	size_t i, mergecnt, movecnt;

	lower = &regions[idx];
	lend = lower->addr + lower->size;

	/*
	 * Continue merging in upper entries as long as we have entries to
	 * merge; the new block could have spanned more than one, although one
	 * is likely the common case.
	 */
	for (i = idx + 1; i < rcnt; i++) {
		upper = &regions[i];
		if (lend < upper->addr || lower->flags != upper->flags)
			break;

		uend = upper->addr + upper->size;
		if (uend > lend) {
			lower->size += uend - lend;
			lend = lower->addr + lower->size;
		}

		if (uend >= lend) {
			/*
			 * If we didn't move past the end of the upper region,
			 * then we don't need to bother checking for another
			 * merge because it would have been done already.  Just
			 * increment i once more to maintain the invariant that
			 * i is one past the last entry merged.
			 */
			i++;
			break;
		}
	}

	/*
	 * We merged in the entries from [idx + 1, i); physically move the tail
	 * end at [i, rcnt) if we need to.
	 */
	mergecnt = i - (idx + 1);
	if (mergecnt > 0) {
		movecnt = rcnt - i;
		if (movecnt == 0) {
			/* Merged all the way to the end, just decrease rcnt. */
			rcnt = idx + 1;
		} else {
			memmove(&regions[idx + 1], &regions[idx + mergecnt + 1],
			    movecnt * sizeof(*regions));
			rcnt -= mergecnt;
		}
	}
	return (rcnt);
}
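
/*
 * Illustrative example (hypothetical values): suppose regions[] holds the
 * addr/size pairs { 0x1000/0x1000, 0x2000/0x1000, 0x4000/0x1000 } and the
 * entry at idx 0 has just grown to end at 0x3000.  The loop absorbs the
 * abutting entry [0x2000, 0x3000) (uend == lend, so nothing is added and
 * the scan stops), then the memmove pulls { 0x4000/0x1000 } down into
 * slot 1 and rcnt drops from 3 to 2.
 */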

/*
 * Insertion-sort a new entry into a regions list; sorted by start address.
 */
static size_t
insert_region(struct region *regions, size_t rcnt, vm_paddr_t addr,
    vm_size_t size, uint32_t flags)
{
	size_t i;
	vm_paddr_t nend, rend;
	struct region *ep, *rp;

	nend = addr + size;
	ep = regions + rcnt;
	for (i = 0, rp = regions; i < rcnt; ++i, ++rp) {
		rend = rp->addr + rp->size;
		if (flags == rp->flags) {
			if (addr <= rp->addr && nend >= rp->addr) {
				/*
				 * New mapping overlaps at the beginning; grow
				 * the region downward to the new start, then
				 * grow it upward if the new mapping extends
				 * past its end.
				 */
				rp->size += rp->addr - addr;
				rp->addr = addr;
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			} else if (addr <= rend && nend > rp->addr) {
				/*
				 * New mapping is either entirely contained
				 * within or it's overlapping at the end.
				 */
				if (nend > rend) {
					rp->size += nend - rend;
					rcnt = merge_upper_regions(regions,
					    rcnt, i);
				}
				return (rcnt);
			}
		} else if ((flags != 0) && (rp->flags != 0)) {
			/*
			 * If we're duplicating an entry that already exists
			 * exactly, just upgrade its flags as needed.  We could
			 * do more if we find that we have differently specified
			 * flags clipping existing exclusion regions, but that's
			 * probably rare.
			 */
			if (addr == rp->addr && nend == rend) {
				rp->flags |= flags;
				return (rcnt);
			}
		}

		if (addr < rp->addr) {
			bcopy(rp, rp + 1, (ep - rp) * sizeof(*rp));
			break;
		}
	}
	rp->addr  = addr;
	rp->size  = size;
	rp->flags = flags;
	rcnt++;

	return (rcnt);
}
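
/*
 * Example (hypothetical values): with one existing entry
 * { addr 0x2000, size 0x2000, flags 0 }, inserting addr 0x1000, size
 * 0x2000, flags 0 takes the overlap-at-the-beginning branch: the entry
 * grows to { addr 0x1000, size 0x3000 } and rcnt is unchanged.  Inserting
 * a disjoint addr 0x8000 instead falls off the end of the loop and is
 * appended as a new entry.
 */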

/*
 * Add a hardware memory region.
 */
void
physmem_hardware_region(uint64_t pa, uint64_t sz)
{
	/*
	 * Filter out the page at PA 0x00000000.  The VM can't handle it, as
	 * pmap_extract() == 0 means failure.
	 */
	if (pa == 0) {
		if (sz <= PAGE_SIZE)
			return;
		pa  = PAGE_SIZE;
		sz -= PAGE_SIZE;
	} else if (pa > MAX_PHYS_ADDR) {
		/* This range is past usable memory, ignore it */
		return;
	}

	/*
	 * Also filter out the page at the end of the physical address space --
	 * if addr is non-zero and addr+size is zero we wrapped to the next byte
	 * beyond what vm_paddr_t can express.  That leads to a NULL pointer
	 * deref early in startup; work around it by leaving the last page out.
	 *
	 * XXX This just in:  subtract out a whole megabyte, not just 1 page.
	 * Reducing the size by anything less than 1MB results in the NULL
	 * pointer deref in _vm_map_lock_read().  Better to give up a megabyte
	 * than leave some folks with an unusable system while we investigate.
	 */
	if ((pa + sz) > (MAX_PHYS_ADDR - 1024 * 1024)) {
		sz = MAX_PHYS_ADDR - pa + 1;
		if (sz <= 1024 * 1024)
			return;
		sz -= 1024 * 1024;
	}

	if (sz > 0 && hwcnt < nitems(hwregions))
		hwcnt = insert_region(hwregions, hwcnt, pa, sz, 0);
}
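
/*
 * A minimal usage sketch (assuming a machine-dependent early-boot routine
 * that has already parsed the firmware memory map into mr[]/mrcnt; those
 * names are illustrative, not part of this API):
 *
 *	for (i = 0; i < mrcnt; i++)
 *		physmem_hardware_region(mr[i].mr_start, mr[i].mr_size);
 *
 * Because insert_region() sorts and merges as it goes, the firmware map
 * can be handed in verbatim, overlaps and all.
 */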

/*
 * Add an exclusion region.
 */
void
physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t exflags)
{
	vm_offset_t adj;

	/*
	 * Truncate the starting address down to a page boundary, and round the
	 * ending page up to a page boundary.
	 */
	adj = pa - trunc_page(pa);
	pa  = trunc_page(pa);
	sz  = round_page(sz + adj);

	if (excnt >= nitems(exregions))
		panic("failed to exclude region %#jx-%#jx", (uintmax_t)pa,
		    (uintmax_t)(pa + sz));
	excnt = insert_region(exregions, excnt, pa, sz, exflags);
}
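
/*
 * Typical early-boot usage (illustrative; the exact flags and call sites
 * vary by platform): keep the kernel image out of the page pool but leave
 * it in crash dumps,
 *
 *	physmem_exclude_region(kernload, kernlen, EXFLAG_NOALLOC);
 *
 * where kernload/kernlen are hypothetical names for the kernel's physical
 * load address and size.  Firmware-reserved ranges would typically pass
 * EXFLAG_NOALLOC | EXFLAG_NODUMP instead.
 */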

size_t
physmem_avail(vm_paddr_t *avail, size_t maxavail)
{

	return (regions_to_avail(avail, EXFLAG_NOALLOC, maxavail, 0, NULL,
	    NULL));
}

bool
physmem_excluded(vm_paddr_t pa, vm_size_t sz)
{
	const struct region *exp;
	size_t exi;

	for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
		if (pa < exp->addr || pa + sz > exp->addr + exp->size)
			continue;
		return (true);
	}
	return (false);
}

#ifdef _KERNEL
/*
 * Process all the regions added earlier into the global avail lists.
 *
 * Updates the kernel global 'physmem' with the number of physical pages
 * available for use (all pages not in any exclusion region).
 *
 * Updates the kernel global 'Maxmem' with the page number one greater than
 * the last page of physical memory in the system.
 */
void
physmem_init_kernel_globals(void)
{
	size_t nextidx;
	u_long hwphyssz;

	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", &hwphyssz);

	regions_to_avail(dump_avail, EXFLAG_NODUMP, PHYS_AVAIL_ENTRIES,
	    hwphyssz, NULL, NULL);
	nextidx = regions_to_avail(phys_avail, EXFLAG_NOALLOC,
	    PHYS_AVAIL_ENTRIES, hwphyssz, &physmem, &realmem);
	if (nextidx == 0)
		panic("No memory entries in phys_avail");
	Maxmem = atop(phys_avail[nextidx - 1]);
}
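
/*
 * The "hw.physmem" loader tunable fetched above caps the amount of ram the
 * avail lists may describe; for example, setting (loader.conf syntax,
 * illustrative value):
 *
 *	hw.physmem="512M"
 *
 * limits the kernel to the first 512 MB of non-excluded memory, which is
 * useful for testing low-memory behavior on a large machine.
 */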

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND_FLAGS(physmem, db_show_physmem, DB_CMD_MEMSAFE)
{

	physmem_dump_tables(db_printf);
}

#endif /* DDB */

/*
 * ram pseudo driver - this reserves memory-space (SYS_RES_MEMORY) resources
 * corresponding to physical memory regions.
 */

static void
ram_identify(driver_t *driver, device_t parent)
{

	if (resource_disabled("ram", 0))
		return;
	if (BUS_ADD_CHILD(parent, 0, "ram", 0) == NULL)
		panic("ram_identify");
}

static int
ram_probe(device_t dev)
{

	device_quiet(dev);
	device_set_desc(dev, "System RAM");
	return (BUS_PROBE_SPECIFIC);
}

static int
ram_attach(device_t dev)
{
	vm_paddr_t avail_list[PHYS_AVAIL_COUNT];
	rman_res_t start, end;
	struct region *hwp;
	int rid, i;

	rid = 0;

	/* Get the avail list. */
	bzero(avail_list, sizeof(avail_list));
	regions_to_avail(avail_list, EXFLAG_NOALLOC | EXFLAG_NODUMP,
	    PHYS_AVAIL_COUNT, 0, NULL, NULL);

	/* Reserve all memory regions. */
	for (i = 0; avail_list[i + 1] != 0; i += 2) {
		start = avail_list[i];
		end = avail_list[i + 1];

		if (bootverbose)
			device_printf(dev,
			    "reserving memory region:   %jx-%jx\n",
			    (uintmax_t)start, (uintmax_t)end);

		if (bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, start, end,
		    end - start, 0) == NULL)
			panic("ram_attach: resource %d failed to attach", rid);
		rid++;
	}

	/* Now, reserve the excluded memory regions. */
	for (i = 0, hwp = exregions; i < excnt; i++, hwp++) {
		start = hwp->addr;
		end = hwp->addr + hwp->size;

		if (bootverbose)
			device_printf(dev,
			    "reserving excluded region: %jx-%jx\n",
			    (uintmax_t)start, (uintmax_t)(end - 1));

		/*
		 * Best-effort attempt to reserve the range. This may fail, as
		 * sometimes the excluded ranges provided by the device tree
		 * will cover or overlap some I/O range.
		 */
		if (bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, start, end,
		    end - start, 0) == NULL) {
			if (bootverbose)
				device_printf(dev, "failed to reserve region\n");
			continue;
		}
		rid++;
	}

	return (0);
}

static device_method_t ram_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	ram_identify),
	DEVMETHOD(device_probe,		ram_probe),
	DEVMETHOD(device_attach,	ram_attach),

	DEVMETHOD_END
};

DEFINE_CLASS_0(ram, ram_driver, ram_methods, /* no softc */ 1);
DRIVER_MODULE(ram, nexus, ram_driver, 0, 0);
#endif /* _KERNEL */