/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cpr.h>
#include <sys/fs/ufs_fs.h>
#include <sys/prom_plat.h>
#include "cprboot.h"


/*
 * max space for a copy of physavail data
 * prop size is usually 80 to 128 bytes
 */
#define	PA_BUFSIZE	1024

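/*
 * op codes for cb_bitop() below
 */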
#define	CB_SETBIT	1
#define	CB_ISSET	2
#define	CB_ISCLR	3

/*
 * globals
 */
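/*
 * number of bitmap descriptors in the statefile;
 * set elsewhere before cb_set_bitmap() is called
 */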
int cb_nbitmaps;

/*
 * file scope
 */
static arange_t *cb_physavail;
static char pabuf[PA_BUFSIZE];
static caddr_t high_virt;

static cbd_t cb_bmda[CPR_MAX_BMDESC];
static int tracking_init;

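/*
 * apply a bit op to the page bit for ppn within the first bitmap
 * descriptor whose range covers ppn; returns the isset/isclr result,
 * or 0 when ppn is uncovered or the op is CB_SETBIT.  setbit/isset/
 * isclr are presumably the usual <sys/param.h> bit-array macros.
 */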
static int
cb_bitop(pfn_t ppn, int op)
{
	int rel, rval = 0;
	char *bitmap;
	cbd_t *dp;

	for (dp = cb_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = (char *)dp->cbd_reg_bitmap;
			rel = ppn - dp->cbd_spfn;
			if (op == CB_SETBIT)
				setbit(bitmap, rel);
			else if (op == CB_ISSET)
				rval = isset(bitmap, rel);
			else if (op == CB_ISCLR)
				rval = isclr(bitmap, rel);
			break;
		}
	}

	return (rval);
}


/*
 * count pages that are isolated from the kernel
 * within each available range
 */
static void
count_free_pages(void)
{
	arange_t *arp;
	pfn_t bitno;
	int cnt;

	for (arp = cb_physavail; arp->high; arp++) {
		cnt = 0;
		for (bitno = arp->low; bitno <= arp->high; bitno++) {
			if (cb_bitop(bitno, CB_ISCLR))
				cnt++;
		}
		arp->nfree = cnt;
	}
}


/*
 * scan the physavail list for a page
 * that doesn't clash with the kernel
 */
static pfn_t
search_phav_pages(void)
{
	static arange_t *arp;
	static pfn_t bitno;
	int rescan;

	if (arp == NULL) {
		count_free_pages();
		arp = cb_physavail;
		bitno = arp->low;
	}

	/*
	 * begin scanning from the previous position and, if the scan
	 * reaches the end of the list, scan a second time from the top;
	 * nfree is checked to eliminate scanning overhead when most
	 * of the available space gets used up.  when a page is found,
	 * set a bit so the page won't be found by another scan.
	 */
	for (rescan = 0; rescan < 2; rescan++) {
		for (; arp->high; bitno = (++arp)->low) {
			if (arp->nfree == 0)
				continue;
			for (; bitno <= arp->high; bitno++) {
				if (cb_bitop(bitno, CB_ISCLR)) {
					(void) cb_bitop(bitno, CB_SETBIT);
					arp->nfree--;
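					/* resume past this page next call */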
					return (bitno++);
				}
			}
		}
		arp = cb_physavail;
		bitno = arp->low;
	}

	return (PFN_INVALID);
}


/*
 * scan statefile buffer pages for reusable tmp space
 */
static pfn_t
search_buf_pages(void)
{
	size_t coff, src_base;
	static size_t lboff;
	pfn_t ppn;

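	/* no tracking map until cb_tracking_setup() has run */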
	if (tracking_init == 0)
		return (PFN_INVALID);

	/*
	 * when scanning the list of statefile buffer ppns, we know that
	 * all pages from lboff to the page boundary of buf_offset have
	 * already been restored; when the associated page bit is clear,
	 * that page is isolated from the kernel and we can reuse it for
	 * tmp space; otherwise, when SF_DIFF_PPN indicates a page had
	 * been moved, we know the page bit was previously clear and
	 * later set, and we can reuse the new page.
	 */
	src_base = sfile.buf_offset & MMU_PAGEMASK;
	while (lboff < src_base) {
		coff = lboff;
		lboff += MMU_PAGESIZE;
		ppn = SF_ORIG_PPN(coff);
		if (cb_bitop(ppn, CB_ISCLR)) {
			(void) cb_bitop(ppn, CB_SETBIT);
			SF_STAT_INC(recycle);
			return (ppn);
		} else if (SF_DIFF_PPN(coff)) {
			SF_STAT_INC(recycle);
			return (SF_BUF_PPN(coff));
		}
	}

	return (PFN_INVALID);
}


/*
 * scan physavail and statefile buffer page lists
 * for a page that doesn't clash with the kernel
 */
pfn_t
find_apage(void)
{
	pfn_t ppn;

	ppn = search_phav_pages();
	if (ppn != PFN_INVALID)
		return (ppn);
	ppn = search_buf_pages();
	if (ppn != PFN_INVALID)
		return (ppn);

	prom_printf("\n%s: ran out of available/free pages!\n%s\n",
	    prog, rsvp);
	cb_exit_to_mon();

	/* NOTREACHED */
	return (PFN_INVALID);
}


/*
 * reserve virt range, find available phys pages,
 * and map-in each phys starting at vaddr
 */
static caddr_t
map_free_phys(caddr_t vaddr, size_t size, char *name)
{
	int pages, err;
	pfn_t ppn;
	physaddr_t phys;
	caddr_t virt;
	char *str;

	str = "map_free_phys";
	virt = prom_claim_virt(size, vaddr);
	CB_VPRINTF(("\n%s: claim vaddr 0x%p, size 0x%lx, ret 0x%p\n",
	    str, vaddr, size, virt));
	if (virt != vaddr) {
		prom_printf("\n%s: can't reserve (0x%p - 0x%p) for \"%s\"\n",
		    str, vaddr, vaddr + size, name);
		return (virt);
	}

	for (pages = mmu_btop(size); pages--; virt += MMU_PAGESIZE) {
		/*
		 * map virt page to free phys
		 */
		ppn = find_apage();
		phys = PN_TO_ADDR(ppn);

		err = prom_map_phys(-1, MMU_PAGESIZE, virt, phys);
		if (err || verbose) {
			prom_printf("    map virt 0x%p, phys 0x%llx, "
			    "ppn 0x%lx, ret %d\n", virt, phys, ppn, err);
		}
		if (err)
			return ((caddr_t)ERR);
	}

	return (vaddr);
}


/*
 * check bitmap desc and relocate bitmap data
 * to pages isolated from the kernel
 *
 * sets globals:
 *	high_virt
 */
int
cb_set_bitmap(void)
{
	size_t bmda_size, all_bitmap_size, alloc_size;
	caddr_t newvirt, src, dst, base;
	cbd_t *dp;
	char *str;

	str = "cb_set_bitmap";
	CB_VPRINTF((ent_fmt, str, entry));

	/*
	 * max is checked in the cpr module;
	 * this condition should never occur
	 */
	if (cb_nbitmaps > (CPR_MAX_BMDESC - 1)) {
		prom_printf("%s: too many bitmap descriptors %d, max %d\n",
		    str, cb_nbitmaps, (CPR_MAX_BMDESC - 1));
		return (ERR);
	}

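	/*
	 * statefile data layout at this point (a sketch):
	 *	cbd_t[cb_nbitmaps]	bitmap descriptors
	 *	bitmap data		cbd_size bytes per descriptor
	 */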
	/*
	 * copy bitmap descriptors to aligned space, check magic numbers,
	 * and set the total size of all bitmaps
	 */
	bmda_size = cb_nbitmaps * sizeof (cbd_t);
	src = SF_DATA();
	bcopy(src, cb_bmda, bmda_size);
	base = src + bmda_size;
	all_bitmap_size = 0;
	for (dp = cb_bmda; dp < &cb_bmda[cb_nbitmaps]; dp++) {
		if (dp->cbd_magic != CPR_BITMAP_MAGIC) {
			prom_printf("%s: bad magic 0x%x, expect 0x%x\n",
			    str, dp->cbd_magic, CPR_BITMAP_MAGIC);
			return (ERR);
		}
		all_bitmap_size += dp->cbd_size;
		dp->cbd_reg_bitmap = (cpr_ptr)base;
		base += dp->cbd_size;
	}

	/*
	 * reserve new space for bitmaps
	 */
	alloc_size = PAGE_ROUNDUP(all_bitmap_size);
	if (verbose || CPR_DBG(7)) {
		prom_printf("%s: nbitmaps %d, bmda_size 0x%lx\n",
		    str, cb_nbitmaps, bmda_size);
		prom_printf("%s: all_bitmap_size 0x%lx, alloc_size 0x%lx\n",
		    str, all_bitmap_size, alloc_size);
	}
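	/*
	 * CB_HIGH_VIRT is presumably a virt range reserved for cprboot
	 * in cprboot.h; bitmaps claim the bottom of it, and high_virt
	 * advances past them below for later allocations
	 */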
	high_virt = (caddr_t)CB_HIGH_VIRT;
	newvirt = map_free_phys(high_virt, alloc_size, "bitmaps");
	if (newvirt != high_virt)
		return (ERR);

	/*
	 * copy the bitmaps, clear any unused space trailing them,
	 * and set references into the new space
	 */
	base = src + bmda_size;
	dst = newvirt;
	bcopy(base, dst, all_bitmap_size);
	if (alloc_size > all_bitmap_size)
		bzero(dst + all_bitmap_size, alloc_size - all_bitmap_size);
	for (dp = cb_bmda; dp->cbd_size; dp++) {
		dp->cbd_reg_bitmap = (cpr_ptr)dst;
		dst += dp->cbd_size;
	}

	/* advance past all the bitmap data */
	SF_ADV(bmda_size + all_bitmap_size);
	high_virt += alloc_size;

	return (0);
}


/*
 * create a new stack for cprboot;
 * this stack is used to avoid clashes with kernel pages and
 * to avoid exceptions while remapping cprboot virt pages
 */
int
cb_get_newstack(void)
{
	caddr_t newstack;

	CB_VENTRY(cb_get_newstack);
	newstack = map_free_phys((caddr_t)CB_STACK_VIRT,
	    CB_STACK_SIZE, "new stack");
	if (newstack != (caddr_t)CB_STACK_VIRT)
		return (ERR);
	return (0);
}


/*
 * since kernel phys pages span most of the installed memory range,
 * some statefile buffer pages will likely clash with the kernel
 * and need to be moved before kernel pages are restored; a list
 * of buf phys page numbers is created here and later updated as
 * buf pages are moved
 *
 * sets globals:
 *	sfile.buf_map
 *	tracking_init
 */
int
cb_tracking_setup(void)
{
	pfn_t ppn, lppn;
	uint_t *imap;
	caddr_t newvirt;
	size_t size;
	int pages;

	CB_VENTRY(cb_tracking_setup);

	pages = mmu_btop(sfile.size);
	size = PAGE_ROUNDUP(pages * sizeof (*imap));
	newvirt = map_free_phys(high_virt, size, "buf tracking");
	if (newvirt != high_virt)
		return (ERR);
	sfile.buf_map = (uint_t *)newvirt;
	high_virt += size;

	/*
	 * create identity map of sfile.buf phys pages
	 */
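	/*
	 * search_buf_pages() presumably reads this map through the
	 * SF_*_PPN macros; entries are updated as buf pages get moved
	 */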
	imap = sfile.buf_map;
	lppn = sfile.low_ppn + pages;
	for (ppn = sfile.low_ppn; ppn < lppn; ppn++, imap++)
		*imap = (uint_t)ppn;
	tracking_init = 1;

	return (0);
}


/*
 * get "available" prop from /memory node
 *
 * sets globals:
 *	cb_physavail
 */
int
cb_get_physavail(void)
{
	int len, glen, scnt, need, space;
	char *str, *pdev, *mem_prop;
	pnode_t mem_node;
	physaddr_t phys;
	pgcnt_t pages;
	arange_t *arp;
	pphav_t *pap;
	size_t size;
	pfn_t ppn;
	int err;

	str = "cb_get_physavail";
	CB_VPRINTF((ent_fmt, str, entry));

	/*
	 * first move cprboot pages off the physavail list
	 */
	size = PAGE_ROUNDUP((uintptr_t)_end) - (uintptr_t)_start;
	ppn = cpr_vatopfn((caddr_t)_start);
	phys = PN_TO_ADDR(ppn);
	err = prom_claim_phys(size, phys);
	CB_VPRINTF(("    text/data claim (0x%lx - 0x%lx) = %d\n",
	    ppn, ppn + mmu_btop(size) - 1, err));
	if (err)
		return (ERR);

	pdev = "/memory";
	mem_node = prom_finddevice(pdev);
	if (mem_node == OBP_BADNODE) {
		prom_printf("%s: can't find \"%s\" node\n", str, pdev);
		return (ERR);
	}
	mem_prop = "available";

	/*
	 * prop data is treated as a struct array;
	 * verify pabuf has enough room for the array
	 * in the original and converted forms
	 */
	len = prom_getproplen(mem_node, mem_prop);
	scnt = len / sizeof (*pap);
	need = len + (sizeof (*arp) * (scnt + 1));
	space = sizeof (pabuf);
	CB_VPRINTF(("    %s node 0x%x, len %d\n", pdev, mem_node, len));
	if (len == -1 || need > space) {
		prom_printf("\n%s: bad \"%s\" length %d, min %d, max %d\n",
		    str, mem_prop, len, need, space);
		return (ERR);
	}

	/*
	 * read-in prop data and clear trailing space
	 */
	glen = prom_getprop(mem_node, mem_prop, pabuf);
	if (glen != len) {
		prom_printf("\n%s: 0x%x,%s: expected len %d, got %d\n",
		    str, mem_node, mem_prop, len, glen);
		return (ERR);
	}
	bzero(&pabuf[len], space - len);

	/*
	 * convert the physavail list in place
	 * from (phys_base, phys_size) to (low_ppn, high_ppn)
	 */
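	/*
	 * e.g. with 8K pages, a (base 0x20000000, size 0x8000000) entry
	 * becomes (low 0x10000, high 0x13fff)
	 */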
	if (verbose)
		prom_printf("\nphysavail list:\n");
	cb_physavail = (arange_t *)pabuf;
	arp = cb_physavail + scnt - 1;
	pap = (pphav_t *)cb_physavail + scnt - 1;
	for (; scnt--; pap--, arp--) {
		pages = mmu_btop(pap->size);
		arp->low = ADDR_TO_PN(pap->base);
		arp->high = arp->low + pages - 1;
		if (verbose) {
			prom_printf("  %d: (0x%lx - 0x%lx),\tpages %ld\n",
			    (int)(arp - cb_physavail),
			    arp->low, arp->high, (arp->high - arp->low + 1));
		}
	}

	return (0);
}


/*
 * search for an available phys page,
 * copy the old phys page to the new one
 * and remap the virt page to the new phys
 */
static int
move_page(caddr_t vaddr, pfn_t oldppn)
{
	physaddr_t oldphys, newphys;
	pfn_t newppn;
	int err;

	newppn = find_apage();
	newphys = PN_TO_ADDR(newppn);
	oldphys = PN_TO_ADDR(oldppn);
	CB_VPRINTF(("    remap vaddr 0x%p, old 0x%lx/0x%llx,"
	    " new 0x%lx/0x%llx\n",
	    vaddr, oldppn, oldphys, newppn, newphys));
	phys_xcopy(oldphys, newphys, MMU_PAGESIZE);
	err = prom_remap(MMU_PAGESIZE, vaddr, newphys);
	if (err)
		prom_printf("\nmove_page: remap error\n");
	return (err);
}


/*
 * physically relocate any text/data pages that clash
 * with the kernel; since we're already running on
 * a new stack, the original stack area is skipped
 */
int
cb_relocate(void)
{
	int is_ostk, is_clash, clash_cnt, ok_cnt;
	char *str, *desc, *skip_fmt;
	caddr_t ostk_low, ostk_high;
	caddr_t virt, saddr, eaddr;
	pfn_t ppn;

	str = "cb_relocate";
	CB_VPRINTF((ent_fmt, str, entry));

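	/*
	 * the original stack is the CB_STACK_SIZE area ending at estack;
	 * ostk_high is the start of its last page
	 */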
	ostk_low  = (caddr_t)&estack - CB_STACK_SIZE;
	ostk_high = (caddr_t)&estack - MMU_PAGESIZE;
	saddr = (caddr_t)_start;
	eaddr = (caddr_t)PAGE_ROUNDUP((uintptr_t)_end);

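	/*
	 * install_remap() presumably sets up the remap support used
	 * via prom_remap() below, so remapping our own pages is safe
	 */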
	install_remap();

	skip_fmt = "    skip  vaddr 0x%p, clash=%d, %s\n";
	clash_cnt = ok_cnt = 0;
	ppn = cpr_vatopfn(saddr);

	for (virt = saddr; virt < eaddr; virt += MMU_PAGESIZE, ppn++) {
		is_clash = (cb_bitop(ppn, CB_ISSET) != 0);
		if (is_clash)
			clash_cnt++;
		else
			ok_cnt++;

		is_ostk = (virt >= ostk_low && virt <= ostk_high);
		if (is_ostk)
			desc = "orig stack";
		else
			desc = "text/data";

		/*
		 * page logic:
		 *
		 * if (original stack page)
		 *	clash doesn't matter, just skip the page
		 * else (not original stack page)
		 *	if (no clash)
		 *		setbit to avoid later alloc and overwrite
		 *	else (clash)
		 *		relocate phys page
		 */
		if (is_ostk) {
			CB_VPRINTF((skip_fmt, virt, is_clash, desc));
		} else if (is_clash == 0) {
			CB_VPRINTF((skip_fmt, virt, is_clash, desc));
			(void) cb_bitop(ppn, CB_SETBIT);
		} else if (move_page(virt, ppn))
			return (ERR);
	}
	CB_VPRINTF(("%s: total %d, clash %d, ok %d\n",
	    str, clash_cnt + ok_cnt, clash_cnt, ok_cnt));

	/*
	 * free original stack area for reuse
	 */
	ppn = cpr_vatopfn(ostk_low);
	prom_free_phys(CB_STACK_SIZE, PN_TO_ADDR(ppn));
	CB_VPRINTF(("%s: free old stack (0x%lx - 0x%lx)\n",
	    str, ppn, ppn + mmu_btop(CB_STACK_SIZE) - 1));

	return (0);
}
601