/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Platform specific implementation code
 */

#define	SUNDDI_IMPL

#include <sys/types.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/prom_plat.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/iommu.h>
#include <sys/scb.h>
#include <sys/cpuvar.h>
#include <sys/intreg.h>
#include <sys/pte.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/as.h>
#include <sys/cpr.h>
#include <sys/kmem.h>
#include <sys/clock.h>
#include <sys/panic.h>
#include <vm/seg_kmem.h>
#include <sys/cpu_module.h>
#include <sys/callb.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/stack.h>
#include <sys/fs/ufs_fs.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/thread.h>
#include <vm/vm_dep.h>

extern void cpr_clear_bitmaps(void);
extern void dtlb_wr_entry(uint_t, tte_t *, uint64_t *);
extern void itlb_wr_entry(uint_t, tte_t *, uint64_t *);

static int i_cpr_storage_desc_alloc(csd_t **, pgcnt_t *, csd_t **, int);
static void i_cpr_storage_desc_init(csd_t *, pgcnt_t, csd_t *);
static caddr_t i_cpr_storage_data_alloc(pgcnt_t, pgcnt_t *, int);
static int cpr_dump_sensitive(vnode_t *, csd_t *);
static void i_cpr_clear_entries(uint64_t, uint64_t);
static void i_cpr_xcall(xcfunc_t);

void i_cpr_storage_free(void);

extern void *i_cpr_data_page;
extern int cpr_test_mode;
extern int cpr_nbitmaps;
extern char cpr_default_path[];
extern caddr_t textva, datava;

static struct cpr_map_info cpr_prom_retain[CPR_PROM_RETAIN_CNT];
caddr_t cpr_vaddr = NULL;

static uint_t sensitive_pages_saved;
static uint_t sensitive_size_saved;

caddr_t i_cpr_storage_data_base;
caddr_t i_cpr_storage_data_end;
csd_t *i_cpr_storage_desc_base;
csd_t *i_cpr_storage_desc_end;		/* one byte beyond last used descp */
csd_t *i_cpr_storage_desc_last_used;	/* last used descriptor */
caddr_t sensitive_write_ptr;		/* position for next storage write */

size_t i_cpr_sensitive_bytes_dumped;
pgcnt_t i_cpr_sensitive_pgs_dumped;
pgcnt_t i_cpr_storage_data_sz;		/* in pages */
pgcnt_t i_cpr_storage_desc_pgcnt;	/* in pages */

ushort_t cpr_mach_type = CPR_MACHTYPE_4U;
static csu_md_t m_info;


#define	MAX_STORAGE_RETRY	3
#define	MAX_STORAGE_ALLOC_RETRY	3
#define	INITIAL_ALLOC_PCNT	40	/* starting allocation percentage */
#define	INTEGRAL		100	/* to get 1% precision */

#define	EXTRA_RATE		2	/* add EXTRA_RATE% extra space */
#define	EXTRA_DESCS		10

#define	CPR_NO_STORAGE_DESC	1
#define	CPR_NO_STORAGE_DATA	2

#define	CIF_SPLICE		0
#define	CIF_UNLINK		1


/*
 * CPR miscellaneous support routines
 */
#define	cpr_open(path, mode, vpp)	(vn_open(path, UIO_SYSSPACE, \
		mode, 0600, vpp, CRCREAT, 0))
#define	cpr_rdwr(rw, vp, basep, cnt)	(vn_rdwr(rw, vp, (caddr_t)(basep), \
		cnt, 0LL, UIO_SYSSPACE, 0, (rlim64_t)MAXOFF_T, CRED(), \
		(ssize_t *)NULL))
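
/*
 * Illustrative sketch only (CPR_EXAMPLE is a hypothetical guard, not
 * part of the build): the macros above let callers do whole-struct
 * reads and writes in a single call, as i_cpr_check_cprinfo() does
 * further below with the cprinfo mini header.
 */
#ifdef CPR_EXAMPLE
static int
cpr_example_read_mini(vnode_t *vp, cmini_t *minip)
{
	/* read sizeof (*minip) bytes from offset 0 of vp */
	return (cpr_rdwr(UIO_READ, vp, minip, sizeof (*minip)));
}
#endif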

/*
 * definitions for saving/restoring prom pages
 */
static void	*ppage_buf;
static pgcnt_t	ppage_count;
static pfn_t	*pphys_list;
static size_t	pphys_list_size;

typedef void (*tlb_rw_t)(uint_t, tte_t *, uint64_t *);
typedef void (*tlb_filter_t)(int, tte_t *, uint64_t, void *);

/*
 * private struct for tlb handling
 */
struct cpr_trans_info {
	sutlb_t		*dst;
	sutlb_t		*tail;
	tlb_rw_t	reader;
	tlb_rw_t	writer;
	tlb_filter_t	filter;
	int		index;
	uint64_t	skip;		/* assumes TLB <= 64 locked entries */
};
typedef struct cpr_trans_info cti_t;


/*
 * special handling for tlb info
 */
#define	WITHIN_OFW(va) \
	(((va) > (uint64_t)OFW_START_ADDR) && ((va) < (uint64_t)OFW_END_ADDR))

#define	WITHIN_NUCLEUS(va, base) \
	(((va) >= (base)) && \
	(((va) + MMU_PAGESIZE) <= ((base) + MMU_PAGESIZE4M)))

#define	IS_BIGKTSB(va) \
	(enable_bigktsb && \
	((va) >= (uint64_t)ktsb_base) && \
	((va) < (uint64_t)(ktsb_base + ktsb_sz)))


/*
 * WARNING:
 * the text from this file is linked to follow cpr_resume_setup.o;
 * only add text between here and i_cpr_end_jumpback when it needs
 * to be called during resume before we switch back to the kernel
 * trap table.  all the text in this range must fit within a page.
 */


/*
 * each time a machine is reset, the prom uses an inconsistent set of phys
 * pages and the cif cookie may differ as well.  so prior to restoring the
 * original prom, we have to use the new/tmp prom's translations
 * when requesting prom services.
 *
 * cif_handler starts out as the original prom cookie, and that gets used
 * by client_handler() to jump into the prom.  here we splice-in a wrapper
 * routine by writing cif_handler; client_handler() will now jump to the
 * wrapper which switches the %tba to the new/tmp prom's trap table then
 * jumps to the new cookie.
 */
void
i_cpr_cif_setup(int action)
{
	extern void *i_cpr_orig_cif, *cif_handler;
	extern int i_cpr_cif_wrapper(void *);

	/*
	 * save the original cookie and change the current cookie to the
	 * wrapper routine.  later we just restore the original cookie.
	 */
	if (action == CIF_SPLICE) {
		i_cpr_orig_cif = cif_handler;
		cif_handler = (void *)i_cpr_cif_wrapper;
	} else if (action == CIF_UNLINK)
		cif_handler = i_cpr_orig_cif;
}
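
/*
 * Illustrative sketch (CPR_EXAMPLE is a hypothetical guard): the
 * splice/unlink calls must bracket the whole window in which prom
 * services are requested through the new/tmp prom, exactly as
 * i_cpr_mp_setup() below pairs them.
 */
#ifdef CPR_EXAMPLE
static void
cpr_example_cif_window(void)
{
	i_cpr_cif_setup(CIF_SPLICE);	/* prom calls now go via wrapper */
	/* ... request prom services from the new/tmp prom here ... */
	i_cpr_cif_setup(CIF_UNLINK);	/* restore the original cookie */
}
#endif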

/*
 * launch slave cpus into kernel text, pause them,
 * and restore the original prom pages
 */
void
i_cpr_mp_setup(void)
{
	extern void restart_other_cpu(int);
	ihandle_t tmpout = 0;
	char *str;
	cpu_t *cp;

	uint64_t kctx = kcontextreg;

	/*
	 * Do not allow setting page size codes in the MMU primary context
	 * register while using the cif wrapper.  This is needed to work
	 * around OBP's incorrect handling of this MMU register.
	 */
	kcontextreg = 0;

	/*
	 * reset cpu_ready_set so x_calls work properly
	 */
	CPUSET_ZERO(cpu_ready_set);
	CPUSET_ADD(cpu_ready_set, getprocessorid());

	/*
	 * setup cif to use the cookie from the new/tmp prom
	 * and setup tmp handling for calling prom services.
	 */
	i_cpr_cif_setup(CIF_SPLICE);

	/*
	 * at this point, only the nucleus and a few cpr pages are
	 * mapped in.  once we switch to the kernel trap table,
	 * we can access the rest of kernel space.
	 */
	prom_set_traptable(&trap_table);

	if (ncpus > 1) {
		sfmmu_init_tsbs();

		if (cpr_debug & LEVEL1) {
			prom_interpret("stdout @ swap l!", (uintptr_t)&tmpout,
			    0, 0, 0, 0);
			str = "MP startup...\r\n";
			(void) prom_write(tmpout, str, strlen(str), 0, 0);
		}

		mutex_enter(&cpu_lock);
		/*
		 * The slave cpus are not ready at this time,
		 * yet the cpu structures have various cpu_flags set;
		 * clear cpu_flags and mutex_ready.
		 * Since we are coming up from a CPU suspend, the slave cpus
		 * are frozen.
		 */
		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) {
			cp->cpu_flags = CPU_FROZEN;
			cp->cpu_m.mutex_ready = 0;
		}

		for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next)
			restart_other_cpu(cp->cpu_id);

		pause_cpus(NULL);
		mutex_exit(&cpu_lock);

		if (cpr_debug & LEVEL1) {
			str = "MP paused...\r\n";
			(void) prom_write(tmpout, str, strlen(str), 0, 0);
		}

		i_cpr_xcall(i_cpr_clear_entries);
	} else
		i_cpr_clear_entries(0, 0);

	/*
	 * now unlink the cif wrapper;  WARNING: do not call any
	 * prom_xxx() routines until after prom pages are restored.
	 */
	i_cpr_cif_setup(CIF_UNLINK);

	(void) i_cpr_prom_pages(CPR_PROM_RESTORE);

	/* allow setting page size codes in MMU primary context register */
	kcontextreg = kctx;
}


/*
 * end marker for jumpback page;
 * this symbol is used to check the size of i_cpr_resume_setup()
 * and the above text.  For simplicity, the Makefile needs to
 * link i_cpr_resume_setup.o and cpr_impl.o consecutively.
 */
void
i_cpr_end_jumpback(void)
{
}


/*
 * scan tlb entries with reader; when valid entries are found,
 * the filter routine will selectively save/clear them
 */
static void
i_cpr_scan_tlb(cti_t *ctip)
{
	uint64_t va_tag;
	int tlb_index;
	tte_t tte;

	for (tlb_index = ctip->index; tlb_index >= 0; tlb_index--) {
		(*ctip->reader)((uint_t)tlb_index, &tte, &va_tag);
		if (va_tag && TTE_IS_VALID(&tte))
			(*ctip->filter)(tlb_index, &tte, va_tag, ctip);
	}
}
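
/*
 * Illustrative sketch (CPR_EXAMPLE is a hypothetical guard): wiring a
 * cti_t for a dtlb scan with a trivial counting filter; the filter
 * only ever sees valid entries, since i_cpr_scan_tlb() screens out
 * the invalid ones itself.
 */
#ifdef CPR_EXAMPLE
static int cpr_example_nvalid;

/* ARGSUSED */
static void
cpr_example_count_filter(int index, tte_t *ttep, uint64_t va_tag,
    void *ctrans)
{
	cpr_example_nvalid++;
}

static void
cpr_example_scan_dtlb(void)
{
	cti_t cti;

	bzero(&cti, sizeof (cti));
	cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1;
	cti.reader = dtlb_rd_entry;
	cti.filter = cpr_example_count_filter;
	i_cpr_scan_tlb(&cti);
}
#endif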

/*
 * filter for locked tlb entries that reference the text/data nucleus
 * and any bigktsb's; these will be reinstalled by cprboot on all cpus
 */
/* ARGSUSED */
static void
i_cpr_lnb(int index, tte_t *ttep, uint64_t va_tag, void *ctrans)
{
	cti_t *ctip;

	/*
	 * record tlb data at ctip->dst; the target tlb index starts
	 * at the highest tlb offset and moves towards 0.  the prom
	 * reserves both dtlb and itlb index 0.  any selected entry
	 * also gets marked to prevent being flushed during resume
	 */
	if (TTE_IS_LOCKED(ttep) && (va_tag == (uint64_t)textva ||
	    va_tag == (uint64_t)datava || IS_BIGKTSB(va_tag))) {
		ctip = ctrans;
		while (((uint64_t)1 << ctip->index) & ctip->skip)
			ctip->index--;
		ASSERT(ctip->index > 0);
		ASSERT(ctip->dst < ctip->tail);
		ctip->dst->tte.ll = ttep->ll;
		ctip->dst->va_tag = va_tag;
		ctip->dst->index = ctip->index--;
		ctip->dst->tmp = 0;
		ctip->dst++;
	}
}


/*
 * some tlb entries are stale; filter for unlocked entries
 * within the prom virt range and clear them
 */
static void
i_cpr_ufw(int index, tte_t *ttep, uint64_t va_tag, void *ctrans)
{
	sutlb_t clr;
	cti_t *ctip;

	if (!TTE_IS_LOCKED(ttep) && WITHIN_OFW(va_tag)) {
		ctip = ctrans;
		bzero(&clr, sizeof (clr));
		(*ctip->writer)((uint_t)index, &clr.tte, &clr.va_tag);
	}
}


/*
 * some of the entries installed by cprboot are needed only on a
 * short-term basis and need to be flushed to avoid clogging the tlbs.
 * scan the dtte/itte arrays for items marked as temporary and clear
 * dtlb/itlb entries using wrfunc.
 */
static void
i_cpr_clear_tmp(sutlb_t *listp, int max, tlb_rw_t wrfunc)
{
	sutlb_t clr, *tail;

	bzero(&clr, sizeof (clr));
	for (tail = listp + max; listp < tail && listp->va_tag; listp++) {
		if (listp->tmp)
			(*wrfunc)((uint_t)listp->index, &clr.tte, &clr.va_tag);
	}
}


/* ARGSUSED */
static void
i_cpr_clear_entries(uint64_t arg1, uint64_t arg2)
{
	extern void demap_all(void);
	cti_t cti;

	i_cpr_clear_tmp(m_info.dtte, CPR_MAX_TLB, dtlb_wr_entry);
	i_cpr_clear_tmp(m_info.itte, CPR_MAX_TLB, itlb_wr_entry);

	/*
	 * for newer cpus that implement DEMAP_ALL_TYPE, demap_all is
	 * a second label for vtag_flushall.  the call is made using
	 * vtag_flushall() instead of demap_all() due to runtime and
	 * krtld results with both older and newer cpu modules.
	 */
	if (&demap_all != 0) {
		vtag_flushall();
		return;
	}

	/*
	 * for older V9 cpus, scan tlbs and clear stale entries
	 */
	bzero(&cti, sizeof (cti));
	cti.filter = i_cpr_ufw;

	cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1;
	cti.reader = dtlb_rd_entry;
	cti.writer = dtlb_wr_entry;
	i_cpr_scan_tlb(&cti);

	cti.index = cpunodes[CPU->cpu_id].itlb_size - 1;
	cti.reader = itlb_rd_entry;
	cti.writer = itlb_wr_entry;
	i_cpr_scan_tlb(&cti);
}
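
/*
 * A note on the "&demap_all != 0" test above: it is the usual kernel
 * weak-symbol presence check - the address is nonzero only when the
 * loaded cpu module actually resolves the symbol.  A minimal sketch
 * of the pattern (declarations assumed, illustration only):
 */
#ifdef CPR_EXAMPLE
extern void demap_all(void);
#pragma weak demap_all

static void
cpr_example_flushall(void)
{
	if (&demap_all != 0)	/* cpu module provides DEMAP_ALL_TYPE? */
		vtag_flushall();
}
#endif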

/*
 * craft tlb info for tmp use during resume; this data gets used by
 * cprboot to install tlb entries.  we also mark each struct as tmp
 * so those tlb entries will get flushed after switching to the kernel
 * trap table.  no data needs to be recorded for vaddr when it falls
 * within the nucleus since we've already recorded nucleus ttes and
 * an 8K tte would conflict with a 4MB tte.  eg: the cpr module
 * text/data may have been loaded into the text/data nucleus.
 */
static void
i_cpr_make_tte(cti_t *ctip, void *vaddr, caddr_t nbase)
{
	pfn_t ppn;
	uint_t rw;

	if (WITHIN_NUCLEUS((caddr_t)vaddr, nbase))
		return;

	while (((uint64_t)1 << ctip->index) & ctip->skip)
		ctip->index--;
	ASSERT(ctip->index > 0);
	ASSERT(ctip->dst < ctip->tail);

	/*
	 * without any global service available to lookup
	 * a tte by vaddr, we craft our own here:
	 */
	ppn = va_to_pfn(vaddr);
	rw = (nbase == datava) ? TTE_HWWR_INT : 0;
	ctip->dst->tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn);
	ctip->dst->tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT |
	    TTE_CP_INT | TTE_PRIV_INT | rw;
	ctip->dst->va_tag = ((uintptr_t)vaddr & MMU_PAGEMASK);
	ctip->dst->index = ctip->index--;
	ctip->dst->tmp = 1;
	ctip->dst++;
}
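
/*
 * Worked example of the nucleus test above: with MMU_PAGESIZE == 8K
 * and MMU_PAGESIZE4M == 4M, WITHIN_NUCLEUS(va, base) accepts va only
 * when the whole 8K page [va, va + 8K) lies inside the 4MB nucleus
 * page [base, base + 4M); such pages are already covered by the
 * locked nucleus ttes recorded by i_cpr_lnb(), so no extra tte is
 * crafted for them.
 */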

static void
i_cpr_xcall(xcfunc_t func)
{
	uint_t pil, reset_pil;

	pil = getpil();
	if (pil < XCALL_PIL)
		reset_pil = 0;
	else {
		reset_pil = 1;
		setpil(XCALL_PIL - 1);
	}
	xc_some(cpu_ready_set, func, 0, 0);
	if (reset_pil)
		setpil(pil);
}


/*
 * restart paused slave cpus
 */
void
i_cpr_machdep_setup(void)
{
	if (ncpus > 1) {
		DEBUG1(errp("MP restarted...\n"));
		mutex_enter(&cpu_lock);
		start_cpus();
		mutex_exit(&cpu_lock);
	}
}


/*
 * Stop all interrupt activities in the system
 */
void
i_cpr_stop_intr(void)
{
	(void) spl7();
}

/*
 * Set machine up to take interrupts
 */
void
i_cpr_enable_intr(void)
{
	(void) spl0();
}


/*
 * record cpu nodes and ids
 */
static void
i_cpr_save_cpu_info(void)
{
	struct sun4u_cpu_info *scip;
	cpu_t *cp;

	scip = m_info.sci;
	cp = CPU;
	do {
		ASSERT(scip < &m_info.sci[NCPU]);
		scip->cpu_id = cp->cpu_id;
		scip->node = cpunodes[cp->cpu_id].nodeid;
		scip++;
	} while ((cp = cp->cpu_next) != CPU);
}


/*
 * Write necessary machine dependent information to cpr state file,
 * eg. sun4u mmu ctx secondary for the current running process (cpr) ...
 */
int
i_cpr_write_machdep(vnode_t *vp)
{
	extern uint_t getpstate(), getwstate();
	extern uint_t i_cpr_tstack_size;
	const char ustr[] = ": unix-tte 2drop false ;";
	uintptr_t tinfo;
	label_t *ltp;
	cmd_t cmach;
	char *fmt;
	int rc;

	/*
	 * ustr[] is used as temporary forth words during
	 * slave startup sequence, see sfmmu_mp_startup()
	 */

	cmach.md_magic = (uint_t)CPR_MACHDEP_MAGIC;
	cmach.md_size = sizeof (m_info) + sizeof (ustr);

	if (rc = cpr_write(vp, (caddr_t)&cmach, sizeof (cmach))) {
		cpr_err(CE_WARN, "Failed to write descriptor.");
		return (rc);
	}

	/*
	 * m_info is now cleared in i_cpr_dump_setup()
	 */
	m_info.ksb = (uint32_t)STACK_BIAS;
	m_info.kpstate = (uint16_t)getpstate();
	m_info.kwstate = (uint16_t)getwstate();
	DEBUG1(errp("stack bias 0x%x, pstate 0x%x, wstate 0x%x\n",
	    m_info.ksb, m_info.kpstate, m_info.kwstate));

	ltp = &ttolwp(curthread)->lwp_qsav;
	m_info.qsav_pc = (cpr_ext)ltp->val[0];
	m_info.qsav_sp = (cpr_ext)ltp->val[1];

	/*
	 * Set secondary context to INVALID_CONTEXT to force the HAT
	 * to re-setup the MMU registers and locked TTEs it needs for
	 * TLB miss handling.
	 */
	m_info.mmu_ctx_sec = INVALID_CONTEXT;
	m_info.mmu_ctx_pri = KCONTEXT;

	tinfo = (uintptr_t)curthread;
	m_info.thrp = (cpr_ptr)tinfo;

	tinfo = (uintptr_t)i_cpr_resume_setup;
	m_info.func = (cpr_ptr)tinfo;

	/*
	 * i_cpr_data_page is comprised of a 4K stack area and a few
	 * trailing data symbols; the page is shared by the prom and
	 * kernel during resume.  the stack size is recorded here
	 * and used by cprboot to set %sp
	 */
	tinfo = (uintptr_t)&i_cpr_data_page;
	m_info.tmp_stack = (cpr_ptr)tinfo;
	m_info.tmp_stacksize = i_cpr_tstack_size;

	m_info.test_mode = cpr_test_mode;

	i_cpr_save_cpu_info();

	if (rc = cpr_write(vp, (caddr_t)&m_info, sizeof (m_info))) {
		cpr_err(CE_WARN, "Failed to write machdep info.");
		return (rc);
	}

	fmt = "error writing %s forth info";
	if (rc = cpr_write(vp, (caddr_t)ustr, sizeof (ustr)))
		cpr_err(CE_WARN, fmt, "unix-tte");

	return (rc);
}
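
/*
 * On-disk layout of the machdep section written above (sketch; field
 * sizes are target-dependent):
 *
 *	cmd_t		header	md_magic, md_size = sizeof (m_info) +
 *				sizeof (ustr)
 *	csu_md_t	m_info	registers, mmu contexts, tlb info, cpu ids
 *	char		ustr[]	": unix-tte 2drop false ;" plus NUL
 */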

/*
 * Save miscellaneous information which needs to be written to the
 * state file.  This information is required to re-initialize
 * kernel/prom handshaking.
 */
void
i_cpr_save_machdep_info(void)
{
	DEBUG5(errp("jumpback size = 0x%lx\n",
	    (uintptr_t)&i_cpr_end_jumpback -
	    (uintptr_t)i_cpr_resume_setup));

	/*
	 * Verify the jumpback code all falls in one page.
	 */
	if (((uintptr_t)&i_cpr_end_jumpback & MMU_PAGEMASK) !=
	    ((uintptr_t)i_cpr_resume_setup & MMU_PAGEMASK))
		cpr_err(CE_PANIC, "jumpback code exceeds one page.");
}


void
i_cpr_set_tbr(void)
{
}


/*
 * cpu0 should contain bootcpu info
 */
cpu_t *
i_cpr_bootcpu(void)
{
	return (&cpu0);
}


/*
 * Return the virtual address of the mapping area
 */
caddr_t
i_cpr_map_setup(void)
{
	/*
	 * Allocate a virtual memory range spanned by an hmeblk.
	 * This would be 8 hments or 64k bytes.  Starting VA
	 * must be 64k (8-page) aligned.
	 */
	cpr_vaddr = vmem_xalloc(heap_arena,
	    mmu_ptob(NHMENTS), mmu_ptob(NHMENTS),
	    0, 0, NULL, NULL, VM_NOSLEEP);
	return (cpr_vaddr);
}

/*
 * create tmp locked tlb entries for a group of phys pages;
 *
 * i_cpr_mapin/i_cpr_mapout should always be called in pairs,
 * otherwise the tlb would fill up with locked entries
 */
void
i_cpr_mapin(caddr_t vaddr, uint_t pages, pfn_t ppn)
{
	tte_t tte;
	extern pfn_t curthreadpfn;
	extern int curthreadremapped;

	curthreadremapped = (ppn <= curthreadpfn && curthreadpfn < ppn + pages);

	for (; pages--; ppn++, vaddr += MMU_PAGESIZE) {
		tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn);
		tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT |
		    TTE_CP_INT | TTE_PRIV_INT | TTE_HWWR_INT;
		sfmmu_dtlb_ld_kva(vaddr, &tte);
	}
}

void
i_cpr_mapout(caddr_t vaddr, uint_t pages)
{
	extern int curthreadremapped;

	if (curthreadremapped && vaddr <= (caddr_t)curthread &&
	    (caddr_t)curthread < vaddr + pages * MMU_PAGESIZE)
		curthreadremapped = 0;

	for (; pages--; vaddr += MMU_PAGESIZE)
		vtag_flushpage(vaddr, (uint64_t)ksfmmup);
}

/*
 * We're done using the mapping area; release virtual space
 */
void
i_cpr_map_destroy(void)
{
	vmem_free(heap_arena, cpr_vaddr, mmu_ptob(NHMENTS));
	cpr_vaddr = NULL;
}
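
/*
 * Illustrative sketch (CPR_EXAMPLE is a hypothetical guard) of the
 * mapping-area lifecycle; it mirrors the real use in
 * i_cpr_save_ppages()/i_cpr_restore_ppages() further below:
 */
#ifdef CPR_EXAMPLE
static void
cpr_example_copy_phys_page(pfn_t ppn, caddr_t dst)
{
	i_cpr_mapin(cpr_vaddr, 1, ppn);		/* tmp locked dtlb entry */
	bcopy(cpr_vaddr, dst, MMU_PAGESIZE);
	i_cpr_mapout(cpr_vaddr, 1);		/* flush it right away */
}
#endif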

/* ARGSUSED */
void
i_cpr_handle_xc(int flag)
{
}


/*
 * This function takes care of pages which are not in kas or need to be
 * taken care of in a special way.  For example, panicbuf pages are not
 * in kas and their pages are allocated via prom_retain().
 */
pgcnt_t
i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc)
{
	struct cpr_map_info *pri, *tail;
	pgcnt_t pages, total = 0;
	pfn_t pfn;

	/*
	 * Save information about prom retained panicbuf pages
	 */
	if (bitfunc == cpr_setbit) {
		pri = &cpr_prom_retain[CPR_PANICBUF];
		pri->virt = (cpr_ptr)panicbuf;
		pri->phys = va_to_pa(panicbuf);
		pri->size = sizeof (panicbuf);
	}

	/*
	 * Go through the prom_retain array to tag those pages.
	 */
	tail = &cpr_prom_retain[CPR_PROM_RETAIN_CNT];
	for (pri = cpr_prom_retain; pri < tail; pri++) {
		pages = mmu_btopr(pri->size);
		for (pfn = ADDR_TO_PN(pri->phys); pages--; pfn++) {
			if (pf_is_memory(pfn)) {
				if (bitfunc == cpr_setbit) {
					if ((*bitfunc)(pfn, mapflag) == 0)
						total++;
				} else
					total++;
			}
		}
	}

	return (total);
}


/*
 * Free up memory-related resources here.  We start by freeing buffers
 * allocated during suspend initialization.  Also, free up the mapping
 * resources allocated in cpr_init().
 */
void
i_cpr_free_memory_resources(void)
{
	(void) i_cpr_prom_pages(CPR_PROM_FREE);
	i_cpr_map_destroy();
	i_cpr_storage_free();
}


/*
 * Derived from cpr_write_statefile().
 * Save the sensitive pages to the storage area and do bookkeeping
 * using the sensitive descriptors.  Each descriptor will contain no more
 * than CPR_MAXCONTIG amount of contiguous pages to match the max amount
 * of pages that statefile gets written to disk at each write.
 * XXX The CPR_MAXCONTIG can be changed to the size of the compression
 * scratch area.
 */
static int
i_cpr_save_to_storage(void)
{
	sensitive_size_saved = 0;
	sensitive_pages_saved = 0;
	sensitive_write_ptr = i_cpr_storage_data_base;
	return (cpr_contig_pages(NULL, SAVE_TO_STORAGE));
}


/*
 * This routine allocates space to save the sensitive kernel pages,
 * i.e. kernel data nucleus, kvalloc and kvseg segments.
 * It's assumed that those segments are the only areas that can be
 * contaminated by memory allocations during statefile dumping.
 * The space allocated here contains:
 *	A list of descriptors describing the saved sensitive pages.
 *	The storage area for saving the compressed sensitive kernel pages.
 * Since storage pages are allocated from segkmem, they need to be
 * excluded when saving.
 */
int
i_cpr_save_sensitive_kpages(void)
{
	static const char pages_fmt[] = "\n%s %s allocs\n"
	    "\tspages %ld, vpages %ld, diff %ld\n";
	int retry_cnt;
	int error = 0;
	pgcnt_t pages, spages, vpages;
	caddr_t addr;
	char *str;

	/*
	 * Tag sensitive kpages.  Allocate space for storage descriptors
	 * and storage data area based on the resulting bitmaps.
	 * Note: The storage space will be part of the sensitive
	 * segment, so we need to tag kpages here before the storage
	 * is actually allocated just so their space won't be accounted
	 * for.  They will not be part of the statefile although those
	 * pages will be claimed by cprboot.
	 */
	cpr_clear_bitmaps();

	spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_setbit);
	vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);
	pages = spages - vpages;

	str = "i_cpr_save_sensitive_kpages:";
	DEBUG7(errp(pages_fmt, "before", str, spages, vpages, pages));

	/*
	 * Allocate space to save the clean sensitive kpages
	 */
	for (retry_cnt = 0; retry_cnt < MAX_STORAGE_ALLOC_RETRY; retry_cnt++) {
		/*
		 * Alloc on first pass or realloc if we are retrying because
		 * of insufficient storage for sensitive pages
		 */
		if (retry_cnt == 0 || error == ENOMEM) {
			if (i_cpr_storage_data_base) {
				kmem_free(i_cpr_storage_data_base,
				    mmu_ptob(i_cpr_storage_data_sz));
				i_cpr_storage_data_base = NULL;
				i_cpr_storage_data_sz = 0;
			}
			addr = i_cpr_storage_data_alloc(pages,
			    &i_cpr_storage_data_sz, retry_cnt);
			if (addr == NULL) {
				DEBUG7(errp(
				    "\n%s can't allocate data storage space!\n",
				    str));
				return (ENOMEM);
			}
			i_cpr_storage_data_base = addr;
			i_cpr_storage_data_end =
			    addr + mmu_ptob(i_cpr_storage_data_sz);
		}

		/*
		 * Allocate on first pass, only realloc if retry is because of
		 * insufficient descriptors, but reset contents on each pass
		 * (desc_alloc resets contents as well)
		 */
		if (retry_cnt == 0 || error == -1) {
			error = i_cpr_storage_desc_alloc(
			    &i_cpr_storage_desc_base,
			    &i_cpr_storage_desc_pgcnt,
			    &i_cpr_storage_desc_end, retry_cnt);
			if (error != 0)
				return (error);
		} else {
			i_cpr_storage_desc_init(i_cpr_storage_desc_base,
			    i_cpr_storage_desc_pgcnt, i_cpr_storage_desc_end);
		}

		/*
		 * We are ready to save the sensitive kpages to storage.
		 * We cannot trust what's tagged in the bitmaps anymore
		 * after storage allocations.  Clear up the bitmaps and
		 * retag the sensitive kpages again.  The storage pages
		 * should be untagged.
		 */
		cpr_clear_bitmaps();

		spages =
		    i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_setbit);
		vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);

		DEBUG7(errp(pages_fmt, "after ", str,
		    spages, vpages, spages - vpages));

		/*
		 * Returns 0 on success, -1 if too few descriptors, and
		 * ENOMEM if not enough space to save sensitive pages
		 */
		DEBUG1(errp("compressing pages to storage...\n"));
		error = i_cpr_save_to_storage();
		if (error == 0) {
			/* Saving to storage succeeded */
			DEBUG1(errp("compressed %d pages\n",
			    sensitive_pages_saved));
			break;
		} else if (error == -1)
			DEBUG1(errp("%s too few descriptors\n", str));
	}
	if (error == -1)
		error = ENOMEM;
	return (error);
}


/*
 * Estimate how much memory we will need to save
 * the sensitive pages with compression.
 */
static caddr_t
i_cpr_storage_data_alloc(pgcnt_t pages, pgcnt_t *alloc_pages, int retry_cnt)
{
	pgcnt_t alloc_pcnt, last_pcnt;
	caddr_t addr;
	char *str;

	str = "i_cpr_storage_data_alloc:";
	if (retry_cnt == 0) {
		/*
		 * common compression ratio is about 3:1
		 * initial storage allocation is estimated at 40%
		 * to cover the majority of cases
		 */
		alloc_pcnt = INITIAL_ALLOC_PCNT;
		*alloc_pages = (pages * alloc_pcnt) / INTEGRAL;
		DEBUG7(errp("%s sensitive pages: %ld\n", str, pages));
		DEBUG7(errp("%s initial est pages: %ld, alloc %ld%%\n",
		    str, *alloc_pages, alloc_pcnt));
	} else {
		/*
		 * calculate the prior compression percentage (x100)
		 * from the last attempt to save sensitive pages
		 */
		ASSERT(sensitive_pages_saved != 0);
		last_pcnt = (mmu_btopr(sensitive_size_saved) * INTEGRAL) /
		    sensitive_pages_saved;
		DEBUG7(errp("%s last ratio %ld%%\n", str, last_pcnt));

		/*
		 * new estimated storage size is based on
		 * the larger ratio + 5% for each retry:
		 *	pages * (last + [5%, 10%])
		 */
		alloc_pcnt = MAX(last_pcnt, INITIAL_ALLOC_PCNT) +
		    (retry_cnt * 5);
		*alloc_pages = (pages * alloc_pcnt) / INTEGRAL;
		DEBUG7(errp("%s Retry est pages: %ld, alloc %ld%%\n",
		    str, *alloc_pages, alloc_pcnt));
	}

	addr = kmem_alloc(mmu_ptob(*alloc_pages), KM_NOSLEEP);
	DEBUG7(errp("%s alloc %ld pages\n", str, *alloc_pages));
	return (addr);
}
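
/*
 * Worked example of the estimate above (numbers hypothetical): a
 * first pass with pages == 1000 allocates 1000 * 40 / 100 = 400
 * pages.  If that pass compressed 1000 pages into 450 pages' worth
 * of data, then on retry 1: last_pcnt = (450 * 100) / 1000 = 45, so
 * alloc_pcnt = MAX(45, 40) + 1 * 5 = 50 and 500 pages are allocated.
 */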

void
i_cpr_storage_free(void)
{
	/* Free descriptors */
	if (i_cpr_storage_desc_base) {
		kmem_free(i_cpr_storage_desc_base,
		    mmu_ptob(i_cpr_storage_desc_pgcnt));
		i_cpr_storage_desc_base = NULL;
		i_cpr_storage_desc_pgcnt = 0;
	}

	/* Data storage */
	if (i_cpr_storage_data_base) {
		kmem_free(i_cpr_storage_data_base,
		    mmu_ptob(i_cpr_storage_data_sz));
		i_cpr_storage_data_base = NULL;
		i_cpr_storage_data_sz = 0;
	}
}


/*
 * This routine is derived from cpr_compress_and_write().
 * 1. Do bookkeeping in the descriptor for the contiguous sensitive chunk.
 * 2. Compress and save the clean sensitive pages into the storage area.
 */
int
i_cpr_compress_and_save(int chunks, pfn_t spfn, pgcnt_t pages)
{
	extern char *cpr_compress_pages(cpd_t *, pgcnt_t, int);
	extern caddr_t i_cpr_storage_data_end;
	uint_t remaining, datalen;
	uint32_t test_usum;
	char *datap;
	csd_t *descp;
	cpd_t cpd;
	int error;

	/*
	 * Fill next empty storage descriptor
	 */
	descp = i_cpr_storage_desc_base + chunks - 1;
	if (descp >= i_cpr_storage_desc_end) {
		DEBUG1(errp("ran out of descriptors, base 0x%p, chunks %d, "
		    "end 0x%p, descp 0x%p\n", i_cpr_storage_desc_base, chunks,
		    i_cpr_storage_desc_end, descp));
		return (-1);
	}
	ASSERT(descp->csd_dirty_spfn == (uint_t)-1);
	i_cpr_storage_desc_last_used = descp;

	descp->csd_dirty_spfn = spfn;
	descp->csd_dirty_npages = pages;

	i_cpr_mapin(CPR->c_mapping_area, pages, spfn);

	/*
	 * try compressing pages and copy cpd fields
	 * pfn is copied for debug use
	 */
	cpd.cpd_pfn = spfn;
	datap = cpr_compress_pages(&cpd, pages, C_COMPRESSING);
	datalen = cpd.cpd_length;
	descp->csd_clean_compressed = (cpd.cpd_flag & CPD_COMPRESS);
#ifdef DEBUG
	descp->csd_usum = cpd.cpd_usum;
	descp->csd_csum = cpd.cpd_csum;
#endif

	error = 0;

	/*
	 * Save the raw or compressed data to the storage area pointed to by
	 * sensitive_write_ptr.  Make sure the storage space is big enough to
	 * hold the result.  Otherwise roll back to increase the storage space.
	 */
	descp->csd_clean_sva = (cpr_ptr)sensitive_write_ptr;
	descp->csd_clean_sz = datalen;
	if ((sensitive_write_ptr + datalen) < i_cpr_storage_data_end) {
		extern void cprbcopy(void *, void *, size_t);

		cprbcopy(datap, sensitive_write_ptr, datalen);
		sensitive_size_saved += datalen;
		sensitive_pages_saved += descp->csd_dirty_npages;
		sensitive_write_ptr += datalen;
	} else {
		remaining = (i_cpr_storage_data_end - sensitive_write_ptr);
		DEBUG1(errp("i_cpr_compress_and_save: The storage "
		    "space is too small!\ngot %d, want %d\n\n",
		    remaining, (remaining + datalen)));
#ifdef DEBUG
		/*
		 * Check to see if the content of the sensitive pages that we
		 * just copied has changed during this small time window.
		 */
		test_usum = checksum32(CPR->c_mapping_area, mmu_ptob(pages));
		descp->csd_usum = cpd.cpd_usum;
		if (test_usum != descp->csd_usum) {
			DEBUG1(errp("\nWARNING: i_cpr_compress_and_save: "
			    "Data in the range of pfn 0x%lx to pfn "
			    "0x%lx has changed after they are saved "
			    "into storage.", spfn, (spfn + pages - 1)));
		}
#endif
		error = ENOMEM;
	}

	i_cpr_mapout(CPR->c_mapping_area, pages);
	return (error);
}
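
/*
 * Storage bookkeeping on a successful call above (sketch): the
 * 1-based chunk number selects its descriptor,
 *
 *	descp = i_cpr_storage_desc_base + chunks - 1
 *	descp->csd_clean_sva .. + csd_clean_sz	within the data area
 *
 * and sensitive_write_ptr advances past the bytes just written; on
 * ENOMEM the caller reallocates a larger data area and starts over.
 */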

/*
 * This routine is derived from cpr_count_kpages().
 * It goes through kernel data nucleus and segkmem segments to select
 * pages in use and mark them in the corresponding bitmap.
 */
pgcnt_t
i_cpr_count_sensitive_kpages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t kdata_cnt = 0, segkmem_cnt = 0;
	extern caddr_t e_moddata;
	extern struct seg kvalloc;
	extern struct seg kmem64;
	size_t size;

	/*
	 * Kernel data nucleus pages
	 */
	size = e_moddata - s_data;
	kdata_cnt += cpr_count_pages(s_data, size,
	    mapflag, bitfunc, DBG_SHOWRANGE);

	/*
	 * kvseg and kvalloc pages
	 */
	segkmem_cnt += cpr_scan_kvseg(mapflag, bitfunc, &kvseg);
	segkmem_cnt += cpr_count_pages(kvalloc.s_base, kvalloc.s_size,
	    mapflag, bitfunc, DBG_SHOWRANGE);

	/* segment to support kernel memory usage above 32-bit space (4GB) */
	if (kmem64.s_base)
		segkmem_cnt += cpr_count_pages(kmem64.s_base, kmem64.s_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);

	DEBUG7(errp("\ni_cpr_count_sensitive_kpages:\n"
	    "\tkdata_cnt %ld + segkmem_cnt %ld = %ld pages\n",
	    kdata_cnt, segkmem_cnt, kdata_cnt + segkmem_cnt));

	return (kdata_cnt + segkmem_cnt);
}


pgcnt_t
i_cpr_count_storage_pages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t count = 0;

	if (i_cpr_storage_desc_base) {
		count += cpr_count_pages((caddr_t)i_cpr_storage_desc_base,
		    (size_t)mmu_ptob(i_cpr_storage_desc_pgcnt),
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	if (i_cpr_storage_data_base) {
		count += cpr_count_pages(i_cpr_storage_data_base,
		    (size_t)mmu_ptob(i_cpr_storage_data_sz),
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	return (count);
}

/*
 * Derived from cpr_write_statefile().
 * Allocate (or reallocate after exhausting the supply) descriptors for each
 * chunk of contiguous sensitive kpages.
 */
static int
i_cpr_storage_desc_alloc(csd_t **basepp, pgcnt_t *pgsp, csd_t **endpp,
    int retry)
{
	pgcnt_t npages;
	int chunks;
	csd_t *descp, *end;
	size_t len;
	char *str = "i_cpr_storage_desc_alloc:";

	/*
	 * On initial allocation, add some extra to cover overhead caused
	 * by the allocation for the storage area later.
	 */
	if (retry == 0) {
		chunks = cpr_contig_pages(NULL, STORAGE_DESC_ALLOC) +
		    EXTRA_DESCS;
		npages = mmu_btopr(sizeof (**basepp) * (pgcnt_t)chunks);
		DEBUG7(errp("%s chunks %d, ", str, chunks));
	} else {
		DEBUG7(errp("%s retry %d: ", str, retry));
		npages = *pgsp + 1;
	}
	/* Free old descriptors, if any */
	if (*basepp)
		kmem_free((caddr_t)*basepp, mmu_ptob(*pgsp));

	descp = *basepp = kmem_alloc(mmu_ptob(npages), KM_NOSLEEP);
	if (descp == NULL) {
		DEBUG7(errp("%s no space for descriptors!\n", str));
		return (ENOMEM);
	}

	*pgsp = npages;
	len = mmu_ptob(npages);
	end = *endpp = descp + (len / (sizeof (**basepp)));
	DEBUG7(errp("npages 0x%lx, len 0x%lx, items 0x%lx\n\t*basepp "
	    "%p, *endpp %p\n", npages, len, (len / (sizeof (**basepp))),
	    *basepp, *endpp));
	i_cpr_storage_desc_init(descp, npages, end);
	return (0);
}

static void
i_cpr_storage_desc_init(csd_t *descp, pgcnt_t npages, csd_t *end)
{
	size_t len = mmu_ptob(npages);

	/* Initialize the descriptors to something impossible. */
	bzero(descp, len);
#ifdef DEBUG
	/*
	 * This condition is tested by an ASSERT
	 */
	for (; descp < end; descp++)
		descp->csd_dirty_spfn = (uint_t)-1;
#endif
}
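
/*
 * Capacity arithmetic used above (symbolic): for C chunks,
 * npages = mmu_btopr(sizeof (csd_t) * C) pages are allocated, and the
 * usable descriptor count is mmu_ptob(npages) / sizeof (csd_t), which
 * is >= C because mmu_btopr() rounds up to a whole page.
 */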

int
i_cpr_dump_sensitive_kpages(vnode_t *vp)
{
	int error = 0;
	uint_t spin_cnt = 0;
	csd_t *descp;

	/*
	 * The following two variables need to be reinitialized
	 * for each cpr cycle.
	 */
	i_cpr_sensitive_bytes_dumped = 0;
	i_cpr_sensitive_pgs_dumped = 0;

	if (i_cpr_storage_desc_base) {
		for (descp = i_cpr_storage_desc_base;
		    descp <= i_cpr_storage_desc_last_used; descp++) {
			if (error = cpr_dump_sensitive(vp, descp))
				return (error);
			spin_cnt++;
			if ((spin_cnt & 0x5F) == 1)
				cpr_spinning_bar();
		}
		prom_printf(" \b");
	}

	DEBUG7(errp("\ni_cpr_dump_sensitive_kpages: dumped %ld\n",
	    i_cpr_sensitive_pgs_dumped));
	return (0);
}

/*
 * 1. Fill the cpr page descriptor with the info of the dirty pages
 *    and write the descriptor out.  It will be used at resume.
 * 2. Write the clean data out instead of the dirty data.
 *    Note: to save space, the clean data is already compressed.
 */
static int
cpr_dump_sensitive(vnode_t *vp, csd_t *descp)
{
	int error = 0;
	caddr_t datap;
	cpd_t cpd;	/* cpr page descriptor */
	pfn_t dirty_spfn;
	pgcnt_t dirty_npages;
	size_t clean_sz;
	caddr_t clean_sva;
	int clean_compressed;
	extern uchar_t cpr_pagecopy[];

	dirty_spfn = descp->csd_dirty_spfn;
	dirty_npages = descp->csd_dirty_npages;
	clean_sva = (caddr_t)descp->csd_clean_sva;
	clean_sz = descp->csd_clean_sz;
	clean_compressed = descp->csd_clean_compressed;

	/* Fill cpr page descriptor. */
	cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC;
	cpd.cpd_pfn = dirty_spfn;
	cpd.cpd_flag = 0;	/* must init to zero */
	cpd.cpd_pages = dirty_npages;

#ifdef DEBUG
	if ((cpd.cpd_usum = descp->csd_usum) != 0)
		cpd.cpd_flag |= CPD_USUM;
	if ((cpd.cpd_csum = descp->csd_csum) != 0)
		cpd.cpd_flag |= CPD_CSUM;
#endif

	STAT->cs_dumped_statefsz += mmu_ptob(dirty_npages);

	/*
	 * The sensitive kpages are usually saved with compression
	 * unless compression could not reduce the size of the data.
	 * If the user chose not to have the statefile compressed,
	 * we need to decompress the data back before dumping it to disk.
	 */
	if (CPR->c_flags & C_COMPRESSING) {
		cpd.cpd_length = clean_sz;
		datap = clean_sva;
		if (clean_compressed)
			cpd.cpd_flag |= CPD_COMPRESS;
	} else {
		if (clean_compressed) {
			cpd.cpd_length = decompress(clean_sva, cpr_pagecopy,
			    clean_sz, mmu_ptob(dirty_npages));
			datap = (caddr_t)cpr_pagecopy;
			ASSERT(cpd.cpd_length == mmu_ptob(dirty_npages));
		} else {
			cpd.cpd_length = clean_sz;
			datap = clean_sva;
		}
		cpd.cpd_csum = 0;
	}

	/* Write cpr page descriptor */
	error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd));
	if (error) {
		DEBUG7(errp("descp: %p\n", descp));
#ifdef DEBUG
		debug_enter("cpr_dump_sensitive: cpr_write() page "
		    "descriptor failed!\n");
#endif
		return (error);
	}

	i_cpr_sensitive_bytes_dumped += sizeof (cpd_t);

	/* Write page data */
	error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length);
	if (error) {
		DEBUG7(errp("error: %x\n", error));
		DEBUG7(errp("descp: %p\n", descp));
		DEBUG7(errp("cpr_write(%p, %p , %lx)\n", vp, datap,
		    cpd.cpd_length));
#ifdef DEBUG
		debug_enter("cpr_dump_sensitive: cpr_write() data failed!\n");
#endif
		return (error);
	}

	i_cpr_sensitive_bytes_dumped += cpd.cpd_length;
	i_cpr_sensitive_pgs_dumped += dirty_npages;

	return (error);
}
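
/*
 * Statefile layout of each sensitive chunk written above: a (cpd_t,
 * data) pair,
 *
 *	cpd_t	descriptor	cpd_magic, pfn, page count, flags, length
 *	data	cpd_length bytes, compressed when CPD_COMPRESS is set
 *
 * which the resume side walks record by record.
 */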

/*
 * Sanity check to make sure that we have dumped the right amount
 * of pages from different sources to the statefile.
 */
int
i_cpr_check_pgs_dumped(uint_t pgs_expected, uint_t regular_pgs_dumped)
{
	uint_t total_pgs_dumped;

	total_pgs_dumped = regular_pgs_dumped + i_cpr_sensitive_pgs_dumped;

	DEBUG7(errp("\ncheck_pgs: reg %d + sens %ld = %d, expect %d\n\n",
	    regular_pgs_dumped, i_cpr_sensitive_pgs_dumped,
	    total_pgs_dumped, pgs_expected));

	if (pgs_expected == total_pgs_dumped)
		return (0);

	return (EINVAL);
}


int
i_cpr_reusefini(void)
{
	struct vnode *vp;
	cdef_t *cdef;
	size_t size;
	char *bufp;
	int rc;

	if (cpr_reusable_mode)
		cpr_reusable_mode = 0;

	if (rc = cpr_open_deffile(FREAD|FWRITE, &vp)) {
		if (rc == EROFS) {
			cpr_err(CE_CONT, "uadmin A_FREEZE AD_REUSEFINI "
			    "(uadmin %d %d)\nmust be done with / mounted "
			    "writeable.\n", A_FREEZE, AD_REUSEFINI);
		}
		return (rc);
	}

	cdef = kmem_alloc(sizeof (*cdef), KM_SLEEP);
	rc = cpr_rdwr(UIO_READ, vp, cdef, sizeof (*cdef));

	if (rc) {
		cpr_err(CE_WARN, "Failed reading %s, errno = %d",
		    cpr_default_path, rc);
	} else if (cdef->mini.magic != CPR_DEFAULT_MAGIC) {
		cpr_err(CE_WARN, "bad magic number in %s, cannot restore "
		    "prom values for %s", cpr_default_path,
		    cpr_enumerate_promprops(&bufp, &size));
		kmem_free(bufp, size);
		rc = EINVAL;
	} else {
		/*
		 * clean up prom properties
		 */
		rc = cpr_update_nvram(cdef->props);
		if (rc == 0) {
			/*
			 * invalidate the disk copy and turn off reusable
			 */
			cdef->mini.magic = 0;
			cdef->mini.reusable = 0;
			if (rc = cpr_rdwr(UIO_WRITE, vp,
			    &cdef->mini, sizeof (cdef->mini))) {
				cpr_err(CE_WARN, "Failed writing %s, errno %d",
				    cpr_default_path, rc);
			}
		}
	}

	(void) VOP_CLOSE(vp, FREAD|FWRITE, 1, (offset_t)0, CRED());
	VN_RELE(vp);
	kmem_free(cdef, sizeof (*cdef));

	return (rc);
}


int
i_cpr_reuseinit(void)
{
	int rc = 0;

	if (rc = cpr_default_setup(1))
		return (rc);

	/*
	 * We need to validate the default file
	 */
	rc = cpr_validate_definfo(1);
	if (rc == 0)
		cpr_reusable_mode = 1;
	else if (rc == EROFS) {
		cpr_err(CE_NOTE, "reuseinit must be performed "
		    "while / is mounted writeable");
	}

	(void) cpr_default_setup(0);

	return (rc);
}


int
i_cpr_check_cprinfo(void)
{
	struct vnode *vp;
	cmini_t mini;
	int rc = 0;

	if (rc = cpr_open_deffile(FREAD, &vp)) {
		if (rc == ENOENT)
			cpr_err(CE_NOTE, "cprinfo file does not "
			    "exist.  You must run 'uadmin %d %d' "
			    "command while / is mounted writeable,\n"
			    "then reboot and run 'uadmin %d %d' "
			    "to create a reusable statefile",
			    A_FREEZE, AD_REUSEINIT, A_FREEZE, AD_REUSABLE);
		return (rc);
	}

	rc = cpr_rdwr(UIO_READ, vp, &mini, sizeof (mini));
	(void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED());
	VN_RELE(vp);

	if (rc) {
		cpr_err(CE_WARN, "Failed reading %s, errno = %d",
		    cpr_default_path, rc);
	} else if (mini.magic != CPR_DEFAULT_MAGIC) {
		cpr_err(CE_CONT, "bad magic number in cprinfo file.\n"
		    "You must run 'uadmin %d %d' while / is mounted "
		    "writeable, then reboot and run 'uadmin %d %d' "
		    "to create a reusable statefile\n",
		    A_FREEZE, AD_REUSEINIT, A_FREEZE, AD_REUSABLE);
		rc = EINVAL;
	}

	return (rc);
}


int
i_cpr_reusable_supported(void)
{
	return (1);
}
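
/*
 * Admin sequence implied by the messages above (illustration; the
 * symbols come from <sys/uadmin.h>): uadmin(A_FREEZE, AD_REUSEINIT, ...)
 * while / is mounted writeable, then a reboot, then
 * uadmin(A_FREEZE, AD_REUSABLE, ...) to create a reusable statefile;
 * uadmin(A_FREEZE, AD_REUSEFINI, ...) undoes reusable mode via
 * i_cpr_reusefini() above.
 */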

/*
 * find prom phys pages and alloc space for a tmp copy
 */
static int
i_cpr_find_ppages(void)
{
	extern struct vnode prom_ppages;
	struct page *pp;
	struct memlist *pmem;
	pgcnt_t npages, pcnt, scnt, vcnt;
	pfn_t ppn, plast, *dst;
	int mapflag;

	cpr_clear_bitmaps();
	mapflag = REGULAR_BITMAP;

	/*
	 * there should be a page_t for each phys page used by the kernel;
	 * set a bit for each phys page not tracked by a page_t
	 */
	pcnt = 0;
	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->next) {
		npages = mmu_btop(pmem->size);
		ppn = mmu_btop(pmem->address);
		for (plast = ppn + npages; ppn < plast; ppn++) {
			if (page_numtopp_nolock(ppn))
				continue;
			(void) cpr_setbit(ppn, mapflag);
			pcnt++;
		}
	}
	memlist_read_unlock();

	/*
	 * clear bits for phys pages in each segment
	 */
	scnt = cpr_count_seg_pages(mapflag, cpr_clrbit);

	/*
	 * set bits for phys pages referenced by the prom_ppages vnode;
	 * these pages are mostly comprised of forthdebug words
	 */
	vcnt = 0;
	for (pp = prom_ppages.v_pages; pp; ) {
		if (cpr_setbit(pp->p_offset, mapflag) == 0)
			vcnt++;
		pp = pp->p_vpnext;
		if (pp == prom_ppages.v_pages)
			break;
	}

	/*
	 * the total number of prom pages is:
	 * (non-page_t pages - seg pages + vnode pages)
	 */
	ppage_count = pcnt - scnt + vcnt;
	DEBUG1(errp("find_ppages: pcnt %ld - scnt %ld + vcnt %ld = %ld\n",
	    pcnt, scnt, vcnt, ppage_count));

	/*
	 * alloc array of pfn_t to store phys page list
	 */
	pphys_list_size = ppage_count * sizeof (pfn_t);
	pphys_list = kmem_alloc(pphys_list_size, KM_NOSLEEP);
	if (pphys_list == NULL) {
		cpr_err(CE_WARN, "cannot alloc pphys_list");
		return (ENOMEM);
	}

	/*
	 * phys pages referenced in the bitmap should be
	 * those used by the prom; scan bitmap and save
	 * a list of prom phys page numbers
	 */
	dst = pphys_list;
	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->next) {
		npages = mmu_btop(pmem->size);
		ppn = mmu_btop(pmem->address);
		for (plast = ppn + npages; ppn < plast; ppn++) {
			if (cpr_isset(ppn, mapflag)) {
				ASSERT(dst < (pphys_list + ppage_count));
				*dst++ = ppn;
			}
		}
	}
	memlist_read_unlock();

	/*
	 * allocate space to store prom pages
	 */
	ppage_buf = kmem_alloc(mmu_ptob(ppage_count), KM_NOSLEEP);
	if (ppage_buf == NULL) {
		kmem_free(pphys_list, pphys_list_size);
		pphys_list = NULL;
		cpr_err(CE_WARN, "cannot alloc ppage_buf");
		return (ENOMEM);
	}

	return (0);
}


/*
 * save prom pages to kmem pages
 */
static void
i_cpr_save_ppages(void)
{
	pfn_t *pphys, *plast;
	caddr_t dst;

	/*
	 * map in each prom page and copy to a kmem page
	 */
	dst = ppage_buf;
	plast = pphys_list + ppage_count;
	for (pphys = pphys_list; pphys < plast; pphys++) {
		i_cpr_mapin(cpr_vaddr, 1, *pphys);
		bcopy(cpr_vaddr, dst, MMU_PAGESIZE);
		i_cpr_mapout(cpr_vaddr, 1);
		dst += MMU_PAGESIZE;
	}

	DEBUG1(errp("saved %ld prom pages\n", ppage_count));
}


/*
 * restore prom pages from kmem pages
 */
static void
i_cpr_restore_ppages(void)
{
	pfn_t *pphys, *plast;
	caddr_t src;

	dcache_flushall();

	/*
	 * map in each prom page and copy from a kmem page
	 */
	src = ppage_buf;
	plast = pphys_list + ppage_count;
	for (pphys = pphys_list; pphys < plast; pphys++) {
		i_cpr_mapin(cpr_vaddr, 1, *pphys);
		bcopy(src, cpr_vaddr, MMU_PAGESIZE);
		i_cpr_mapout(cpr_vaddr, 1);
		src += MMU_PAGESIZE;
	}

	dcache_flushall();

	DEBUG1(errp("restored %ld prom pages\n", ppage_count));
}


/*
 * save/restore prom pages or free related allocs
 */
int
i_cpr_prom_pages(int action)
{
	int error;

	if (action == CPR_PROM_SAVE) {
		if (ppage_buf == NULL) {
			ASSERT(pphys_list == NULL);
			if (error = i_cpr_find_ppages())
				return (error);
			i_cpr_save_ppages();
		}
	} else if (action == CPR_PROM_RESTORE) {
		i_cpr_restore_ppages();
	} else if (action == CPR_PROM_FREE) {
		if (pphys_list) {
			ASSERT(pphys_list_size);
			kmem_free(pphys_list, pphys_list_size);
			pphys_list = NULL;
			pphys_list_size = 0;
		}
		if (ppage_buf) {
			ASSERT(ppage_count);
			kmem_free(ppage_buf, mmu_ptob(ppage_count));
			DEBUG1(errp("freed %ld prom pages\n", ppage_count));
			ppage_buf = NULL;
			ppage_count = 0;
		}
	}
	return (0);
}
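
/*
 * Caller ordering for i_cpr_prom_pages() (sketch; the save action is
 * assumed to be requested by the generic cpr suspend path, since no
 * caller appears in this file):
 *
 *	CPR_PROM_SAVE		before the statefile dump (alloc + copy)
 *	CPR_PROM_RESTORE	during resume, from i_cpr_mp_setup()
 *	CPR_PROM_FREE		from i_cpr_free_memory_resources()
 */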

/*
 * record tlb data for the nucleus, bigktsb's, and the cpr module;
 * this data is later used by cprboot to install dtlb/itlb entries.
 * when we jump into the cpr module during the resume phase, those
 * mappings are needed until switching to the kernel trap table.
 * to make the dtte/itte info available during resume, we need
 * the info recorded prior to saving sensitive pages, otherwise
 * all the data would appear as NULLs.
 */
static void
i_cpr_save_tlbinfo(void)
{
	cti_t cti = {0};

	/*
	 * during resume - shortly after jumping into the cpr module,
	 * sfmmu_load_mmustate() will overwrite any dtlb entry at any
	 * index used for TSBs; skip is set so that any saved tte will
	 * target other tlb offsets and prevent being lost during
	 * resume.  now scan the dtlb and save locked entries,
	 * then add entries for the tmp stack / data page and the
	 * cpr thread structure.
	 */
	cti.dst = m_info.dtte;
	cti.tail = cti.dst + CPR_MAX_TLB;
	cti.reader = dtlb_rd_entry;
	cti.writer = NULL;
	cti.filter = i_cpr_lnb;
	cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1;

	if (utsb_dtlb_ttenum != -1)
		cti.skip = ((uint64_t)1 << utsb_dtlb_ttenum);

	if (utsb4m_dtlb_ttenum != -1)
		cti.skip |= ((uint64_t)1 << utsb4m_dtlb_ttenum);

	i_cpr_scan_tlb(&cti);
	i_cpr_make_tte(&cti, &i_cpr_data_page, datava);
	i_cpr_make_tte(&cti, curthread, datava);

	/*
	 * scan itlb and save locked entries; add an entry for
	 * the first text page of the cpr module; cprboot will
	 * jump to that page after restoring kernel pages.
	 */
	cti.dst = m_info.itte;
	cti.tail = cti.dst + CPR_MAX_TLB;
	cti.reader = itlb_rd_entry;
	cti.index = cpunodes[CPU->cpu_id].itlb_size - 1;
	cti.skip = 0;
	i_cpr_scan_tlb(&cti);
	i_cpr_make_tte(&cti, (void *)i_cpr_resume_setup, textva);
}


/* ARGSUSED */
int
i_cpr_dump_setup(vnode_t *vp)
{
	/*
	 * zero out m_info and add info to dtte/itte arrays
	 */
	bzero(&m_info, sizeof (m_info));
	i_cpr_save_tlbinfo();
	return (0);
}
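
/*
 * Worked example of the skip mask set above (index values
 * hypothetical): with utsb_dtlb_ttenum == 5 and
 * utsb4m_dtlb_ttenum == 6, skip == 0x60, so a descending scan that
 * reaches index 6 steps down to 4 before storing; the saved ttes then
 * survive sfmmu_load_mmustate() rewriting the TSB indexes on resume.
 */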

int
i_cpr_is_supported(void)
{
	char es_prop[] = "energystar-v2";
	pnode_t node;
	int last;
	extern int cpr_supported_override;
	extern int cpr_platform_enable;

	/*
	 * The next statement tests if a specific platform has turned off
	 * cpr support.
	 */
	if (cpr_supported_override)
		return (0);

	/*
	 * Do not inspect energystar-v* property if a platform has
	 * specifically turned on cpr support
	 */
	if (cpr_platform_enable)
		return (1);

	node = prom_rootnode();
	if (prom_getproplen(node, es_prop) != -1)
		return (1);
	last = strlen(es_prop) - 1;
	es_prop[last] = '3';
	return (prom_getproplen(node, es_prop) != -1);
}


/*
 * the actual size of the statefile data isn't known until after all the
 * compressed pages are written; even the inode size doesn't reflect the
 * data size since there are usually many extra fs blocks.  for recording
 * the actual data size, the first sector of the statefile is copied to
 * a tmp buf, and the copy is later updated and flushed to disk.
 */
int
i_cpr_blockzero(char *base, char **bufpp, int *blkno, vnode_t *vp)
{
	extern int cpr_flush_write(vnode_t *);
	static char cpr_sector[DEV_BSIZE];
	cpr_ext bytes, *dst;

	/*
	 * this routine is called after cdd_t and csu_md_t are copied
	 * to cpr_buf; mini-hack alert: the save/update method creates
	 * a dependency on the combined struct size being >= one sector
	 * or DEV_BSIZE; since introduction in Sol2.7, csu_md_t size is
	 * over 1K bytes and will probably grow with any changes.
	 *
	 * copy when vp is NULL, flush when non-NULL
	 */
	if (vp == NULL) {
		ASSERT((*bufpp - base) >= DEV_BSIZE);
		bcopy(base, cpr_sector, sizeof (cpr_sector));
		return (0);
	} else {
		bytes = dbtob(*blkno);
		dst = &((cdd_t *)cpr_sector)->cdd_filesize;
		bcopy(&bytes, dst, sizeof (bytes));
		bcopy(cpr_sector, base, sizeof (cpr_sector));
		*bufpp = base + sizeof (cpr_sector);
		*blkno = cpr_statefile_offset();
		DEBUG1(errp("statefile data size: %ld\n\n", bytes));
		return (cpr_flush_write(vp));
	}
}


/*
 * Allocate bitmaps according to the phys_install list.
 */
static int
i_cpr_bitmap_setup(void)
{
	struct memlist *pmem;
	cbd_t *dp, *tail;
	void *space;
	size_t size;

	/*
	 * The number of bitmap descriptors will be the count of
	 * phys_install ranges plus 1 for a trailing NULL struct.
	 */
	cpr_nbitmaps = 1;
	for (pmem = phys_install; pmem; pmem = pmem->next)
		cpr_nbitmaps++;

	if (cpr_nbitmaps > (CPR_MAX_BMDESC - 1)) {
		cpr_err(CE_WARN, "too many physical memory ranges %d, max %d",
		    cpr_nbitmaps, CPR_MAX_BMDESC - 1);
		return (EFBIG);
	}

	/* Alloc an array of bitmap descriptors. */
	dp = kmem_zalloc(cpr_nbitmaps * sizeof (*dp), KM_NOSLEEP);
	if (dp == NULL) {
		cpr_nbitmaps = 0;
		return (ENOMEM);
	}
	tail = dp + cpr_nbitmaps;

	CPR->c_bmda = dp;
	for (pmem = phys_install; pmem; pmem = pmem->next) {
		size = BITMAP_BYTES(pmem->size);
		space = kmem_zalloc(size * 2, KM_NOSLEEP);
		if (space == NULL)
			return (ENOMEM);
		ASSERT(dp < tail);
		dp->cbd_magic = CPR_BITMAP_MAGIC;
		dp->cbd_spfn = mmu_btop(pmem->address);
		dp->cbd_epfn = mmu_btop(pmem->address + pmem->size) - 1;
		dp->cbd_size = size;
		dp->cbd_reg_bitmap = (cpr_ptr)space;
		dp->cbd_vlt_bitmap = (cpr_ptr)((caddr_t)space + size);
		dp++;
	}

	/* set magic for the last descriptor */
	ASSERT(dp == (tail - 1));
	dp->cbd_magic = CPR_BITMAP_MAGIC;

	return (0);
}


void
i_cpr_bitmap_cleanup(void)
{
	cbd_t *dp;

	if (CPR->c_bmda == NULL)
		return;
	for (dp = CPR->c_bmda; dp->cbd_size; dp++)
		kmem_free((void *)dp->cbd_reg_bitmap, dp->cbd_size * 2);
	kmem_free(CPR->c_bmda, cpr_nbitmaps * sizeof (*CPR->c_bmda));
	CPR->c_bmda = NULL;
	cpr_nbitmaps = 0;
}


/*
 * A "regular" and "volatile" bitmap are created for each range of
 * physical memory.  The volatile maps are used to count and track pages
 * susceptible to heap corruption - caused by drivers that allocate mem
 * during VOP_DUMP(); the regular maps are used for all the other non-
 * susceptible pages.  Before writing the bitmaps to the statefile,
 * each bitmap pair gets merged to simplify handling within cprboot.
 */
int
i_cpr_alloc_bitmaps(void)
{
	int err;

	memlist_read_lock();
	err = i_cpr_bitmap_setup();
	memlist_read_unlock();
	if (err)
		i_cpr_bitmap_cleanup();
	return (err);
}
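
/*
 * Per-range bitmap layout produced by i_cpr_bitmap_setup() above,
 * with size = BITMAP_BYTES(pmem->size):
 *
 *	space:	[ regular bitmap | volatile bitmap ]
 *		 cbd_reg_bitmap	  cbd_vlt_bitmap = space + size
 *
 * so one kmem_zalloc(size * 2) backs both maps, and
 * i_cpr_bitmap_cleanup() frees each pair with a single kmem_free().
 */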