/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Platform specific implementation code
 */

#define SUNDDI_IMPL

#include <sys/types.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/prom_plat.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <sys/iommu.h>
#include <sys/scb.h>
#include <sys/cpuvar.h>
#include <sys/intreg.h>
#include <sys/pte.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/as.h>
#include <sys/cpr.h>
#include <sys/kmem.h>
#include <sys/clock.h>
#include <sys/kmem.h>
#include <sys/panic.h>
#include <vm/seg_kmem.h>
#include <sys/cpu_module.h>
#include <sys/callb.h>
#include <sys/machsystm.h>
#include <sys/vmsystm.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/stack.h>
#include <sys/fs/ufs_fs.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/thread.h>
#include <vm/vm_dep.h>

extern void cpr_clear_bitmaps(void);
extern int cpr_setbit(pfn_t ppn, int mapflag);
extern int cpr_clrbit(pfn_t ppn, int mapflag);
extern pgcnt_t cpr_scan_kvseg(int mapflag, bitfunc_t bitfunc, struct seg *seg);
extern pgcnt_t cpr_count_seg_pages(int mapflag, bitfunc_t bitfunc);
extern void dtlb_wr_entry(uint_t, tte_t *, uint64_t *);
extern void itlb_wr_entry(uint_t, tte_t *, uint64_t *);

static int i_cpr_storage_desc_alloc(csd_t **, pgcnt_t *, csd_t **, int);
static void i_cpr_storage_desc_init(csd_t *, pgcnt_t, csd_t *);
static caddr_t i_cpr_storage_data_alloc(pgcnt_t, pgcnt_t *, int);
static int cpr_dump_sensitive(vnode_t *, csd_t *);
static void i_cpr_clear_entries(uint64_t, uint64_t);
static void i_cpr_xcall(xcfunc_t);

void i_cpr_storage_free(void);

extern void *i_cpr_data_page;
extern int cpr_test_mode;
extern int cpr_nbitmaps;
extern char cpr_default_path[];
extern caddr_t textva, datava;

static struct cpr_map_info cpr_prom_retain[CPR_PROM_RETAIN_CNT];
caddr_t cpr_vaddr = NULL;

static uint_t sensitive_pages_saved;
static uint_t sensitive_size_saved;

caddr_t i_cpr_storage_data_base;
caddr_t i_cpr_storage_data_end;
csd_t *i_cpr_storage_desc_base;
csd_t *i_cpr_storage_desc_end;          /* one byte beyond last used descp */
csd_t *i_cpr_storage_desc_last_used;    /* last used descriptor */
caddr_t sensitive_write_ptr;            /* position for next storage write */

size_t i_cpr_sensitive_bytes_dumped;
pgcnt_t i_cpr_sensitive_pgs_dumped;
pgcnt_t i_cpr_storage_data_sz;          /* in pages */
pgcnt_t i_cpr_storage_desc_pgcnt;       /* in pages */

ushort_t cpr_mach_type = CPR_MACHTYPE_4U;
static csu_md_t m_info;


#define MAX_STORAGE_RETRY       3
#define MAX_STORAGE_ALLOC_RETRY 3
#define INITIAL_ALLOC_PCNT      40      /* starting allocation percentage */
#define INTEGRAL                100     /* to get 1% precision */

#define EXTRA_RATE              2       /* add EXTRA_RATE% extra space */
#define EXTRA_DESCS             10

#define CPR_NO_STORAGE_DESC     1
#define CPR_NO_STORAGE_DATA     2

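/*
 * Note on the sizing values above: i_cpr_storage_data_alloc() below
 * computes its first storage estimate as
 * (pages * INITIAL_ALLOC_PCNT) / INTEGRAL, i.e. 40% of the tagged
 * sensitive pages, and raises the percentage on each retry.
 */
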
#define CIF_SPLICE              0
#define CIF_UNLINK              1


/*
 * CPR miscellaneous support routines
 */
#define cpr_open(path, mode, vpp)       (vn_open(path, UIO_SYSSPACE, \
                mode, 0600, vpp, CRCREAT, 0))
#define cpr_rdwr(rw, vp, basep, cnt)    (vn_rdwr(rw, vp, (caddr_t)(basep), \
                cnt, 0LL, UIO_SYSSPACE, 0, (rlim64_t)MAXOFF_T, CRED(), \
                (ssize_t *)NULL))

/*
 * definitions for saving/restoring prom pages
 */
static void     *ppage_buf;
static pgcnt_t  ppage_count;
static pfn_t    *pphys_list;
static size_t   pphys_list_size;

typedef void (*tlb_rw_t)(uint_t, tte_t *, uint64_t *);
typedef void (*tlb_filter_t)(int, tte_t *, uint64_t, void *);

/*
 * private struct for tlb handling
 */
struct cpr_trans_info {
        sutlb_t         *dst;
        sutlb_t         *tail;
        tlb_rw_t        reader;
        tlb_rw_t        writer;
        tlb_filter_t    filter;
        int             index;
        uint64_t        skip;           /* assumes TLB <= 64 locked entries */
};
typedef struct cpr_trans_info cti_t;


/*
 * special handling for tlb info
 */
#define WITHIN_OFW(va) \
        (((va) > (uint64_t)OFW_START_ADDR) && ((va) < (uint64_t)OFW_END_ADDR))

#define WITHIN_NUCLEUS(va, base) \
        (((va) >= (base)) && \
        (((va) + MMU_PAGESIZE) <= ((base) + MMU_PAGESIZE4M)))

#define IS_BIGKTSB(va) \
        (enable_bigktsb && \
        ((va) >= (uint64_t)ktsb_base) && \
        ((va) < (uint64_t)(ktsb_base + ktsb_sz)))


/*
 * WARNING:
 * the text from this file is linked to follow cpr_resume_setup.o;
 * only add text between here and i_cpr_end_jumpback when it needs
 * to be called during resume before we switch back to the kernel
 * trap table. all the text in this range must fit within a page.
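 * (i_cpr_save_machdep_info() below enforces this at suspend time by
 * checking that i_cpr_resume_setup and i_cpr_end_jumpback fall within
 * the same page and panicking otherwise.)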
 */


/*
 * each time a machine is reset, the prom uses an inconsistent set of phys
 * pages and the cif cookie may differ as well. so prior to restoring the
 * original prom, we have to use the new/tmp prom's translations when
 * requesting prom services.
 *
 * cif_handler starts out as the original prom cookie, and that gets used
 * by client_handler() to jump into the prom. here we splice-in a wrapper
 * routine by writing cif_handler; client_handler() will now jump to the
 * wrapper which switches the %tba to the new/tmp prom's trap table then
 * jumps to the new cookie.
 */
void
i_cpr_cif_setup(int action)
{
        extern void *i_cpr_orig_cif, *cif_handler;
        extern int i_cpr_cif_wrapper(void *);

        /*
         * save the original cookie and change the current cookie to the
         * wrapper routine. later we just restore the original cookie.
         */
        if (action == CIF_SPLICE) {
                i_cpr_orig_cif = cif_handler;
                cif_handler = (void *)i_cpr_cif_wrapper;
        } else if (action == CIF_UNLINK)
                cif_handler = i_cpr_orig_cif;
}


/*
 * launch slave cpus into kernel text, pause them,
 * and restore the original prom pages
 */
void
i_cpr_mp_setup(void)
{
        extern void restart_other_cpu(int);
        cpu_t *cp;

        uint64_t kctx = kcontextreg;

        /*
         * Do not allow setting page size codes in MMU primary context
         * register while using cif wrapper. This is needed to work
         * around OBP's incorrect handling of this MMU register.
         */
        kcontextreg = 0;

        /*
         * reset cpu_ready_set so x_calls work properly
         */
        CPUSET_ZERO(cpu_ready_set);
        CPUSET_ADD(cpu_ready_set, getprocessorid());

        /*
         * setup cif to use the cookie from the new/tmp prom
         * and setup tmp handling for calling prom services.
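         * (CIF_SPLICE installs the wrapper described in i_cpr_cif_setup()
         * above; the matching CIF_UNLINK call near the end of this routine
         * restores the original cookie.)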
         */
        i_cpr_cif_setup(CIF_SPLICE);

        /*
         * at this point, only the nucleus and a few cpr pages are
         * mapped in. once we switch to the kernel trap table,
         * we can access the rest of kernel space.
         */
        prom_set_traptable(&trap_table);

        if (ncpus > 1) {
                sfmmu_init_tsbs();

                mutex_enter(&cpu_lock);
                /*
                 * None of the slave cpus are ready at this time,
                 * yet the cpu structures have various cpu_flags set;
                 * clear cpu_flags and mutex_ready.
                 * Since we are coming up from a CPU suspend, the slave cpus
                 * are frozen.
                 */
                for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next) {
                        cp->cpu_flags = CPU_FROZEN;
                        cp->cpu_m.mutex_ready = 0;
                }

                for (cp = CPU->cpu_next; cp != CPU; cp = cp->cpu_next)
                        restart_other_cpu(cp->cpu_id);

                pause_cpus(NULL, NULL);
                mutex_exit(&cpu_lock);

                i_cpr_xcall(i_cpr_clear_entries);
        } else
                i_cpr_clear_entries(0, 0);

        /*
         * now unlink the cif wrapper; WARNING: do not call any
         * prom_xxx() routines until after prom pages are restored.
         */
        i_cpr_cif_setup(CIF_UNLINK);

        (void) i_cpr_prom_pages(CPR_PROM_RESTORE);

        /* allow setting page size codes in MMU primary context register */
        kcontextreg = kctx;
}


/*
 * end marker for jumpback page;
 * this symbol is used to check the size of i_cpr_resume_setup()
 * and the above text. For simplicity, the Makefile needs to
 * link i_cpr_resume_setup.o and cpr_impl.o consecutively.
 */
void
i_cpr_end_jumpback(void)
{
}


/*
 * scan tlb entries with reader; when valid entries are found,
 * the filter routine will selectively save/clear them
 */
static void
i_cpr_scan_tlb(cti_t *ctip)
{
        uint64_t va_tag;
        int tlb_index;
        tte_t tte;

        for (tlb_index = ctip->index; tlb_index >= 0; tlb_index--) {
                (*ctip->reader)((uint_t)tlb_index, &tte, &va_tag);
                if (va_tag && TTE_IS_VALID(&tte))
                        (*ctip->filter)(tlb_index, &tte, va_tag, ctip);
        }
}


/*
 * filter for locked tlb entries that reference the text/data nucleus
 * and any bigktsb's; these will be reinstalled by cprboot on all cpus
 */
/* ARGSUSED */
static void
i_cpr_lnb(int index, tte_t *ttep, uint64_t va_tag, void *ctrans)
{
        cti_t *ctip;

        /*
         * record tlb data at ctip->dst; the target tlb index starts
         * at the highest tlb offset and moves towards 0. the prom
         * reserves both dtlb and itlb index 0. any selected entry
         * also gets marked to prevent being flushed during resume
         */
        if (TTE_IS_LOCKED(ttep) && (va_tag == (uint64_t)textva ||
            va_tag == (uint64_t)datava || IS_BIGKTSB(va_tag))) {
                ctip = ctrans;
                while ((1 << ctip->index) & ctip->skip)
                        ctip->index--;
                ASSERT(ctip->index > 0);
                ASSERT(ctip->dst < ctip->tail);
                ctip->dst->tte.ll = ttep->ll;
                ctip->dst->va_tag = va_tag;
                ctip->dst->index = ctip->index--;
                ctip->dst->tmp = 0;
                ctip->dst++;
        }
}


/*
 * some tlb entries are stale, filter for unlocked entries
 * within the prom virt range and clear them
 */
static void
i_cpr_ufw(int index, tte_t *ttep, uint64_t va_tag, void *ctrans)
{
        sutlb_t clr;
        cti_t *ctip;

        if (!TTE_IS_LOCKED(ttep) && WITHIN_OFW(va_tag)) {
                ctip = ctrans;
                bzero(&clr, sizeof (clr));
                (*ctip->writer)((uint_t)index, &clr.tte, &clr.va_tag);
        }
}


/*
 * some of the entries installed by cprboot are needed only on a
 * short-term basis and need to be flushed to avoid clogging the tlbs.
 * scan the dtte/itte arrays for items marked as temporary and clear
 * dtlb/itlb entries using wrfunc.
 */
static void
i_cpr_clear_tmp(sutlb_t *listp, int max, tlb_rw_t wrfunc)
{
        sutlb_t clr, *tail;

        bzero(&clr, sizeof (clr));
        for (tail = listp + max; listp < tail && listp->va_tag; listp++) {
                if (listp->tmp)
                        (*wrfunc)((uint_t)listp->index, &clr.tte, &clr.va_tag);
        }
}


/* ARGSUSED */
static void
i_cpr_clear_entries(uint64_t arg1, uint64_t arg2)
{
        extern void demap_all(void);
        cti_t cti;

        i_cpr_clear_tmp(m_info.dtte, CPR_MAX_TLB, dtlb_wr_entry);
        i_cpr_clear_tmp(m_info.itte, CPR_MAX_TLB, itlb_wr_entry);

        /*
         * for newer cpus that implement DEMAP_ALL_TYPE, demap_all is
         * a second label for vtag_flushall. the call is made using
         * vtag_flushall() instead of demap_all() due to runtime and
         * krtld results with both older and newer cpu modules.
         */
        if (&demap_all != 0) {
                vtag_flushall();
                return;
        }

        /*
         * for older V9 cpus, scan tlbs and clear stale entries
         */
        bzero(&cti, sizeof (cti));
        cti.filter = i_cpr_ufw;

        cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1;
        cti.reader = dtlb_rd_entry;
        cti.writer = dtlb_wr_entry;
        i_cpr_scan_tlb(&cti);

        cti.index = cpunodes[CPU->cpu_id].itlb_size - 1;
        cti.reader = itlb_rd_entry;
        cti.writer = itlb_wr_entry;
        i_cpr_scan_tlb(&cti);
}


/*
 * craft tlb info for tmp use during resume; this data gets used by
 * cprboot to install tlb entries. we also mark each struct as tmp
 * so those tlb entries will get flushed after switching to the kernel
 * trap table. no data needs to be recorded for vaddr when it falls
 * within the nucleus since we've already recorded nucleus ttes and
 * an 8K tte would conflict with a 4MB tte. eg: the cpr module
 * text/data may have been loaded into the text/data nucleus.
 */
static void
i_cpr_make_tte(cti_t *ctip, void *vaddr, caddr_t nbase)
{
        pfn_t ppn;
        uint_t rw;

        if (WITHIN_NUCLEUS((caddr_t)vaddr, nbase))
                return;

        while ((1 << ctip->index) & ctip->skip)
                ctip->index--;
        ASSERT(ctip->index > 0);
        ASSERT(ctip->dst < ctip->tail);

        /*
         * without any global service available to lookup
         * a tte by vaddr, we craft our own here:
         */
        ppn = va_to_pfn(vaddr);
        rw = (nbase == datava) ? TTE_HWWR_INT : 0;
        ctip->dst->tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn);
        ctip->dst->tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT |
            TTE_CP_INT | TTE_PRIV_INT | rw;
        ctip->dst->va_tag = ((uintptr_t)vaddr & MMU_PAGEMASK);
        ctip->dst->index = ctip->index--;
        ctip->dst->tmp = 1;
        ctip->dst++;
}


static void
i_cpr_xcall(xcfunc_t func)
{
        uint_t pil, reset_pil;

        pil = getpil();
        if (pil < XCALL_PIL)
                reset_pil = 0;
        else {
                reset_pil = 1;
                setpil(XCALL_PIL - 1);
        }
        xc_some(cpu_ready_set, func, 0, 0);
        if (reset_pil)
                setpil(pil);
}


/*
 * restart paused slave cpus
 */
void
i_cpr_machdep_setup(void)
{
        if (ncpus > 1) {
                CPR_DEBUG(CPR_DEBUG1, "MP restarted...\n");
                mutex_enter(&cpu_lock);
                start_cpus();
                mutex_exit(&cpu_lock);
        }
}


/*
 * Stop all interrupt activities in the system
 */
void
i_cpr_stop_intr(void)
{
        (void) spl7();
}

/*
 * Set machine up to take interrupts
 */
void
i_cpr_enable_intr(void)
{
        (void) spl0();
}


/*
 * record cpu nodes and ids
 */
static void
i_cpr_save_cpu_info(void)
{
        struct sun4u_cpu_info *scip;
        cpu_t *cp;

        scip = m_info.sci;
        cp = CPU;
        do {
                ASSERT(scip < &m_info.sci[NCPU]);
                scip->cpu_id = cp->cpu_id;
                scip->node = cpunodes[cp->cpu_id].nodeid;
                scip++;
        } while ((cp = cp->cpu_next) != CPU);
}


/*
 * Write necessary machine dependent information to cpr state file,
 * eg. sun4u mmu ctx secondary for the current running process (cpr) ...
 */
int
i_cpr_write_machdep(vnode_t *vp)
{
        extern uint_t getpstate(), getwstate();
        extern uint_t i_cpr_tstack_size;
        const char ustr[] = ": unix-tte 2drop false ;";
        uintptr_t tinfo;
        label_t *ltp;
        cmd_t cmach;
        char *fmt;
        int rc;

        /*
         * ustr[] is used as temporary forth words during
         * slave startup sequence, see sfmmu_mp_startup()
         */

        cmach.md_magic = (uint_t)CPR_MACHDEP_MAGIC;
        cmach.md_size = sizeof (m_info) + sizeof (ustr);

        if (rc = cpr_write(vp, (caddr_t)&cmach, sizeof (cmach))) {
                cpr_err(CE_WARN, "Failed to write descriptor.");
                return (rc);
        }

        /*
         * m_info is now cleared in i_cpr_dump_setup()
         */
        m_info.ksb = (uint32_t)STACK_BIAS;
        m_info.kpstate = (uint16_t)getpstate();
        m_info.kwstate = (uint16_t)getwstate();
        CPR_DEBUG(CPR_DEBUG1, "stack bias 0x%x, pstate 0x%x, wstate 0x%x\n",
            m_info.ksb, m_info.kpstate, m_info.kwstate);

        ltp = &ttolwp(curthread)->lwp_qsav;
        m_info.qsav_pc = (cpr_ext)ltp->val[0];
        m_info.qsav_sp = (cpr_ext)ltp->val[1];

        /*
         * Set secondary context to INVALID_CONTEXT to force the HAT
         * to re-setup the MMU registers and locked TTEs it needs for
         * TLB miss handling.
         */
        m_info.mmu_ctx_sec = INVALID_CONTEXT;
        m_info.mmu_ctx_pri = KCONTEXT;

        tinfo = (uintptr_t)curthread;
        m_info.thrp = (cpr_ptr)tinfo;

        tinfo = (uintptr_t)i_cpr_resume_setup;
        m_info.func = (cpr_ptr)tinfo;

        /*
         * i_cpr_data_page is comprised of a 4K stack area and a few
         * trailing data symbols; the page is shared by the prom and
         * kernel during resume. the stack size is recorded here
         * and used by cprboot to set %sp
         */
        tinfo = (uintptr_t)&i_cpr_data_page;
        m_info.tmp_stack = (cpr_ptr)tinfo;
        m_info.tmp_stacksize = i_cpr_tstack_size;

        m_info.test_mode = cpr_test_mode;

        i_cpr_save_cpu_info();

        if (rc = cpr_write(vp, (caddr_t)&m_info, sizeof (m_info))) {
                cpr_err(CE_WARN, "Failed to write machdep info.");
                return (rc);
        }

        fmt = "error writing %s forth info";
        if (rc = cpr_write(vp, (caddr_t)ustr, sizeof (ustr)))
                cpr_err(CE_WARN, fmt, "unix-tte");

        return (rc);
}


/*
 * Save miscellaneous information which needs to be written to the
 * state file. This information is required to re-initialize
 * kernel/prom handshaking.
 */
void
i_cpr_save_machdep_info(void)
{
        CPR_DEBUG(CPR_DEBUG5, "jumpback size = 0x%lx\n",
            (uintptr_t)&i_cpr_end_jumpback -
            (uintptr_t)i_cpr_resume_setup);

        /*
         * Verify the jumpback code all falls in one page.
         */
        if (((uintptr_t)&i_cpr_end_jumpback & MMU_PAGEMASK) !=
            ((uintptr_t)i_cpr_resume_setup & MMU_PAGEMASK))
                cpr_err(CE_PANIC, "jumpback code exceeds one page.");
}


/*
 * cpu0 should contain bootcpu info
 */
cpu_t *
i_cpr_bootcpu(void)
{
        return (&cpu0);
}

processorid_t
i_cpr_bootcpuid(void)
{
        return (0);
}

/*
 * Return the virtual address of the mapping area
 */
caddr_t
i_cpr_map_setup(void)
{
        /*
         * Allocate a virtual memory range spanned by an hmeblk.
         * This would be 8 hments or 64k bytes. Starting VA
         * must be 64k (8-page) aligned.
         */
        cpr_vaddr = vmem_xalloc(heap_arena,
            mmu_ptob(NHMENTS), mmu_ptob(NHMENTS),
            0, 0, NULL, NULL, VM_NOSLEEP);
        return (cpr_vaddr);
}

/*
 * create tmp locked tlb entries for a group of phys pages;
 *
 * i_cpr_mapin/i_cpr_mapout should always be called in pairs,
 * otherwise would fill up a tlb with locked entries
 */
void
i_cpr_mapin(caddr_t vaddr, uint_t pages, pfn_t ppn)
{
        tte_t tte;
        extern pfn_t curthreadpfn;
        extern int curthreadremapped;

        curthreadremapped = (ppn <= curthreadpfn && curthreadpfn < ppn + pages);

        for (; pages--; ppn++, vaddr += MMU_PAGESIZE) {
                tte.tte_inthi = TTE_VALID_INT | TTE_PFN_INTHI(ppn);
                tte.tte_intlo = TTE_PFN_INTLO(ppn) | TTE_LCK_INT |
                    TTE_CP_INT | TTE_PRIV_INT | TTE_HWWR_INT;
                sfmmu_dtlb_ld_kva(vaddr, &tte);
        }
}

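/*
 * Usage sketch: i_cpr_compress_and_save() below maps a chunk of sensitive
 * pages at CPR->c_mapping_area with i_cpr_mapin(), copies/compresses them
 * into the storage area, and then drops the locked entries again with
 * i_cpr_mapout().
 */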
void
i_cpr_mapout(caddr_t vaddr, uint_t pages)
{
        extern int curthreadremapped;

        if (curthreadremapped && vaddr <= (caddr_t)curthread &&
            (caddr_t)curthread < vaddr + pages * MMU_PAGESIZE)
                curthreadremapped = 0;

        for (; pages--; vaddr += MMU_PAGESIZE)
                vtag_flushpage(vaddr, (uint64_t)ksfmmup);
}

/*
 * We're done using the mapping area; release virtual space
 */
void
i_cpr_map_destroy(void)
{
        vmem_free(heap_arena, cpr_vaddr, mmu_ptob(NHMENTS));
        cpr_vaddr = NULL;
}

/* ARGSUSED */
void
i_cpr_handle_xc(int flag)
{
}


/*
 * This function takes care of pages which are not in kas or need to be
 * taken care of in a special way. For example, panicbuf pages are not
 * in kas and their pages are allocated via prom_retain().
 */
pgcnt_t
i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc)
{
        struct cpr_map_info *pri, *tail;
        pgcnt_t pages, total = 0;
        pfn_t pfn;

        /*
         * Save information about prom retained panicbuf pages
         */
        if (bitfunc == cpr_setbit) {
                pri = &cpr_prom_retain[CPR_PANICBUF];
                pri->virt = (cpr_ptr)panicbuf;
                pri->phys = va_to_pa(panicbuf);
                pri->size = sizeof (panicbuf);
        }

        /*
         * Go through the prom_retain array to tag those pages.
         */
        tail = &cpr_prom_retain[CPR_PROM_RETAIN_CNT];
        for (pri = cpr_prom_retain; pri < tail; pri++) {
                pages = mmu_btopr(pri->size);
                for (pfn = ADDR_TO_PN(pri->phys); pages--; pfn++) {
                        if (pf_is_memory(pfn)) {
                                if (bitfunc == cpr_setbit) {
                                        if ((*bitfunc)(pfn, mapflag) == 0)
                                                total++;
                                } else
                                        total++;
                        }
                }
        }

        return (total);
}


/*
 * Free up memory-related resources here. We start by freeing buffers
 * allocated during suspend initialization. Also, free up the mapping
 * resources allocated in cpr_init().
 */
void
i_cpr_free_memory_resources(void)
{
        (void) i_cpr_prom_pages(CPR_PROM_FREE);
        i_cpr_map_destroy();
        i_cpr_storage_free();
}


/*
 * Derived from cpr_write_statefile().
 * Save the sensitive pages to the storage area and do bookkeeping
 * using the sensitive descriptors. Each descriptor will contain no more
 * than CPR_MAXCONTIG contiguous pages, to match the maximum amount of
 * pages that the statefile gets written to disk at each write.
 * XXX The CPR_MAXCONTIG can be changed to the size of the compression
 * scratch area.
 */
static int
i_cpr_save_to_storage(void)
{
        sensitive_size_saved = 0;
        sensitive_pages_saved = 0;
        sensitive_write_ptr = i_cpr_storage_data_base;
        return (cpr_contig_pages(NULL, SAVE_TO_STORAGE));
}


/*
 * This routine allocates space to save the sensitive kernel pages,
 * i.e. kernel data nucleus, kvalloc and kvseg segments.
 * It's assumed that those segments are the only areas that can be
 * contaminated by memory allocations during statefile dumping.
 * The space allocated here contains:
 *      A list of descriptors describing the saved sensitive pages.
 *      The storage area for saving the compressed sensitive kernel pages.
 *      Since storage pages are allocated from segkmem, they need to be
 *      excluded when saving.
 */
int
i_cpr_save_sensitive_kpages(void)
{
        static const char pages_fmt[] = "\n%s %s allocs\n"
            " spages %ld, vpages %ld, diff %ld\n";
        int retry_cnt;
        int error = 0;
        pgcnt_t pages, spages, vpages;
        caddr_t addr;
        char *str;

        /*
         * Tag sensitive kpages. Allocate space for storage descriptors
         * and storage data area based on the resulting bitmaps.
         * Note: The storage space will be part of the sensitive
         * segment, so we need to tag kpages here before the storage
         * is actually allocated just so their space won't be accounted
         * for.
         * They will not be part of the statefile although those
         * pages will be claimed by cprboot.
         */
        cpr_clear_bitmaps();

        spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_setbit);
        vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);
        pages = spages - vpages;

        str = "i_cpr_save_sensitive_kpages:";
        CPR_DEBUG(CPR_DEBUG7, pages_fmt, "before", str, spages, vpages, pages);

        /*
         * Allocate space to save the clean sensitive kpages
         */
        for (retry_cnt = 0; retry_cnt < MAX_STORAGE_ALLOC_RETRY; retry_cnt++) {
                /*
                 * Alloc on first pass or realloc if we are retrying because
                 * of insufficient storage for sensitive pages
                 */
                if (retry_cnt == 0 || error == ENOMEM) {
                        if (i_cpr_storage_data_base) {
                                kmem_free(i_cpr_storage_data_base,
                                    mmu_ptob(i_cpr_storage_data_sz));
                                i_cpr_storage_data_base = NULL;
                                i_cpr_storage_data_sz = 0;
                        }
                        addr = i_cpr_storage_data_alloc(pages,
                            &i_cpr_storage_data_sz, retry_cnt);
                        if (addr == NULL) {
                                CPR_DEBUG(CPR_DEBUG7,
                                    "\n%s can't allocate data storage space!\n",
                                    str);
                                return (ENOMEM);
                        }
                        i_cpr_storage_data_base = addr;
                        i_cpr_storage_data_end =
                            addr + mmu_ptob(i_cpr_storage_data_sz);
                }

                /*
                 * Allocate on first pass, only realloc if retry is because of
                 * insufficient descriptors, but reset contents on each pass
                 * (desc_alloc resets contents as well)
                 */
                if (retry_cnt == 0 || error == -1) {
                        error = i_cpr_storage_desc_alloc(
                            &i_cpr_storage_desc_base,
                            &i_cpr_storage_desc_pgcnt,
                            &i_cpr_storage_desc_end, retry_cnt);
                        if (error != 0)
                                return (error);
                } else {
                        i_cpr_storage_desc_init(i_cpr_storage_desc_base,
                            i_cpr_storage_desc_pgcnt, i_cpr_storage_desc_end);
                }

                /*
                 * We are ready to save the sensitive kpages to storage.
                 * We cannot trust what's tagged in the bitmaps anymore
                 * after storage allocations. Clear up the bitmaps and
                 * retag the sensitive kpages again.
                 * The storage pages should be untagged.
                 */
                cpr_clear_bitmaps();

                spages =
                    i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_setbit);
                vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);

                CPR_DEBUG(CPR_DEBUG7, pages_fmt, "after ", str,
                    spages, vpages, spages - vpages);

                /*
                 * Returns 0 on success, -1 if too few descriptors, and
                 * ENOMEM if not enough space to save sensitive pages
                 */
                CPR_DEBUG(CPR_DEBUG1, "compressing pages to storage...\n");
                error = i_cpr_save_to_storage();
                if (error == 0) {
                        /* Saving to storage succeeded */
                        CPR_DEBUG(CPR_DEBUG1, "compressed %d pages\n",
                            sensitive_pages_saved);
                        break;
                } else if (error == -1)
                        CPR_DEBUG(CPR_DEBUG1, "%s too few descriptors\n", str);
        }
        if (error == -1)
                error = ENOMEM;
        return (error);
}


/*
 * Estimate how much memory we will need to save
 * the sensitive pages with compression.
 */
static caddr_t
i_cpr_storage_data_alloc(pgcnt_t pages, pgcnt_t *alloc_pages, int retry_cnt)
{
        pgcnt_t alloc_pcnt, last_pcnt;
        caddr_t addr;
        char *str;

        str = "i_cpr_storage_data_alloc:";
        if (retry_cnt == 0) {
                /*
                 * common compression ratio is about 3:1
                 * initial storage allocation is estimated at 40%
                 * to cover the majority of cases
                 */
                alloc_pcnt = INITIAL_ALLOC_PCNT;
                *alloc_pages = (pages * alloc_pcnt) / INTEGRAL;
                CPR_DEBUG(CPR_DEBUG7, "%s sensitive pages: %ld\n", str, pages);
                CPR_DEBUG(CPR_DEBUG7,
                    "%s initial est pages: %ld, alloc %ld%%\n",
                    str, *alloc_pages, alloc_pcnt);
        } else {
                /*
                 * calculate the prior compression percentage (x100)
                 * from the last attempt to save sensitive pages
                 */
                ASSERT(sensitive_pages_saved != 0);
                last_pcnt = (mmu_btopr(sensitive_size_saved) * INTEGRAL) /
                    sensitive_pages_saved;
                CPR_DEBUG(CPR_DEBUG7, "%s last ratio %ld%%\n", str, last_pcnt);

                /*
                 * new estimated storage size is based on
                 * the larger ratio + 5% for each retry:
                 * pages * (last + [5%, 10%])
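                 * e.g. (illustrative figures): with 10000 sensitive pages
                 * and a prior ratio of 38%, the first retry uses
                 * MAX(38, 40) + 5 = 45%, i.e. 4500 storage pages.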
                 */
                alloc_pcnt = MAX(last_pcnt, INITIAL_ALLOC_PCNT) +
                    (retry_cnt * 5);
                *alloc_pages = (pages * alloc_pcnt) / INTEGRAL;
                CPR_DEBUG(CPR_DEBUG7, "%s Retry est pages: %ld, alloc %ld%%\n",
                    str, *alloc_pages, alloc_pcnt);
        }

        addr = kmem_alloc(mmu_ptob(*alloc_pages), KM_NOSLEEP);
        CPR_DEBUG(CPR_DEBUG7, "%s alloc %ld pages\n", str, *alloc_pages);
        return (addr);
}


void
i_cpr_storage_free(void)
{
        /* Free descriptors */
        if (i_cpr_storage_desc_base) {
                kmem_free(i_cpr_storage_desc_base,
                    mmu_ptob(i_cpr_storage_desc_pgcnt));
                i_cpr_storage_desc_base = NULL;
                i_cpr_storage_desc_pgcnt = 0;
        }


        /* Data storage */
        if (i_cpr_storage_data_base) {
                kmem_free(i_cpr_storage_data_base,
                    mmu_ptob(i_cpr_storage_data_sz));
                i_cpr_storage_data_base = NULL;
                i_cpr_storage_data_sz = 0;
        }
}


/*
 * This routine is derived from cpr_compress_and_write().
 *      1. Do bookkeeping in the descriptor for the contiguous sensitive chunk.
 *      2. Compress and save the clean sensitive pages into the storage area.
 */
int
i_cpr_compress_and_save(int chunks, pfn_t spfn, pgcnt_t pages)
{
        extern char *cpr_compress_pages(cpd_t *, pgcnt_t, int);
        extern caddr_t i_cpr_storage_data_end;
        uint_t remaining, datalen;
        uint32_t test_usum;
        char *datap;
        csd_t *descp;
        cpd_t cpd;
        int error;

        /*
         * Fill next empty storage descriptor
         */
        descp = i_cpr_storage_desc_base + chunks - 1;
        if (descp >= i_cpr_storage_desc_end) {
                CPR_DEBUG(CPR_DEBUG1, "ran out of descriptors, base 0x%p, "
                    "chunks %d, end 0x%p, descp 0x%p\n",
                    (void *)i_cpr_storage_desc_base, chunks,
                    (void *)i_cpr_storage_desc_end, (void *)descp);
                return (-1);
        }
        ASSERT(descp->csd_dirty_spfn == (uint_t)-1);
        i_cpr_storage_desc_last_used = descp;

        descp->csd_dirty_spfn = spfn;
        descp->csd_dirty_npages = pages;

        i_cpr_mapin(CPR->c_mapping_area, pages, spfn);

        /*
         * try compressing pages and copy cpd fields
         * pfn is copied for debug use
         */
        cpd.cpd_pfn = spfn;
        datap = cpr_compress_pages(&cpd, pages, C_COMPRESSING);
        datalen = cpd.cpd_length;
        descp->csd_clean_compressed = (cpd.cpd_flag & CPD_COMPRESS);
#ifdef DEBUG
        descp->csd_usum = cpd.cpd_usum;
        descp->csd_csum = cpd.cpd_csum;
#endif

        error = 0;

        /*
         * Save the raw or compressed data to the storage area pointed to by
         * sensitive_write_ptr. Make sure the storage space is big enough to
         * hold the result. Otherwise roll back to increase the storage space.
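         * (An ENOMEM return from here causes i_cpr_save_sensitive_kpages()
         * to retry with a larger data storage area; the -1 return above,
         * for descriptor overflow, triggers a descriptor realloc instead.)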
10567c478bd9Sstevel@tonic-gate */ 10577c478bd9Sstevel@tonic-gate descp->csd_clean_sva = (cpr_ptr)sensitive_write_ptr; 10587c478bd9Sstevel@tonic-gate descp->csd_clean_sz = datalen; 10597c478bd9Sstevel@tonic-gate if ((sensitive_write_ptr + datalen) < i_cpr_storage_data_end) { 10607c478bd9Sstevel@tonic-gate extern void cprbcopy(void *, void *, size_t); 10617c478bd9Sstevel@tonic-gate 10627c478bd9Sstevel@tonic-gate cprbcopy(datap, sensitive_write_ptr, datalen); 10637c478bd9Sstevel@tonic-gate sensitive_size_saved += datalen; 10647c478bd9Sstevel@tonic-gate sensitive_pages_saved += descp->csd_dirty_npages; 10657c478bd9Sstevel@tonic-gate sensitive_write_ptr += datalen; 10667c478bd9Sstevel@tonic-gate } else { 10677c478bd9Sstevel@tonic-gate remaining = (i_cpr_storage_data_end - sensitive_write_ptr); 1068ae115bc7Smrj CPR_DEBUG(CPR_DEBUG1, "i_cpr_compress_and_save: The storage " 10697c478bd9Sstevel@tonic-gate "space is too small!\ngot %d, want %d\n\n", 1070ae115bc7Smrj remaining, (remaining + datalen)); 10717c478bd9Sstevel@tonic-gate #ifdef DEBUG 10727c478bd9Sstevel@tonic-gate /* 10737c478bd9Sstevel@tonic-gate * Check to see if the content of the sensitive pages that we 10747c478bd9Sstevel@tonic-gate * just copied have changed during this small time window. 10757c478bd9Sstevel@tonic-gate */ 10767c478bd9Sstevel@tonic-gate test_usum = checksum32(CPR->c_mapping_area, mmu_ptob(pages)); 10777c478bd9Sstevel@tonic-gate descp->csd_usum = cpd.cpd_usum; 10787c478bd9Sstevel@tonic-gate if (test_usum != descp->csd_usum) { 1079ae115bc7Smrj CPR_DEBUG(CPR_DEBUG1, "\nWARNING: " 1080ae115bc7Smrj "i_cpr_compress_and_save: " 1081bf30efa4Smathue "Data in the range of pfn 0x%lx to pfn " 1082bf30efa4Smathue "0x%lx has changed after they are saved " 1083ae115bc7Smrj "into storage.", spfn, (spfn + pages - 1)); 10847c478bd9Sstevel@tonic-gate } 10857c478bd9Sstevel@tonic-gate #endif 10867c478bd9Sstevel@tonic-gate error = ENOMEM; 10877c478bd9Sstevel@tonic-gate } 10887c478bd9Sstevel@tonic-gate 10897c478bd9Sstevel@tonic-gate i_cpr_mapout(CPR->c_mapping_area, pages); 10907c478bd9Sstevel@tonic-gate return (error); 10917c478bd9Sstevel@tonic-gate } 10927c478bd9Sstevel@tonic-gate 10937c478bd9Sstevel@tonic-gate 10947c478bd9Sstevel@tonic-gate /* 10957c478bd9Sstevel@tonic-gate * This routine is derived from cpr_count_kpages(). 10967c478bd9Sstevel@tonic-gate * It goes through kernel data nucleus and segkmem segments to select 10977c478bd9Sstevel@tonic-gate * pages in use and mark them in the corresponding bitmap. 
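/*
 * Editorial note (added; not in the original source): an ENOMEM return
 * from i_cpr_compress_and_save() means the in-kernel storage area was too
 * small for this chunk; per the "roll back to increase the storage space"
 * comment above, the caller is expected to grow the storage (compare the
 * retry percentages in i_cpr_storage_data_alloc()) and try again.
 */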

/*
 * This routine is derived from cpr_count_kpages().
 * It goes through the kernel data nucleus and segkmem segments to select
 * pages in use and mark them in the corresponding bitmap.
 */
pgcnt_t
i_cpr_count_sensitive_kpages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t kdata_cnt = 0, segkmem_cnt = 0;
	extern caddr_t e_moddata;
	extern struct seg kvalloc;
	extern struct seg kmem64;
	size_t size;

	/*
	 * Kernel data nucleus pages
	 */
	size = e_moddata - s_data;
	kdata_cnt += cpr_count_pages(s_data, size,
	    mapflag, bitfunc, DBG_SHOWRANGE);

	/*
	 * kvseg and kvalloc pages
	 */
	segkmem_cnt += cpr_scan_kvseg(mapflag, bitfunc, &kvseg);
	segkmem_cnt += cpr_count_pages(kvalloc.s_base, kvalloc.s_size,
	    mapflag, bitfunc, DBG_SHOWRANGE);

	/* segment to support kernel memory usage above 32-bit space (4GB) */
	if (kmem64.s_base)
		segkmem_cnt += cpr_count_pages(kmem64.s_base, kmem64.s_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);

	CPR_DEBUG(CPR_DEBUG7, "\ni_cpr_count_sensitive_kpages:\n"
	    "\tkdata_cnt %ld + segkmem_cnt %ld = %ld pages\n",
	    kdata_cnt, segkmem_cnt, kdata_cnt + segkmem_cnt);

	return (kdata_cnt + segkmem_cnt);
}


pgcnt_t
i_cpr_count_storage_pages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t count = 0;

	if (i_cpr_storage_desc_base) {
		count += cpr_count_pages((caddr_t)i_cpr_storage_desc_base,
		    (size_t)mmu_ptob(i_cpr_storage_desc_pgcnt),
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	if (i_cpr_storage_data_base) {
		count += cpr_count_pages(i_cpr_storage_data_base,
		    (size_t)mmu_ptob(i_cpr_storage_data_sz),
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	return (count);
}
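/*
 * Editorial note (added; not in the original source): both counting
 * routines above use the cpr_count_pages() callback style, where the
 * bitfunc argument (cpr_setbit/cpr_clrbit, per the externs at the top of
 * the file) marks or clears each page under the bitmap selected by
 * mapflag and the return value is the number of pages touched.  As a
 * purely hypothetical illustration, a setup with two pages of descriptors
 * and 300 pages of storage data would report 302 pages from
 * i_cpr_count_storage_pages().
 */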

/*
 * Derived from cpr_write_statefile().
 * Allocate (or reallocate after exhausting the supply) descriptors for each
 * chunk of contiguous sensitive kpages.
 */
static int
i_cpr_storage_desc_alloc(csd_t **basepp, pgcnt_t *pgsp, csd_t **endpp,
    int retry)
{
	pgcnt_t npages;
	int chunks;
	csd_t *descp, *end;
	size_t len;
	char *str = "i_cpr_storage_desc_alloc:";

	/*
	 * On initial allocation, add some extra to cover overhead caused
	 * by the allocation for the storage area later.
	 */
	if (retry == 0) {
		chunks = cpr_contig_pages(NULL, STORAGE_DESC_ALLOC) +
		    EXTRA_DESCS;
		npages = mmu_btopr(sizeof (**basepp) * (pgcnt_t)chunks);
		CPR_DEBUG(CPR_DEBUG7, "%s chunks %d, ", str, chunks);
	} else {
		CPR_DEBUG(CPR_DEBUG7, "%s retry %d: ", str, retry);
		npages = *pgsp + 1;
	}
	/* Free old descriptors, if any */
	if (*basepp)
		kmem_free((caddr_t)*basepp, mmu_ptob(*pgsp));

	descp = *basepp = kmem_alloc(mmu_ptob(npages), KM_NOSLEEP);
	if (descp == NULL) {
		CPR_DEBUG(CPR_DEBUG7, "%s no space for descriptors!\n", str);
		return (ENOMEM);
	}

	*pgsp = npages;
	len = mmu_ptob(npages);
	end = *endpp = descp + (len / (sizeof (**basepp)));
	CPR_DEBUG(CPR_DEBUG7, "npages 0x%lx, len 0x%lx, items 0x%lx\n\t*basepp "
	    "%p, *endpp %p\n", npages, len, (len / (sizeof (**basepp))),
	    (void *)*basepp, (void *)*endpp);
	i_cpr_storage_desc_init(descp, npages, end);
	return (0);
}

static void
i_cpr_storage_desc_init(csd_t *descp, pgcnt_t npages, csd_t *end)
{
	size_t len = mmu_ptob(npages);

	/* Initialize the descriptors to something impossible. */
	bzero(descp, len);
#ifdef DEBUG
	/*
	 * This condition is tested by an ASSERT
	 */
	for (; descp < end; descp++)
		descp->csd_dirty_spfn = (uint_t)-1;
#endif
}
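/*
 * Editorial note (added; not in the original source): under DEBUG the
 * init loop above stamps every descriptor's csd_dirty_spfn with the
 * (uint_t)-1 sentinel; i_cpr_compress_and_save() later ASSERTs that the
 * descriptor it is about to fill still carries that sentinel, which
 * catches a chunk being written into the same descriptor slot twice.
 */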

int
i_cpr_dump_sensitive_kpages(vnode_t *vp)
{
	int error = 0;
	uint_t spin_cnt = 0;
	csd_t *descp;

	/*
	 * The following two variables need to be reinitialized
	 * for each cpr cycle.
	 */
	i_cpr_sensitive_bytes_dumped = 0;
	i_cpr_sensitive_pgs_dumped = 0;

	if (i_cpr_storage_desc_base) {
		for (descp = i_cpr_storage_desc_base;
		    descp <= i_cpr_storage_desc_last_used; descp++) {
			if (error = cpr_dump_sensitive(vp, descp))
				return (error);
			spin_cnt++;
			if ((spin_cnt & 0x5F) == 1)
				cpr_spinning_bar();
		}
		prom_printf(" \b");
	}

	CPR_DEBUG(CPR_DEBUG7, "\ni_cpr_dump_sensitive_kpages: dumped %ld\n",
	    i_cpr_sensitive_pgs_dumped);
	return (0);
}

/*
 * 1. Fill the cpr page descriptor with the info of the dirty pages
 *    and write the descriptor out.  It will be used at resume.
 * 2. Write out the clean data instead of the dirty data.
 *    Note: to save space, the clean data is already compressed.
 */
static int
cpr_dump_sensitive(vnode_t *vp, csd_t *descp)
{
	int error = 0;
	caddr_t datap;
	cpd_t cpd;	/* cpr page descriptor */
	pfn_t dirty_spfn;
	pgcnt_t dirty_npages;
	size_t clean_sz;
	caddr_t clean_sva;
	int clean_compressed;
	extern uchar_t cpr_pagecopy[];

	dirty_spfn = descp->csd_dirty_spfn;
	dirty_npages = descp->csd_dirty_npages;
	clean_sva = (caddr_t)descp->csd_clean_sva;
	clean_sz = descp->csd_clean_sz;
	clean_compressed = descp->csd_clean_compressed;

	/* Fill cpr page descriptor. */
	cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC;
	cpd.cpd_pfn = dirty_spfn;
	cpd.cpd_flag = 0;	/* must init to zero */
	cpd.cpd_pages = dirty_npages;

#ifdef DEBUG
	if ((cpd.cpd_usum = descp->csd_usum) != 0)
		cpd.cpd_flag |= CPD_USUM;
	if ((cpd.cpd_csum = descp->csd_csum) != 0)
		cpd.cpd_flag |= CPD_CSUM;
#endif

	STAT->cs_dumped_statefsz += mmu_ptob(dirty_npages);

	/*
	 * The sensitive kpages are usually saved with compression
	 * unless compression could not reduce the size of the data.
	 * If the user chooses not to have the statefile compressed,
	 * we need to decompress the data before dumping it to disk.
	 */
	if (CPR->c_flags & C_COMPRESSING) {
		cpd.cpd_length = clean_sz;
		datap = clean_sva;
		if (clean_compressed)
			cpd.cpd_flag |= CPD_COMPRESS;
	} else {
		if (clean_compressed) {
			cpd.cpd_length = decompress(clean_sva, cpr_pagecopy,
			    clean_sz, mmu_ptob(dirty_npages));
			datap = (caddr_t)cpr_pagecopy;
			ASSERT(cpd.cpd_length == mmu_ptob(dirty_npages));
		} else {
			cpd.cpd_length = clean_sz;
			datap = clean_sva;
		}
		cpd.cpd_csum = 0;
	}

	/* Write cpr page descriptor */
	error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd));
	if (error) {
		CPR_DEBUG(CPR_DEBUG7, "descp: %p\n", (void *)descp);
#ifdef DEBUG
		debug_enter("cpr_dump_sensitive: cpr_write() page "
		    "descriptor failed!\n");
#endif
		return (error);
	}

	i_cpr_sensitive_bytes_dumped += sizeof (cpd_t);

	/* Write page data */
	error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length);
	if (error) {
		CPR_DEBUG(CPR_DEBUG7, "error: %x\n", error);
		CPR_DEBUG(CPR_DEBUG7, "descp: %p\n", (void *)descp);
		CPR_DEBUG(CPR_DEBUG7, "cpr_write(%p, %p , %lx)\n",
		    (void *)vp, (void *)datap, cpd.cpd_length);
#ifdef DEBUG
		debug_enter("cpr_dump_sensitive: cpr_write() data failed!\n");
#endif
		return (error);
	}

	i_cpr_sensitive_bytes_dumped += cpd.cpd_length;
	i_cpr_sensitive_pgs_dumped += dirty_npages;

	return (error);
}
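/*
 * Editorial note (added; not in the original source): each chunk lands in
 * the statefile as a fixed-size cpd_t header followed by cpd.cpd_length
 * bytes of (possibly compressed) page data, so i_cpr_sensitive_bytes_dumped
 * advances by sizeof (cpd_t) plus cpd.cpd_length for every descriptor
 * written, while i_cpr_sensitive_pgs_dumped tracks the original page count.
 */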

/*
 * Sanity check to make sure that we have dumped the right number
 * of pages from the different sources to the statefile.
 */
int
i_cpr_check_pgs_dumped(uint_t pgs_expected, uint_t regular_pgs_dumped)
{
	uint_t total_pgs_dumped;

	total_pgs_dumped = regular_pgs_dumped + i_cpr_sensitive_pgs_dumped;

	CPR_DEBUG(CPR_DEBUG7, "\ncheck_pgs: reg %d + sens %ld = %d, "
	    "expect %d\n\n", regular_pgs_dumped, i_cpr_sensitive_pgs_dumped,
	    total_pgs_dumped, pgs_expected);

	if (pgs_expected == total_pgs_dumped)
		return (0);

	return (EINVAL);
}
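/*
 * Editorial note (added; not in the original source): the check is plain
 * accounting -- if, say, 5000 pages were expected and 4200 regular pages
 * were dumped, the 800 sensitive pages counted above must make up the
 * difference or the statefile is rejected with EINVAL.  The figures are
 * invented purely to show the arithmetic.
 */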

int
i_cpr_reusefini(void)
{
	struct vnode *vp;
	cdef_t *cdef;
	size_t size;
	char *bufp;
	int rc;

	if (cpr_reusable_mode)
		cpr_reusable_mode = 0;

	if (rc = cpr_open_deffile(FREAD|FWRITE, &vp)) {
		if (rc == EROFS) {
			cpr_err(CE_CONT, "uadmin A_FREEZE AD_REUSEFINI "
			    "(uadmin %d %d)\nmust be done with / mounted "
			    "writeable.\n", A_FREEZE, AD_REUSEFINI);
		}
		return (rc);
	}

	cdef = kmem_alloc(sizeof (*cdef), KM_SLEEP);
	rc = cpr_rdwr(UIO_READ, vp, cdef, sizeof (*cdef));

	if (rc) {
		cpr_err(CE_WARN, "Failed reading %s, errno = %d",
		    cpr_default_path, rc);
	} else if (cdef->mini.magic != CPR_DEFAULT_MAGIC) {
		cpr_err(CE_WARN, "bad magic number in %s, cannot restore "
		    "prom values for %s", cpr_default_path,
		    cpr_enumerate_promprops(&bufp, &size));
		kmem_free(bufp, size);
		rc = EINVAL;
	} else {
		/*
		 * clean up prom properties
		 */
		rc = cpr_update_nvram(cdef->props);
		if (rc == 0) {
			/*
			 * invalidate the disk copy and turn off reusable
			 */
			cdef->mini.magic = 0;
			cdef->mini.reusable = 0;
			if (rc = cpr_rdwr(UIO_WRITE, vp,
			    &cdef->mini, sizeof (cdef->mini))) {
				cpr_err(CE_WARN, "Failed writing %s, errno %d",
				    cpr_default_path, rc);
			}
		}
	}

	(void) VOP_CLOSE(vp, FREAD|FWRITE, 1, (offset_t)0, CRED(), NULL);
	VN_RELE(vp);
	kmem_free(cdef, sizeof (*cdef));

	return (rc);
}


int
i_cpr_reuseinit(void)
{
	int rc = 0;

	if (rc = cpr_default_setup(1))
		return (rc);

	/*
	 * We need to validate the default file
	 */
	rc = cpr_validate_definfo(1);
	if (rc == 0)
		cpr_reusable_mode = 1;
	else if (rc == EROFS) {
		cpr_err(CE_NOTE, "reuseinit must be performed "
		    "while / is mounted writeable");
	}

	(void) cpr_default_setup(0);

	return (rc);
}

int
i_cpr_check_cprinfo(void)
{
	struct vnode *vp;
	cmini_t mini;
	int rc = 0;

	if (rc = cpr_open_deffile(FREAD, &vp)) {
		if (rc == ENOENT)
			cpr_err(CE_NOTE, "cprinfo file does not "
			    "exist. You must run 'uadmin %d %d' "
			    "command while / is mounted writeable,\n"
			    "then reboot and run 'uadmin %d %d' "
			    "to create a reusable statefile",
			    A_FREEZE, AD_REUSEINIT, A_FREEZE, AD_REUSABLE);
		return (rc);
	}

	rc = cpr_rdwr(UIO_READ, vp, &mini, sizeof (mini));
	(void) VOP_CLOSE(vp, FREAD, 1, (offset_t)0, CRED(), NULL);
	VN_RELE(vp);

	if (rc) {
		cpr_err(CE_WARN, "Failed reading %s, errno = %d",
		    cpr_default_path, rc);
	} else if (mini.magic != CPR_DEFAULT_MAGIC) {
		cpr_err(CE_CONT, "bad magic number in cprinfo file.\n"
		    "You must run 'uadmin %d %d' while / is mounted "
		    "writeable, then reboot and run 'uadmin %d %d' "
		    "to create a reusable statefile\n",
		    A_FREEZE, AD_REUSEINIT, A_FREEZE, AD_REUSABLE);
		rc = EINVAL;
	}

	return (rc);
}


int
i_cpr_reusable_supported(void)
{
	return (1);
}


/*
 * find prom phys pages and alloc space for a tmp copy
 */
static int
i_cpr_find_ppages(void)
{
	struct page *pp;
	struct memlist *pmem;
	pgcnt_t npages, pcnt, scnt, vcnt;
	pfn_t ppn, plast, *dst;
	int mapflag;

	cpr_clear_bitmaps();
	mapflag = REGULAR_BITMAP;

	/*
	 * there should be a page_t for each phys page used by the kernel;
	 * set a bit for each phys page not tracked by a page_t
	 */
	pcnt = 0;
	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->ml_next) {
		npages = mmu_btop(pmem->ml_size);
		ppn = mmu_btop(pmem->ml_address);
		for (plast = ppn + npages; ppn < plast; ppn++) {
			if (page_numtopp_nolock(ppn))
				continue;
			(void) cpr_setbit(ppn, mapflag);
			pcnt++;
		}
	}
	memlist_read_unlock();
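	/*
	 * Editorial note (added; not in the original source): at this point
	 * the bitmap has a bit set for every installed phys page with no
	 * page_t.  The two passes below refine that set -- seg-owned pages
	 * are cleared out and pages hanging off the prom's vnode are added --
	 * so that what remains is treated as prom-owned memory.
	 */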

	/*
	 * clear bits for phys pages in each segment
	 */
	scnt = cpr_count_seg_pages(mapflag, cpr_clrbit);

	/*
	 * set bits for phys pages referenced by the promvp vnode;
	 * these pages are mostly comprised of forthdebug words
	 */
	vcnt = 0;
	for (pp = promvp.v_pages; pp; ) {
		if (cpr_setbit(pp->p_offset, mapflag) == 0)
			vcnt++;
		pp = pp->p_vpnext;
		if (pp == promvp.v_pages)
			break;
	}

	/*
	 * the total number of prom pages is:
	 * (non-page_t pages - seg pages + vnode pages)
	 */
	ppage_count = pcnt - scnt + vcnt;
	CPR_DEBUG(CPR_DEBUG1,
	    "find_ppages: pcnt %ld - scnt %ld + vcnt %ld = %ld\n",
	    pcnt, scnt, vcnt, ppage_count);

	/*
	 * alloc array of pfn_t to store phys page list
	 */
	pphys_list_size = ppage_count * sizeof (pfn_t);
	pphys_list = kmem_alloc(pphys_list_size, KM_NOSLEEP);
	if (pphys_list == NULL) {
		cpr_err(CE_WARN, "cannot alloc pphys_list");
		return (ENOMEM);
	}

	/*
	 * phys pages referenced in the bitmap should be
	 * those used by the prom; scan bitmap and save
	 * a list of prom phys page numbers
	 */
	dst = pphys_list;
	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->ml_next) {
		npages = mmu_btop(pmem->ml_size);
		ppn = mmu_btop(pmem->ml_address);
		for (plast = ppn + npages; ppn < plast; ppn++) {
			if (cpr_isset(ppn, mapflag)) {
				ASSERT(dst < (pphys_list + ppage_count));
				*dst++ = ppn;
			}
		}
	}
	memlist_read_unlock();

	/*
	 * allocate space to store prom pages
	 */
	ppage_buf = kmem_alloc(mmu_ptob(ppage_count), KM_NOSLEEP);
	if (ppage_buf == NULL) {
		kmem_free(pphys_list, pphys_list_size);
		pphys_list = NULL;
		cpr_err(CE_WARN, "cannot alloc ppage_buf");
		return (ENOMEM);
	}

	return (0);
}


/*
 * save prom pages to kmem pages
 */
static void
i_cpr_save_ppages(void)
{
	pfn_t *pphys, *plast;
	caddr_t dst;

	/*
	 * map in each prom page and copy to a kmem page
	 */
	dst = ppage_buf;
	plast = pphys_list + ppage_count;
	for (pphys = pphys_list; pphys < plast; pphys++) {
		i_cpr_mapin(cpr_vaddr, 1, *pphys);
		bcopy(cpr_vaddr, dst, MMU_PAGESIZE);
		i_cpr_mapout(cpr_vaddr, 1);
		dst += MMU_PAGESIZE;
	}

	CPR_DEBUG(CPR_DEBUG1, "saved %ld prom pages\n", ppage_count);
}


/*
 * restore prom pages from kmem pages
 */
static void
i_cpr_restore_ppages(void)
{
	pfn_t *pphys, *plast;
	caddr_t src;

	dcache_flushall();

	/*
	 * map in each prom page and copy from a kmem page
	 */
	src = ppage_buf;
	plast = pphys_list + ppage_count;
	for (pphys = pphys_list; pphys < plast; pphys++) {
		i_cpr_mapin(cpr_vaddr, 1, *pphys);
		bcopy(src, cpr_vaddr, MMU_PAGESIZE);
		i_cpr_mapout(cpr_vaddr, 1);
		src += MMU_PAGESIZE;
	}

	dcache_flushall();

	CPR_DEBUG(CPR_DEBUG1, "restored %ld prom pages\n", ppage_count);
}
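/*
 * Editorial note (added; not in the original source): the two routines
 * above are deliberate mirror images.  Each prom physical page is briefly
 * mapped at cpr_vaddr one page at a time, copied to or from its slot in
 * ppage_buf, and unmapped again, so the only virtual address space
 * consumed is the single-page window at cpr_vaddr.
 */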

/*
 * save/restore prom pages or free related allocs
 */
int
i_cpr_prom_pages(int action)
{
	int error;

	if (action == CPR_PROM_SAVE) {
		if (ppage_buf == NULL) {
			ASSERT(pphys_list == NULL);
			if (error = i_cpr_find_ppages())
				return (error);
			i_cpr_save_ppages();
		}
	} else if (action == CPR_PROM_RESTORE) {
		i_cpr_restore_ppages();
	} else if (action == CPR_PROM_FREE) {
		if (pphys_list) {
			ASSERT(pphys_list_size);
			kmem_free(pphys_list, pphys_list_size);
			pphys_list = NULL;
			pphys_list_size = 0;
		}
		if (ppage_buf) {
			ASSERT(ppage_count);
			kmem_free(ppage_buf, mmu_ptob(ppage_count));
			CPR_DEBUG(CPR_DEBUG1, "freed %ld prom pages\n",
			    ppage_count);
			ppage_buf = NULL;
			ppage_count = 0;
		}
	}
	return (0);
}
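/*
 * Editorial note (added; not in the original source): the CPR_PROM_SAVE
 * case only does work while ppage_buf is still NULL, so a repeated save
 * request within one cpr cycle is a no-op; CPR_PROM_FREE releases both
 * the pfn list and the page buffer and resets the counts so a later
 * cycle starts from scratch.
 */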

/*
 * record tlb data for the nucleus, bigktsb's, and the cpr module;
 * this data is later used by cprboot to install dtlb/itlb entries.
 * when we jump into the cpr module during the resume phase, those
 * mappings are needed until switching to the kernel trap table.
 * to make the dtte/itte info available during resume, we need
 * the info recorded prior to saving sensitive pages, otherwise
 * all the data would appear as NULLs.
 */
static void
i_cpr_save_tlbinfo(void)
{
	cti_t cti = {0};

	/*
	 * during resume - shortly after jumping into the cpr module,
	 * sfmmu_load_mmustate() will overwrite any dtlb entry at any
	 * index used for TSBs; skip is set so that any saved tte will
	 * target other tlb offsets and prevent being lost during
	 * resume.  now scan the dtlb and save locked entries,
	 * then add entries for the tmp stack / data page and the
	 * cpr thread structure.
	 */
	cti.dst = m_info.dtte;
	cti.tail = cti.dst + CPR_MAX_TLB;
	cti.reader = dtlb_rd_entry;
	cti.writer = NULL;
	cti.filter = i_cpr_lnb;
	cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1;

	if (utsb_dtlb_ttenum != -1)
		cti.skip = (1 << utsb_dtlb_ttenum);

	if (utsb4m_dtlb_ttenum != -1)
		cti.skip |= (1 << utsb4m_dtlb_ttenum);

	i_cpr_scan_tlb(&cti);
	i_cpr_make_tte(&cti, &i_cpr_data_page, datava);
	i_cpr_make_tte(&cti, curthread, datava);

	/*
	 * scan itlb and save locked entries; add an entry for
	 * the first text page of the cpr module; cprboot will
	 * jump to that page after restoring kernel pages.
	 */
	cti.dst = m_info.itte;
	cti.tail = cti.dst + CPR_MAX_TLB;
	cti.reader = itlb_rd_entry;
	cti.index = cpunodes[CPU->cpu_id].itlb_size - 1;
	cti.skip = 0;
	i_cpr_scan_tlb(&cti);
	i_cpr_make_tte(&cti, (void *)i_cpr_resume_setup, textva);
}


/* ARGSUSED */
int
i_cpr_dump_setup(vnode_t *vp)
{
	/*
	 * zero out m_info and add info to dtte/itte arrays
	 */
	bzero(&m_info, sizeof (m_info));
	i_cpr_save_tlbinfo();
	return (0);
}


int
i_cpr_is_supported(int sleeptype)
{
	char es_prop[] = "energystar-v2";
	pnode_t node;
	int last;
	extern int cpr_supported_override;
	extern int cpr_platform_enable;

	if (sleeptype != CPR_TODISK)
		return (0);

	/*
	 * The next statement tests if a specific platform has turned off
	 * cpr support.
	 */
	if (cpr_supported_override)
		return (0);

	/*
	 * Do not inspect energystar-v* property if a platform has
	 * specifically turned on cpr support
	 */
	if (cpr_platform_enable)
		return (1);

	node = prom_rootnode();
	if (prom_getproplen(node, es_prop) != -1)
		return (1);
	last = strlen(es_prop) - 1;
	es_prop[last] = '3';
	return (prom_getproplen(node, es_prop) != -1);
}
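/*
 * Editorial note (added; not in the original source): the property probe
 * above reuses the same buffer for both names -- it first asks the OBP
 * root node for "energystar-v2" and, failing that, patches the final
 * character to '3' and retries as "energystar-v3"; the presence of either
 * property is what marks the platform as suspend-to-disk capable here.
 */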

/*
 * the actual size of the statefile data isn't known until after all the
 * compressed pages are written; even the inode size doesn't reflect the
 * data size since there are usually many extra fs blocks.  for recording
 * the actual data size, the first sector of the statefile is copied to
 * a tmp buf, and the copy is later updated and flushed to disk.
 */
int
i_cpr_blockzero(char *base, char **bufpp, int *blkno, vnode_t *vp)
{
	extern int cpr_flush_write(vnode_t *);
	static char cpr_sector[DEV_BSIZE];
	cpr_ext bytes, *dst;

	/*
	 * this routine is called after cdd_t and csu_md_t are copied
	 * to cpr_buf; mini-hack alert: the save/update method creates
	 * a dependency on the combined struct size being >= one sector
	 * or DEV_BSIZE; since introduction in Sol2.7, csu_md_t size is
	 * over 1K bytes and will probably grow with any changes.
	 *
	 * copy when vp is NULL, flush when non-NULL
	 */
	if (vp == NULL) {
		ASSERT((*bufpp - base) >= DEV_BSIZE);
		bcopy(base, cpr_sector, sizeof (cpr_sector));
		return (0);
	} else {
		bytes = dbtob(*blkno);
		dst = &((cdd_t *)cpr_sector)->cdd_filesize;
		bcopy(&bytes, dst, sizeof (bytes));
		bcopy(cpr_sector, base, sizeof (cpr_sector));
		*bufpp = base + sizeof (cpr_sector);
		*blkno = cpr_statefile_offset();
		CPR_DEBUG(CPR_DEBUG1, "statefile data size: %ld\n\n", bytes);
		return (cpr_flush_write(vp));
	}
}
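/*
 * Editorial note (illustration added; not in the original source): dbtob()
 * converts a count of DEV_BSIZE disk blocks into bytes, so the final
 * *blkno accumulated by the writer becomes the cdd_filesize recorded in
 * the saved first sector.  Assuming the usual 512-byte DEV_BSIZE, a
 * statefile of, say, 40000 blocks would be recorded as 20480000 bytes;
 * the block count is a made-up example.
 */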

/*
 * Allocate bitmaps according to the phys_install list.
 */
static int
i_cpr_bitmap_setup(void)
{
	struct memlist *pmem;
	cbd_t *dp, *tail;
	void *space;
	size_t size;

	/*
	 * The number of bitmap descriptors will be the count of
	 * phys_install ranges plus 1 for a trailing NULL struct.
	 */
	cpr_nbitmaps = 1;
	for (pmem = phys_install; pmem; pmem = pmem->ml_next)
		cpr_nbitmaps++;

	if (cpr_nbitmaps > (CPR_MAX_BMDESC - 1)) {
		cpr_err(CE_WARN, "too many physical memory ranges %d, max %d",
		    cpr_nbitmaps, CPR_MAX_BMDESC - 1);
		return (EFBIG);
	}

	/* Alloc an array of bitmap descriptors. */
	dp = kmem_zalloc(cpr_nbitmaps * sizeof (*dp), KM_NOSLEEP);
	if (dp == NULL) {
		cpr_nbitmaps = 0;
		return (ENOMEM);
	}
	tail = dp + cpr_nbitmaps;

	CPR->c_bmda = dp;
	for (pmem = phys_install; pmem; pmem = pmem->ml_next) {
		size = BITMAP_BYTES(pmem->ml_size);
		space = kmem_zalloc(size * 2, KM_NOSLEEP);
		if (space == NULL)
			return (ENOMEM);
		ASSERT(dp < tail);
		dp->cbd_magic = CPR_BITMAP_MAGIC;
		dp->cbd_spfn = mmu_btop(pmem->ml_address);
		dp->cbd_epfn = mmu_btop(pmem->ml_address + pmem->ml_size) - 1;
		dp->cbd_size = size;
		dp->cbd_reg_bitmap = (cpr_ptr)space;
		dp->cbd_vlt_bitmap = (cpr_ptr)((caddr_t)space + size);
		dp++;
	}

	/* set magic for the last descriptor */
	ASSERT(dp == (tail - 1));
	dp->cbd_magic = CPR_BITMAP_MAGIC;

	return (0);
}


void
i_cpr_bitmap_cleanup(void)
{
	cbd_t *dp;

	if (CPR->c_bmda == NULL)
		return;
	for (dp = CPR->c_bmda; dp->cbd_size; dp++)
		kmem_free((void *)dp->cbd_reg_bitmap, dp->cbd_size * 2);
	kmem_free(CPR->c_bmda, cpr_nbitmaps * sizeof (*CPR->c_bmda));
	CPR->c_bmda = NULL;
	cpr_nbitmaps = 0;
}
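/*
 * Editorial note (illustration added; not in the original source): each
 * phys_install range gets one kmem_zalloc of size * 2 so that its regular
 * and volatile bitmaps sit back to back, which is why the cleanup above
 * frees cbd_reg_bitmap with dp->cbd_size * 2.  Assuming BITMAP_BYTES works
 * out to one bit per 8K page, a 1 GB range would need 16 KB per bitmap,
 * or 32 KB for the pair; the range size is an invented example.
 */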

/*
 * A "regular" and "volatile" bitmap are created for each range of
 * physical memory.  The volatile maps are used to count and track pages
 * susceptible to heap corruption - caused by drivers that allocate mem
 * during VOP_DUMP(); the regular maps are used for all the other non-
 * susceptible pages.  Before writing the bitmaps to the statefile,
 * each bitmap pair gets merged to simplify handling within cprboot.
 */
int
i_cpr_alloc_bitmaps(void)
{
	int err;

	memlist_read_lock();
	err = i_cpr_bitmap_setup();
	memlist_read_unlock();
	if (err)
		i_cpr_bitmap_cleanup();
	return (err);
}


/*
 * Power down the system.
 */
int
i_cpr_power_down(int sleeptype)
{
	int is_defined = 0;
	char *wordexists = "p\" power-off\" find nip swap l! ";
	char *req = "power-off";

	ASSERT(sleeptype == CPR_TODISK);

	/*
	 * is_defined has value -1 when defined
	 */
	prom_interpret(wordexists, (uintptr_t)&is_defined, 0, 0, 0, 0);
	if (is_defined) {
		CPR_DEBUG(CPR_DEBUG1, "\ncpr: %s...\n", req);
		prom_interpret(req, 0, 0, 0, 0, 0);
	}
	/*
	 * Only returns if failed
	 */
	return (EIO);
}
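/*
 * Editorial note (added; not in the original source): the wordexists
 * string is a small Forth fragment handed to OBP; it checks whether the
 * word "power-off" is defined in the prom's dictionary and stores the
 * answer through the address passed as the first prom_interpret()
 * argument, leaving is_defined nonzero (-1, per the comment above) only
 * when the word exists.  Only then is the real "power-off" request
 * interpreted, and returning from this function at all means the
 * power-off attempt failed.
 */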

void
i_cpr_stop_other_cpus(void)
{
	stop_other_cpus();
}

/*
 * Save context for the specified CPU
 */
/* ARGSUSED */
void *
i_cpr_save_context(void *arg)
{
	/*
	 * Not yet
	 */
	ASSERT(0);
	return (NULL);
}

void
i_cpr_pre_resume_cpus(void)
{
	/*
	 * Not yet
	 */
	ASSERT(0);
}

void
i_cpr_post_resume_cpus(void)
{
	/*
	 * Not yet
	 */
	ASSERT(0);
}

/*
 * nothing to do
 */
void
i_cpr_alloc_cpus(void)
{
}

/*
 * nothing to do
 */
void
i_cpr_free_cpus(void)
{
}

/* ARGSUSED */
void
i_cpr_save_configuration(dev_info_t *dip)
{
	/*
	 * this is a no-op on sparc
	 */
}

/* ARGSUSED */
void
i_cpr_restore_configuration(dev_info_t *dip)
{
	/*
	 * this is a no-op on sparc
	 */
}