/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Fill in and write out the cpr state file
 *	1. Allocate and write headers, ELF and cpr dump header
 *	2. Allocate bitmaps according to phys_install
 *	3. Tag kernel pages into corresponding bitmap
 *	4. Write bitmaps to state file
 *	5. Write actual physical page data to state file
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/vm.h>
#include <sys/memlist.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/fs/ufs_inode.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <sys/cpr.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/panic.h>
#include <sys/thread.h>
#include <sys/note.h>

/* Local defines and variables */
#define	BTOb(bytes)	((bytes) << 3)		/* Bytes to bits, log2(NBBY) */
#define	bTOB(bits)	((bits) >> 3)		/* bits to Bytes, log2(NBBY) */

#if defined(__sparc)
static uint_t cpr_pages_tobe_dumped;
static uint_t cpr_regular_pgs_dumped;
static int cpr_dump_regular_pages(vnode_t *);
static int cpr_count_upages(int, bitfunc_t);
static int cpr_compress_and_write(vnode_t *, uint_t, pfn_t, pgcnt_t);
#endif

int cpr_flush_write(vnode_t *);

int cpr_contig_pages(vnode_t *, int);

void cpr_clear_bitmaps();

extern size_t cpr_get_devsize(dev_t);
extern int i_cpr_dump_setup(vnode_t *);
extern int i_cpr_blockzero(char *, char **, int *, vnode_t *);
extern int cpr_test_mode;
int cpr_setbit(pfn_t, int);
int cpr_clrbit(pfn_t, int);

ctrm_t cpr_term;

char *cpr_buf, *cpr_buf_end;
int cpr_buf_blocks;		/* size of cpr_buf in blocks */
size_t cpr_buf_size;		/* size of cpr_buf in bytes */
size_t cpr_bitmap_size;
int cpr_nbitmaps;

char *cpr_pagedata;		/* page buffer for compression / tmp copy */
size_t cpr_pagedata_size;	/* page buffer size in bytes */

#if defined(__sparc)
static char *cpr_wptr;		/* keep track of where to write to next */
static int cpr_file_bn;		/* cpr state-file block offset */
static int cpr_disk_writes_ok;
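/*
 * cpr_dev_space caches the size in bytes of a block-device statefile
 * target; cpr_write() fills it in on first use and checks it so the
 * dump returns ENOSPC instead of writing past the end of the device.
 */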
static size_t cpr_dev_space = 0;
#endif

char cpr_pagecopy[CPR_MAXCONTIG * MMU_PAGESIZE];

#if defined(__sparc)
/*
 * On some platforms bcopy may modify the thread structure
 * during bcopy (eg, to prevent cpu migration).  If the
 * range we are currently writing out includes our own
 * thread structure then it will be snapshotted by bcopy
 * including those modified members - and the updates made
 * on exit from bcopy will no longer be seen when we later
 * restore the mid-bcopy kthread_t.  So if the range we
 * need to copy overlaps with our thread structure we will
 * use a simple byte copy.
 */
void
cprbcopy(void *from, void *to, size_t bytes)
{
	extern int curthreadremapped;
	caddr_t kthrend;

	kthrend = (caddr_t)curthread + sizeof (kthread_t) - 1;
	if (curthreadremapped || (kthrend >= (caddr_t)from &&
	    kthrend < (caddr_t)from + bytes + sizeof (kthread_t) - 1)) {
		caddr_t src = from, dst = to;

		while (bytes-- > 0)
			*dst++ = *src++;
	} else {
		bcopy(from, to, bytes);
	}
}

/*
 * Allocate pages for buffers used in writing out the statefile
 */
static int
cpr_alloc_bufs(void)
{
	char *allocerr = "Unable to allocate memory for cpr buffer";
	size_t size;

	/*
	 * Set the cpr write buffer size to at least the historic
	 * size (128k), or large enough to store both the early set
	 * of statefile structures (well under 0x800) and the
	 * bitmaps, then round up to the next pagesize.
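	 *
	 * cpr_pagedata, allocated below, gets its own buffer of
	 * CPR_MAXCONTIG + 1 pages and serves as the compression
	 * destination / temporary copy for page data.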
	 */
	size = PAGE_ROUNDUP(dbtob(4) + cpr_bitmap_size);
	cpr_buf_size = MAX(size, CPRBUFSZ);
	cpr_buf_blocks = btodb(cpr_buf_size);
	cpr_buf = kmem_alloc(cpr_buf_size, KM_NOSLEEP);
	if (cpr_buf == NULL) {
		cpr_err(CE_WARN, allocerr);
		return (ENOMEM);
	}
	cpr_buf_end = cpr_buf + cpr_buf_size;

	cpr_pagedata_size = mmu_ptob(CPR_MAXCONTIG + 1);
	cpr_pagedata = kmem_alloc(cpr_pagedata_size, KM_NOSLEEP);
	if (cpr_pagedata == NULL) {
		kmem_free(cpr_buf, cpr_buf_size);
		cpr_buf = NULL;
		cpr_err(CE_WARN, allocerr);
		return (ENOMEM);
	}

	return (0);
}


/*
 * Set bitmap size in bytes based on phys_install.
 */
void
cpr_set_bitmap_size(void)
{
	struct memlist *pmem;
	size_t size = 0;

	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->ml_next)
		size += pmem->ml_size;
	memlist_read_unlock();
	cpr_bitmap_size = BITMAP_BYTES(size);
}


/*
 * CPR dump header contains the following information:
 *	1. header magic -- unique to cpr state file
 *	2. kernel return pc & ppn for resume
 *	3. current thread info
 *	4. debug level and test mode
 *	5. number of bitmaps allocated
 *	6. number of page records
 */
static int
cpr_write_header(vnode_t *vp)
{
	extern ushort_t cpr_mach_type;
	struct cpr_dump_desc cdump;
	pgcnt_t bitmap_pages;
	pgcnt_t kpages, vpages, upages;
	pgcnt_t cpr_count_kpages(int mapflag, bitfunc_t bitfunc);

	cdump.cdd_magic = (uint_t)CPR_DUMP_MAGIC;
	cdump.cdd_version = CPR_VERSION;
	cdump.cdd_machine = cpr_mach_type;
	cdump.cdd_debug = cpr_debug;
	cdump.cdd_test_mode = cpr_test_mode;
	cdump.cdd_bitmaprec = cpr_nbitmaps;

	cpr_clear_bitmaps();

	/*
	 * Remember how many pages we plan to save to statefile.
	 * This information will be used for sanity checks.
	 * Untag those pages that will not be saved to statefile.
	 */
	kpages = cpr_count_kpages(REGULAR_BITMAP, cpr_setbit);
	vpages = cpr_count_volatile_pages(REGULAR_BITMAP, cpr_clrbit);
	upages = cpr_count_upages(REGULAR_BITMAP, cpr_setbit);
	cdump.cdd_dumppgsize = kpages - vpages + upages;
	cpr_pages_tobe_dumped = cdump.cdd_dumppgsize;
	CPR_DEBUG(CPR_DEBUG7,
	    "\ncpr_write_header: kpages %ld - vpages %ld + upages %ld = %d\n",
	    kpages, vpages, upages, cdump.cdd_dumppgsize);

	/*
	 * Some pages contain volatile data (cpr_buf and storage area for
	 * sensitive kpages), which are no longer needed after the statefile
	 * is dumped to disk.  We have already untagged them from regular
	 * bitmaps.  Now tag them into the volatile bitmaps.  The pages in
	 * volatile bitmaps will be claimed during resume, and the resumed
	 * kernel will free them.
	 */
	(void) cpr_count_volatile_pages(VOLATILE_BITMAP, cpr_setbit);

	bitmap_pages = mmu_btopr(cpr_bitmap_size);

	/*
	 * Export accurate statefile size for statefile allocation retry.
	 * statefile_size = all the headers + total pages +
	 *	number of pages used by the bitmaps.
	 * Roundup will be done in the file allocation code.
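	 * (Concretely, the estimate below covers the cdd_t and cmd_t
	 * descriptors, a cbd_t per bitmap record, a cpd_t per dumped
	 * page, plus the page data and the bitmap pages themselves.)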
	 */
	STAT->cs_nocomp_statefsz = sizeof (cdd_t) + sizeof (cmd_t) +
	    (sizeof (cbd_t) * cdump.cdd_bitmaprec) +
	    (sizeof (cpd_t) * cdump.cdd_dumppgsize) +
	    mmu_ptob(cdump.cdd_dumppgsize + bitmap_pages);

	/*
	 * If the estimated statefile is not big enough,
	 * retry now to avoid unnecessary work.
	 */
	if (!(CPR->c_flags & C_COMPRESSING) &&
	    (STAT->cs_nocomp_statefsz > STAT->cs_est_statefsz)) {
		if (cpr_debug & (CPR_DEBUG1 | CPR_DEBUG7))
			prom_printf("cpr_write_header: "
			    "STAT->cs_nocomp_statefsz > "
			    "STAT->cs_est_statefsz\n");
		return (ENOSPC);
	}

	/* now write cpr dump descriptor */
	return (cpr_write(vp, (caddr_t)&cdump, sizeof (cdd_t)));
}


/*
 * CPR dump tail record contains the following information:
 *	1. header magic -- unique to cpr state file
 *	2. all misc info that needs to be passed to cprboot or resumed kernel
 */
static int
cpr_write_terminator(vnode_t *vp)
{
	cpr_term.magic = (uint_t)CPR_TERM_MAGIC;
	cpr_term.va = (cpr_ptr)&cpr_term;
	cpr_term.pfn = (cpr_ext)va_to_pfn(&cpr_term);

	/* count the last one (flush) */
	cpr_term.real_statef_size = STAT->cs_real_statefsz +
	    btod(cpr_wptr - cpr_buf) * DEV_BSIZE;

	CPR_DEBUG(CPR_DEBUG9, "cpr_dump: Real Statefile Size: %ld\n",
	    STAT->cs_real_statefsz);

	cpr_tod_get(&cpr_term.tm_shutdown);

	return (cpr_write(vp, (caddr_t)&cpr_term, sizeof (cpr_term)));
}

/*
 * Write bitmap descriptor array, followed by merged bitmaps.
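 *
 * Each descriptor's regular and volatile maps are OR'd together in
 * cpr_pagecopy-sized chunks before being written, so the statefile
 * carries a single merged bitmap per descriptor.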
 */
static int
cpr_write_bitmap(vnode_t *vp)
{
	char *rmap, *vmap, *dst, *tail;
	size_t size, bytes;
	cbd_t *dp;
	int err;

	dp = CPR->c_bmda;
	if (err = cpr_write(vp, (caddr_t)dp, cpr_nbitmaps * sizeof (*dp)))
		return (err);

	/*
	 * merge regular and volatile bitmaps into tmp space
	 * and write to disk
	 */
	for (; dp->cbd_size; dp++) {
		rmap = (char *)dp->cbd_reg_bitmap;
		vmap = (char *)dp->cbd_vlt_bitmap;
		for (size = dp->cbd_size; size; size -= bytes) {
			bytes = min(size, sizeof (cpr_pagecopy));
			tail = &cpr_pagecopy[bytes];
			for (dst = cpr_pagecopy; dst < tail; dst++)
				*dst = *rmap++ | *vmap++;
			if (err = cpr_write(vp, cpr_pagecopy, bytes))
				break;
		}
	}

	return (err);
}


static int
cpr_write_statefile(vnode_t *vp)
{
	uint_t error = 0;
	extern int i_cpr_check_pgs_dumped();
	void flush_windows(void);
	pgcnt_t spages;
	char *str;

	flush_windows();

	/*
	 * to get an accurate view of kas, we need to untag sensitive
	 * pages *before* dumping them because the disk driver makes
	 * allocations and changes kas along the way.  The remaining
	 * pages referenced in the bitmaps are dumped out later as
	 * regular kpages.
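	 *
	 * (cpr_write() refuses to touch the disk, returning EINVAL,
	 * while cpr_disk_writes_ok is still zero; the flag is set
	 * just below.)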
	 */
	str = "cpr_write_statefile:";
	spages = i_cpr_count_sensitive_kpages(REGULAR_BITMAP, cpr_clrbit);
	CPR_DEBUG(CPR_DEBUG7, "%s untag %ld sens pages\n", str, spages);

	/*
	 * now it's OK to call a driver that makes allocations
	 */
	cpr_disk_writes_ok = 1;

	/*
	 * now write out the clean sensitive kpages
	 * according to the sensitive descriptors
	 */
	error = i_cpr_dump_sensitive_kpages(vp);
	if (error) {
		CPR_DEBUG(CPR_DEBUG7,
		    "%s cpr_dump_sensitive_kpages() failed!\n", str);
		return (error);
	}

	/*
	 * cpr_dump_regular_pages() counts cpr_regular_pgs_dumped
	 */
	error = cpr_dump_regular_pages(vp);
	if (error) {
		CPR_DEBUG(CPR_DEBUG7,
		    "%s cpr_dump_regular_pages() failed!\n", str);
		return (error);
	}

	/*
	 * sanity check to verify the right number of pages were dumped
	 */
	error = i_cpr_check_pgs_dumped(cpr_pages_tobe_dumped,
	    cpr_regular_pgs_dumped);

	if (error) {
		prom_printf("\n%s page count mismatch!\n", str);
#ifdef DEBUG
		if (cpr_test_mode)
			debug_enter(NULL);
#endif
	}

	return (error);
}
#endif


/*
 * Creates the CPR state file; the following sections are
 * written out in sequence:
 *	- writes the cpr dump header
 *	- writes the memory usage bitmaps
 *	- writes the platform dependent info
 *	- writes the remaining user pages
 *	- writes the kernel pages
 */
#if defined(__x86)
_NOTE(ARGSUSED(0))
#endif
int
cpr_dump(vnode_t *vp)
{
#if defined(__sparc)
	int error;

	if (cpr_buf == NULL) {
		ASSERT(cpr_pagedata == NULL);
		if (error = cpr_alloc_bufs())
			return (error);
	}
	/* point to top of internal buffer */
	cpr_wptr = cpr_buf;

	/* initialize global variables used by the write operation */
	cpr_file_bn = cpr_statefile_offset();
	cpr_dev_space = 0;

	/* allocate bitmaps */
	if (CPR->c_bmda == NULL) {
		if (error = i_cpr_alloc_bitmaps()) {
			cpr_err(CE_WARN, "cannot allocate bitmaps");
			return (error);
		}
	}

	if (error = i_cpr_prom_pages(CPR_PROM_SAVE))
		return (error);

	if (error = i_cpr_dump_setup(vp))
		return (error);

	/*
	 * set internal cross checking; we don't want to call
	 * a disk driver that makes allocations until after
	 * sensitive pages are saved
	 */
	cpr_disk_writes_ok = 0;

	/*
	 * 1253112: heap corruption due to memory allocation when dumping
	 *	statefile.
	 * Theoretically on Sun4u only the kernel data nucleus, kvalloc and
	 * kvseg segments can be contaminated should memory allocations happen
	 * during sddump, which is not supposed to happen after the system
	 * is quiesced.  Let's call the kernel pages that tend to be affected
	 * 'sensitive kpages' here.  To avoid saving inconsistent pages, we
	 * will allocate some storage space to save the clean sensitive pages
	 * aside before statefile dumping takes place.  Since there may not be
	 * much memory left at this stage, the sensitive pages will be
	 * compressed before they are saved into the storage area.
	 */
	if (error = i_cpr_save_sensitive_kpages()) {
		CPR_DEBUG(CPR_DEBUG7,
		    "cpr_dump: save_sensitive_kpages failed!\n");
		return (error);
	}

	/*
	 * since all cpr allocations are done (space for sensitive kpages,
	 * bitmaps, cpr_buf), kas is stable, and now we can accurately
	 * count regular and sensitive kpages.
	 */
	if (error = cpr_write_header(vp)) {
		CPR_DEBUG(CPR_DEBUG7,
		    "cpr_dump: cpr_write_header() failed!\n");
		return (error);
	}

	if (error = i_cpr_write_machdep(vp))
		return (error);

	if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, NULL, NULL))
		return (error);

	if (error = cpr_write_bitmap(vp))
		return (error);

	if (error = cpr_write_statefile(vp)) {
		CPR_DEBUG(CPR_DEBUG7,
		    "cpr_dump: cpr_write_statefile() failed!\n");
		return (error);
	}

	if (error = cpr_write_terminator(vp))
		return (error);

	if (error = cpr_flush_write(vp))
		return (error);

	if (error = i_cpr_blockzero(cpr_buf, &cpr_wptr, &cpr_file_bn, vp))
		return (error);
#endif

	return (0);
}


#if defined(__sparc)
/*
 * cpr_xwalk() is called many 100x with a range within kvseg or kvseg_reloc;
 * a page-count from each range is accumulated at arg->pages.
 */
static void
cpr_xwalk(void *arg, void *base, size_t size)
{
	struct cpr_walkinfo *cwip = arg;

	cwip->pages += cpr_count_pages(base, size,
	    cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE);
	cwip->size += size;
	cwip->ranges++;
}

/*
 * cpr_walk() is called many 100x with a range within kvseg or kvseg_reloc;
 * a page-count from each range is accumulated at arg->pages.
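 *
 * The large-page heap range carved out of the kernel heap is special:
 * when that range is not fully allocated, heap_lp_arena is walked
 * directly (via cpr_xwalk) so only its populated sub-ranges are counted.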
 */
static void
cpr_walk(void *arg, void *base, size_t size)
{
	caddr_t addr = base;
	caddr_t addr_end = addr + size;

	/*
	 * If we are about to start walking the range of addresses we
	 * carved out of the kernel heap for the large page heap, walk
	 * heap_lp_arena to find which segments are actually populated.
	 */
	if (SEGKMEM_USE_LARGEPAGES &&
	    addr == heap_lp_base && addr_end == heap_lp_end &&
	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
		vmem_walk(heap_lp_arena, VMEM_ALLOC, cpr_xwalk, arg);
	} else {
		cpr_xwalk(arg, base, size);
	}
}


/*
 * faster scan of kvseg using vmem_walk() to visit
 * allocated ranges.
 */
pgcnt_t
cpr_scan_kvseg(int mapflag, bitfunc_t bitfunc, struct seg *seg)
{
	struct cpr_walkinfo cwinfo;

	bzero(&cwinfo, sizeof (cwinfo));
	cwinfo.mapflag = mapflag;
	cwinfo.bitfunc = bitfunc;

	vmem_walk(heap_arena, VMEM_ALLOC, cpr_walk, &cwinfo);

	if (cpr_debug & CPR_DEBUG7) {
		prom_printf("walked %d sub-ranges, total pages %ld\n",
		    cwinfo.ranges, mmu_btop(cwinfo.size));
		cpr_show_range(seg->s_base, seg->s_size,
		    mapflag, bitfunc, cwinfo.pages);
	}

	return (cwinfo.pages);
}


/*
 * cpr_walk_kpm() is called for every used area within the large
 * segkpm virtual address window. A page-count is accumulated at
 * arg->pages.
 */
static void
cpr_walk_kpm(void *arg, void *base, size_t size)
{
	struct cpr_walkinfo *cwip = arg;

	cwip->pages += cpr_count_pages(base, size,
	    cwip->mapflag, cwip->bitfunc, DBG_DONTSHOWRANGE);
	cwip->size += size;
	cwip->ranges++;
}


/*
 * faster scan of segkpm using hat_kpm_walk() to visit only used ranges.
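 * Returns 0 immediately when segkpm is not enabled (kpm_enable == 0).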
 */
/*ARGSUSED*/
static pgcnt_t
cpr_scan_segkpm(int mapflag, bitfunc_t bitfunc, struct seg *seg)
{
	struct cpr_walkinfo cwinfo;

	if (kpm_enable == 0)
		return (0);

	bzero(&cwinfo, sizeof (cwinfo));
	cwinfo.mapflag = mapflag;
	cwinfo.bitfunc = bitfunc;
	hat_kpm_walk(cpr_walk_kpm, &cwinfo);

	if (cpr_debug & CPR_DEBUG7) {
		prom_printf("walked %d sub-ranges, total pages %ld\n",
		    cwinfo.ranges, mmu_btop(cwinfo.size));
		cpr_show_range(segkpm->s_base, segkpm->s_size,
		    mapflag, bitfunc, cwinfo.pages);
	}

	return (cwinfo.pages);
}


/*
 * Sparsely filled kernel segments are registered in kseg_table for
 * easier lookup. See also block comment for cpr_count_seg_pages.
 */

#define	KSEG_SEG_ADDR	0	/* address of struct seg */
#define	KSEG_PTR_ADDR	1	/* address of pointer to struct seg */

typedef struct {
	struct seg **st_seg;		/* segment pointer or segment address */
	pgcnt_t (*st_fcn)(int, bitfunc_t, struct seg *); /* function to call */
	int st_addrtype;		/* address type in st_seg */
} ksegtbl_entry_t;

ksegtbl_entry_t kseg_table[] = {
	{(struct seg **)&kvseg,		cpr_scan_kvseg,		KSEG_SEG_ADDR},
	{&segkpm,			cpr_scan_segkpm,	KSEG_PTR_ADDR},
	{NULL,				0,			0}
};


/*
 * Compare seg with each entry in kseg_table; when there is a match
 * return the entry pointer, otherwise return NULL.
 */
static ksegtbl_entry_t *
cpr_sparse_seg_check(struct seg *seg)
{
	ksegtbl_entry_t *ste = &kseg_table[0];
	struct seg *tseg;

	for (; ste->st_seg; ste++) {
		tseg = (ste->st_addrtype == KSEG_PTR_ADDR) ?
		    *ste->st_seg : (struct seg *)ste->st_seg;

		if (seg == tseg)
			return (ste);
	}

	return ((ksegtbl_entry_t *)NULL);
}


/*
 * Count pages within each kernel segment; call cpr_sparse_seg_check()
 * to find out whether a sparsely filled segment needs special
 * treatment (e.g. kvseg).
 * Todo: A "SEGOP_CPR" like SEGOP_DUMP should be introduced; the cpr
 *	module shouldn't need to know segment details such as whether
 *	it is sparsely filled (that would make kseg_table obsolete).
 */
pgcnt_t
cpr_count_seg_pages(int mapflag, bitfunc_t bitfunc)
{
	struct seg *segp;
	pgcnt_t pages;
	ksegtbl_entry_t *ste;

	pages = 0;
	for (segp = AS_SEGFIRST(&kas); segp; segp = AS_SEGNEXT(&kas, segp)) {
		if (ste = cpr_sparse_seg_check(segp)) {
			pages += (ste->st_fcn)(mapflag, bitfunc, segp);
		} else {
			pages += cpr_count_pages(segp->s_base,
			    segp->s_size, mapflag, bitfunc, DBG_SHOWRANGE);
		}
	}

	return (pages);
}


/*
 * count kernel pages within kas and any special ranges
 */
pgcnt_t
cpr_count_kpages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t kas_cnt;

	/*
	 * Some pages need to be taken care of differently.
	 * eg: panicbuf pages of sun4m are not in kas but they need
	 * to be saved.  On sun4u, the physical pages of panicbuf are
	 * allocated via prom_retain().
	 */
	kas_cnt = i_cpr_count_special_kpages(mapflag, bitfunc);
	kas_cnt += cpr_count_seg_pages(mapflag, bitfunc);

	CPR_DEBUG(CPR_DEBUG9, "cpr_count_kpages: kas_cnt=%ld\n", kas_cnt);
	CPR_DEBUG(CPR_DEBUG7, "\ncpr_count_kpages: %ld pages, 0x%lx bytes\n",
	    kas_cnt, mmu_ptob(kas_cnt));

	return (kas_cnt);
}


/*
 * Set a bit corresponding to the arg phys page number;
 * returns 0 when the ppn is valid and the corresponding
 * map bit was clear, otherwise returns 1.
 */
int
cpr_setbit(pfn_t ppn, int mapflag)
{
	char *bitmap;
	cbd_t *dp;
	pfn_t rel;
	int clr;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = DESC_TO_MAP(dp, mapflag);
			rel = ppn - dp->cbd_spfn;
			if ((clr = isclr(bitmap, rel)) != 0)
				setbit(bitmap, rel);
			return (clr == 0);
		}
	}

	return (1);
}


/*
 * Clear a bit corresponding to the arg phys page number.
 */
int
cpr_clrbit(pfn_t ppn, int mapflag)
{
	char *bitmap;
	cbd_t *dp;
	pfn_t rel;
	int set;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = DESC_TO_MAP(dp, mapflag);
			rel = ppn - dp->cbd_spfn;
			if ((set = isset(bitmap, rel)) != 0)
				clrbit(bitmap, rel);
			return (set == 0);
		}
	}

	return (1);
}


/* ARGSUSED */
int
cpr_nobit(pfn_t ppn, int mapflag)
{
	return (0);
}


/*
 * Lookup a bit corresponding to the arg phys page number.
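 * Returns nonzero when the bit is set; returns 0 when it is clear or
 * the ppn does not fall within any bitmap descriptor's range.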
 */
int
cpr_isset(pfn_t ppn, int mapflag)
{
	char *bitmap;
	cbd_t *dp;
	pfn_t rel;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		if (PPN_IN_RANGE(ppn, dp)) {
			bitmap = DESC_TO_MAP(dp, mapflag);
			rel = ppn - dp->cbd_spfn;
			return (isset(bitmap, rel));
		}
	}

	return (0);
}


/*
 * Go through all pages and pick up any page not caught during the
 * invalidation stage.  This is also used to save pages with a cow lock
 * or phys page lock held (non-zero p_lckcnt or p_cowcnt).
 */
static int
cpr_count_upages(int mapflag, bitfunc_t bitfunc)
{
	page_t *pp, *page0;
	pgcnt_t dcnt = 0, tcnt = 0;
	pfn_t pfn;

	page0 = pp = page_first();

	do {
		if (pp->p_vnode == NULL || PP_ISKAS(pp) ||
		    PP_ISFREE(pp) && PP_ISAGED(pp))
			continue;

		pfn = page_pptonum(pp);
		if (pf_is_memory(pfn)) {
			tcnt++;
			if ((*bitfunc)(pfn, mapflag) == 0)
				dcnt++; /* dirty count */
		}
	} while ((pp = page_next(pp)) != page0);

	STAT->cs_upage2statef = dcnt;
	CPR_DEBUG(CPR_DEBUG9, "cpr_count_upages: dirty=%ld total=%ld\n",
	    dcnt, tcnt);
	CPR_DEBUG(CPR_DEBUG7, "cpr_count_upages: %ld pages, 0x%lx bytes\n",
	    dcnt, mmu_ptob(dcnt));
	page0 = NULL; /* for Lint */
	return (dcnt);
}


/*
 * try compressing pages based on cflag,
 * and for DEBUG kernels, verify uncompressed data checksum;
 *
 * this routine replaces common code from
 * i_cpr_compress_and_save() and cpr_compress_and_write()
 */
char *
cpr_compress_pages(cpd_t *dp, pgcnt_t pages, int cflag)
{
	size_t nbytes, clen, len;
	uint32_t test_sum;
	char *datap;

	nbytes = mmu_ptob(pages);

	/*
	 * set length to the original uncompressed data size;
	 * always init cpd_flag to zero
	 */
	dp->cpd_length = nbytes;
	dp->cpd_flag = 0;

#ifdef DEBUG
	/*
	 * Make a copy of the uncompressed data so we can checksum it.
	 * Compress that copy so the checksum works at the other end
	 */
	cprbcopy(CPR->c_mapping_area, cpr_pagecopy, nbytes);
	dp->cpd_usum = checksum32(cpr_pagecopy, nbytes);
	dp->cpd_flag |= CPD_USUM;
	datap = cpr_pagecopy;
#else
	datap = CPR->c_mapping_area;
	dp->cpd_usum = 0;
#endif

	/*
	 * try compressing the raw data to cpr_pagedata;
	 * if there was a size reduction: record the new length,
	 * flag the compression, and point to the compressed data.
	 */
	dp->cpd_csum = 0;
	if (cflag) {
		clen = compress(datap, cpr_pagedata, nbytes);
		if (clen < nbytes) {
			dp->cpd_flag |= CPD_COMPRESS;
			dp->cpd_length = clen;
			datap = cpr_pagedata;
#ifdef DEBUG
			dp->cpd_csum = checksum32(datap, clen);
			dp->cpd_flag |= CPD_CSUM;

			/*
			 * decompress the data back to a scratch area
			 * and compare the new checksum with the original
			 * checksum to verify the compression.
			 */
			bzero(cpr_pagecopy, sizeof (cpr_pagecopy));
			len = decompress(datap, cpr_pagecopy,
			    clen, sizeof (cpr_pagecopy));
			test_sum = checksum32(cpr_pagecopy, len);
			ASSERT(test_sum == dp->cpd_usum);
#endif
		}
	}

	return (datap);
}


/*
 * 1. Prepare cpr page descriptor and write it to file
 * 2. Compress page data and write it out
 */
static int
cpr_compress_and_write(vnode_t *vp, uint_t va, pfn_t pfn, pgcnt_t npg)
{
	int error = 0;
	char *datap;
	cpd_t cpd;	/* cpr page descriptor */
	extern void i_cpr_mapin(caddr_t, uint_t, pfn_t);
	extern void i_cpr_mapout(caddr_t, uint_t);

	i_cpr_mapin(CPR->c_mapping_area, npg, pfn);

	CPR_DEBUG(CPR_DEBUG3, "mapped-in %ld pages, vaddr 0x%p, pfn 0x%lx\n",
	    npg, (void *)CPR->c_mapping_area, pfn);

	/*
	 * Fill cpr page descriptor.
	 */
	cpd.cpd_magic = (uint_t)CPR_PAGE_MAGIC;
	cpd.cpd_pfn = pfn;
	cpd.cpd_pages = npg;

	STAT->cs_dumped_statefsz += mmu_ptob(npg);

	datap = cpr_compress_pages(&cpd, npg, CPR->c_flags & C_COMPRESSING);

	/* Write cpr page descriptor */
	error = cpr_write(vp, (caddr_t)&cpd, sizeof (cpd_t));

	/* Write compressed page data */
	error = cpr_write(vp, (caddr_t)datap, cpd.cpd_length);

	/*
	 * Unmap the pages for tlb and vac flushing
	 */
	i_cpr_mapout(CPR->c_mapping_area, npg);

	if (error) {
		CPR_DEBUG(CPR_DEBUG1,
		    "cpr_compress_and_write: vp 0x%p va 0x%x ", (void *)vp, va);
		CPR_DEBUG(CPR_DEBUG1, "pfn 0x%lx blk %d err %d\n",
		    pfn, cpr_file_bn, error);
	} else {
		cpr_regular_pgs_dumped += npg;
	}

	return (error);
}


int
cpr_write(vnode_t *vp, caddr_t buffer, size_t size)
{
	caddr_t	fromp = buffer;
	size_t bytes, wbytes;
	int error;

	if (cpr_dev_space == 0) {
		if (vp->v_type == VBLK) {
			cpr_dev_space = cpr_get_devsize(vp->v_rdev);
			ASSERT(cpr_dev_space);
		} else
			cpr_dev_space = 1;	/* not used in this case */
	}

	/*
	 * break the write into multiple parts if the request is large:
	 * calculate the count up to the buf page boundary, write it out,
	 * and repeat until done.
	 */
	while (size) {
		bytes = MIN(size, cpr_buf_end - cpr_wptr);
		cprbcopy(fromp, cpr_wptr, bytes);
		cpr_wptr += bytes;
		fromp += bytes;
		size -= bytes;
		if (cpr_wptr < cpr_buf_end)
			return (0);	/* buffer not full yet */
		ASSERT(cpr_wptr == cpr_buf_end);

		wbytes = dbtob(cpr_file_bn + cpr_buf_blocks);
		if (vp->v_type == VBLK) {
			if (wbytes > cpr_dev_space)
				return (ENOSPC);
		} else {
			if (wbytes > VTOI(vp)->i_size)
				return (ENOSPC);
		}

		CPR_DEBUG(CPR_DEBUG3,
		    "cpr_write: frmp=%p wptr=%p cnt=%lx...",
		    (void *)fromp, (void *)cpr_wptr, bytes);
		/*
		 * cross check, this should not happen!
		 */
		if (cpr_disk_writes_ok == 0) {
			prom_printf("cpr_write: disk write too early!\n");
			return (EINVAL);
		}

		do_polled_io = 1;
		error = VOP_DUMP(vp, cpr_buf, cpr_file_bn, cpr_buf_blocks,
		    NULL);
		do_polled_io = 0;
		CPR_DEBUG(CPR_DEBUG3, "done\n");

		STAT->cs_real_statefsz += cpr_buf_size;

		if (error) {
			cpr_err(CE_WARN, "cpr_write error %d", error);
			return (error);
		}
		cpr_file_bn += cpr_buf_blocks;	/* Increment block count */
		cpr_wptr = cpr_buf;		/* back to top of buffer */
	}
	return (0);
}


int
cpr_flush_write(vnode_t *vp)
{
	int	nblk;
	int	error;

	/*
	 * Calculate remaining blocks in buffer, rounded up to nearest
	 * disk block
	 */
	nblk = btod(cpr_wptr - cpr_buf);

	do_polled_io = 1;
	error = VOP_DUMP(vp, (caddr_t)cpr_buf, cpr_file_bn, nblk, NULL);
	do_polled_io = 0;

	cpr_file_bn += nblk;
	if (error)
		CPR_DEBUG(CPR_DEBUG2, "cpr_flush_write: error (%d)\n",
		    error);
	return (error);
}
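
/*
 * Clear the bitmaps for every descriptor; the single bzero of twice
 * cbd_size below covers both the regular and volatile maps (cf. the
 * CPR_DEBUG7 message).
 */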
void
cpr_clear_bitmaps(void)
{
	cbd_t *dp;

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		bzero((void *)dp->cbd_reg_bitmap,
		    (size_t)dp->cbd_size * 2);
	}
	CPR_DEBUG(CPR_DEBUG7, "\ncleared reg and vlt bitmaps\n");
}

int
cpr_contig_pages(vnode_t *vp, int flag)
{
	int chunks = 0, error = 0;
	pgcnt_t i, j, totbit;
	pfn_t spfn;
	cbd_t *dp;
	uint_t	spin_cnt = 0;
	extern	int i_cpr_compress_and_save();

	for (dp = CPR->c_bmda; dp->cbd_size; dp++) {
		spfn = dp->cbd_spfn;
		totbit = BTOb(dp->cbd_size);
		i = 0; /* Beginning of bitmap */
		j = 0;
		while (i < totbit) {
			while ((j < CPR_MAXCONTIG) && ((j + i) < totbit)) {
				if (isset((char *)dp->cbd_reg_bitmap, j+i))
					j++;
				else /* not contiguous anymore */
					break;
			}

			if (j) {
				chunks++;
				if (flag == SAVE_TO_STORAGE) {
					error = i_cpr_compress_and_save(
					    chunks, spfn + i, j);
					if (error)
						return (error);
				} else if (flag == WRITE_TO_STATEFILE) {
					error = cpr_compress_and_write(vp, 0,
					    spfn + i, j);
					if (error)
						return (error);
					else {
						spin_cnt++;
						if ((spin_cnt & 0x5F) == 1)
							cpr_spinning_bar();
					}
				}
			}

			i += j;
			if (j != CPR_MAXCONTIG) {
				/* Stopped on a non-tagged page */
				i++;
			}

			j = 0;
		}
	}

	if (flag == STORAGE_DESC_ALLOC)
		return (chunks);
	else
		return (0);
}


void
cpr_show_range(caddr_t vaddr, size_t size,
    int mapflag, bitfunc_t bitfunc, pgcnt_t count)
{
	char *action, *bname;

	bname = (mapflag == REGULAR_BITMAP) ? "regular" : "volatile";
	if (bitfunc == cpr_setbit)
		action = "tag";
	else if (bitfunc == cpr_clrbit)
		action = "untag";
	else
		action = "none";
	prom_printf("range (0x%p, 0x%p), %s bitmap, %s %ld\n",
	    (void *)vaddr, (void *)(vaddr + size), bname, action, count);
}


pgcnt_t
cpr_count_pages(caddr_t sva, size_t size,
    int mapflag, bitfunc_t bitfunc, int showrange)
{
	caddr_t va, eva;
	pfn_t pfn;
	pgcnt_t count = 0;

	eva = sva + PAGE_ROUNDUP(size);
	for (va = sva; va < eva; va += MMU_PAGESIZE) {
		pfn = va_to_pfn(va);
		if (pfn != PFN_INVALID && pf_is_memory(pfn)) {
			if ((*bitfunc)(pfn, mapflag) == 0)
				count++;
		}
	}

	if ((cpr_debug & CPR_DEBUG7) && showrange == DBG_SHOWRANGE)
		cpr_show_range(sva, size, mapflag, bitfunc, count);

	return (count);
}


pgcnt_t
cpr_count_volatile_pages(int mapflag, bitfunc_t bitfunc)
{
	pgcnt_t count = 0;

	if (cpr_buf) {
		count += cpr_count_pages(cpr_buf, cpr_buf_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	if (cpr_pagedata) {
		count += cpr_count_pages(cpr_pagedata, cpr_pagedata_size,
		    mapflag, bitfunc, DBG_SHOWRANGE);
	}
	count += i_cpr_count_storage_pages(mapflag, bitfunc);

	CPR_DEBUG(CPR_DEBUG7, "cpr_count_vpages: %ld pages, 0x%lx bytes\n",
	    count, mmu_ptob(count));
	return (count);
}


static int
cpr_dump_regular_pages(vnode_t *vp)
{
	int error;

	cpr_regular_pgs_dumped = 0;
	error = cpr_contig_pages(vp, WRITE_TO_STATEFILE);
	if (!error)
		CPR_DEBUG(CPR_DEBUG7, "cpr_dump_regular_pages() done.\n");
	return (error);
}
#endif