/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/cpr.h>
#include <sys/ddi.h>
#include "cprboot.h"


/*
 * check if any cpd_t pages clash with the statefile buffer and shuffle
 * buf pages to free space; since kpages are saved in ascending order,
 * any buf pages preceding the current statefile buffer offset can be
 * written because those pages have already been read and restored
 */
static void
shuffle_pages(cpd_t *descp)
{
	pfn_t low_src_ppn, dst_ppn, tail_ppn, new_ppn;
	size_t dst_off;

	/*
	 * set the lowest source buf ppn for the (precede) comparison
	 * below; the ORIG macro is used for the case where the src buf
	 * page had already been moved - and would confuse the compare
	 */
	low_src_ppn = SF_ORIG_PPN(sfile.buf_offset);

	tail_ppn = descp->cpd_pfn + descp->cpd_pages;
	for (dst_ppn = descp->cpd_pfn; dst_ppn < tail_ppn; dst_ppn++) {
		/*
		 * if the dst page is outside the range of statefile
		 * buffer phys pages, it's OK to write that page;
		 * buf pages may have been moved outside the range,
		 * but only to locations isolated from any dst page
		 */
		if (dst_ppn < sfile.low_ppn || dst_ppn > sfile.high_ppn) {
			SF_STAT_INC(outside);
			continue;
		}

		/*
		 * the dst page is inside the range of buf ppns;
		 * don't need to move the buf page if the dst page
		 * precedes the lowest src buf page
		 */
		if (dst_ppn < low_src_ppn) {
			SF_STAT_INC(precede);
			continue;
		}

		/*
		 * the dst page clashes with the statefile buffer;
		 * move the buf page to a free location and update
		 * the buffer map
		 */
		new_ppn = find_apage();
		phys_xcopy(PN_TO_ADDR(dst_ppn), PN_TO_ADDR(new_ppn),
		    MMU_PAGESIZE);
		dst_off = mmu_ptob(dst_ppn - sfile.low_ppn);
		SF_BUF_PPN(dst_off) = new_ppn;
		SF_STAT_INC(move);
	}
}


/*
 * map-in source statefile buffer pages (read-only) at CB_SRC_VIRT;
 * sets the starting source vaddr with correct page offset
 */
static void
mapin_buf_pages(size_t datalen, caddr_t *srcp)
{
	int dtlb_index, pg_off;
	caddr_t vaddr, tail;
	size_t off, bytes;
	pfn_t src_ppn;

	dtlb_index = cb_dents - CB_MAX_KPAGES - 1;
	off = sfile.buf_offset;
	pg_off = off & MMU_PAGEOFFSET;
	bytes = PAGE_ROUNDUP(pg_off + datalen);
	vaddr = (caddr_t)CB_SRC_VIRT;
	*srcp = vaddr + pg_off;

	for (tail = vaddr + bytes; vaddr < tail; vaddr += MMU_PAGESIZE) {
		src_ppn = SF_BUF_PPN(off);
		cb_mapin(vaddr, src_ppn, TTE8K, 0, dtlb_index);
		dtlb_index--;
		off += MMU_PAGESIZE;
	}
}
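

/*
 * Locked dtlb usage implied by the index arithmetic above and in
 * mapin_dst_pages()/dtlb_cleanup() below; both mapin routines fill
 * locked entries downward from the top of their region, roughly:
 *
 *	cb_dents - 1
 *	    ...					dst kpage mappings
 *	cb_dents - CB_MAX_KPAGES
 *	cb_dents - CB_MAX_KPAGES - 1
 *	    ...					src statefile buf mappings
 *	cb_dents - CB_MAX_KPAGES - CB_MAX_BPAGES
 *
 * dtlb_cleanup() later clears whatever is still locked in this range.
 */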


/*
 * map-in destination kernel pages (read/write) at CB_DST_VIRT
 */
static void
mapin_dst_pages(cpd_t *descp)
{
	int dtlb_index, pages;
	caddr_t vaddr;
	pfn_t dst_ppn;

	dtlb_index = cb_dents - 1;
	vaddr = (caddr_t)CB_DST_VIRT;
	dst_ppn = descp->cpd_pfn;
	for (pages = 0; pages < descp->cpd_pages; pages++) {
		cb_mapin(vaddr, dst_ppn, TTE8K, TTE_HWWR_INT, dtlb_index);
		dtlb_index--;
		vaddr += MMU_PAGESIZE;
		dst_ppn++;
	}
}


/*
 * run a checksum on un/compressed data when flag is set
 */
static int
kdata_cksum(void *data, cpd_t *descp, uint_t flag)
{
	uint_t sum, expect;
	size_t len;

	if ((descp->cpd_flag & flag) == 0)
		return (0);
	else if (flag == CPD_CSUM) {
		expect = descp->cpd_csum;
		len = descp->cpd_length;
	} else {
		expect = descp->cpd_usum;
		len = mmu_ptob(descp->cpd_pages);
	}
	sum = checksum32(data, len);
	if (sum != expect) {
		prom_printf("\n%scompressed data checksum error, "
		    "expect 0x%x, got 0x%x\n", (flag == CPD_USUM) ? "un" : "",
		    expect, sum);
		return (ERR);
	}

	return (0);
}


/*
 * primary kpage restoration routine
 */
static int
restore_page_group(cpd_t *descp)
{
	caddr_t dst, datap;
	size_t size, len;
	caddr_t src;
	int raw;

#if defined(lint)
	(void) compress(0, 0, 0);
#endif

	/*
	 * move any source buf pages that clash with dst kernel pages;
	 * create tlb entries for the orig/new source buf pages and
	 * the dst kpages
	 */
	shuffle_pages(descp);
	mapin_buf_pages(descp->cpd_length, &src);
	mapin_dst_pages(descp);

	/*
	 * for compressed pages, run a checksum at the src vaddr and
	 * decompress to the mapped-in dst kpages; for uncompressed pages,
	 * just copy directly; uncompressed checksums are used for either
	 * uncompressed src data or decompressed result data
	 */
	dst = (caddr_t)CB_DST_VIRT;
	if (descp->cpd_flag & CPD_COMPRESS) {
		if (kdata_cksum(src, descp, CPD_CSUM))
			return (ERR);
		size = mmu_ptob(descp->cpd_pages);
		len = decompress(src, dst, descp->cpd_length, size);
		if (len != size) {
			prom_printf("\nbad decompressed len %lu, size %lu\n",
			    len, size);
			return (ERR);
		}
		raw = 0;
		datap = dst;
	} else {
		raw = 1;
		datap = src;
	}
	if (kdata_cksum(datap, descp, CPD_USUM))
		return (ERR);
	if (raw)
		bcopy(src, dst, descp->cpd_length);

	/*
	 * advance past the kdata for this cpd_t
	 */
	SF_ADV(descp->cpd_length);

	return (0);
}


/*
 * map-in part of the statefile buffer, copy to the virt destination,
 * and advance the statefile buffer offset.  this is used primarily
 * to copy thousands of tiny cpd_t into aligned struct space.
 */
static void
get_phys_data(void *vdst, size_t size)
{
	caddr_t src;

	mapin_buf_pages(size, &src);
	bcopy(src, vdst, size);
	SF_ADV(size);
}


/*
 * clear leftover locked dtlb entries
 */
static void
dtlb_cleanup(void)
{
	int dtlb_index;
	caddr_t vaddr;
	tte_t tte;

	CB_VENTRY(dtlb_cleanup);

	dtlb_index = cb_dents - CB_MAX_KPAGES - CB_MAX_BPAGES - 1;
	for (; dtlb_index < cb_dents; dtlb_index++) {
		get_dtlb_entry(dtlb_index, &vaddr, &tte);
		if (TTE_IS_LOCKED(&tte)) {
			tte.ll = 0;
			set_dtlb_entry(dtlb_index, (caddr_t)0, &tte);
			CB_VPRINTF((" cleared dtlb entry %x\n", dtlb_index));
		}
	}
}
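

/*
 * Layout of the statefile kdata stream consumed below, as implied by
 * cb_restore_kpages() and cb_terminator(): a sequence of page groups,
 * each a cpd_t descriptor followed by that group's page data
 * (compressed when CPD_COMPRESS is set), with a ctrm_t terminator
 * record after the last group:
 *
 *	[cpd_t][kdata] [cpd_t][kdata] ... [cpd_t][kdata] [ctrm_t]
 *
 * get_phys_data() copies out each fixed-size record and advances the
 * buffer offset past it; restore_page_group() advances past the
 * variable-length kdata.
 */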


/*
 * before calling this routine, all cprboot phys pages
 * are isolated from kernel pages; now we can restore
 * kpages from the statefile buffer
 */
int
cb_restore_kpages(void)
{
	int npages, compressed, regular;
	cpd_t desc;
	char *str;

	str = "cb_restore_kpages";
	CB_VPRINTF((ent_fmt, str, entry));

	DEBUG1(prom_printf("%s: restoring kpages... ", prog));
	npages = compressed = regular = 0;
	while (npages < sfile.kpages) {
		get_phys_data(&desc, sizeof (desc));
		if (desc.cpd_magic != CPR_PAGE_MAGIC) {
			prom_printf("\nbad page magic 0x%x, expect 0x%x\n",
			    desc.cpd_magic, CPR_PAGE_MAGIC);
			return (ERR);
		}
		if (restore_page_group(&desc))
			return (ERR);
		npages += desc.cpd_pages;

		if (desc.cpd_flag & CPD_COMPRESS)
			compressed += desc.cpd_pages;
		else
			regular += desc.cpd_pages;

		/*
		 * display a spin char for every 32 page groups
		 * (a full spin <= each MB restored)
		 */
		if ((sfile.ngroups++ & 0x1f) == 0)
			cb_spin();
	}
	DEBUG1(prom_printf(" \b\n"));

	dtlb_cleanup();

	if (verbose) {
		prom_printf("\npage stats: total %d, outside %d, "
		    "move %d, precede %d\n", sfile.kpages, sfile.outside,
		    sfile.move, sfile.precede);
		prom_printf("page stats: ngroups %d, recycle %d\n",
		    sfile.ngroups, sfile.recycle);
	}

	DEBUG4(prom_printf(
	    "%s: total=%d, npages=%d, compressed=%d, regular=%d\n",
	    str, sfile.kpages, npages, compressed, regular));

	/*
	 * sanity check
	 */
	if (npages != sfile.kpages) {
		prom_printf("\n%s: page count mismatch, expect %d, got %d\n",
		    str, sfile.kpages, npages);
		return (ERR);
	}

	return (0);
}


/*
 * check and update the statefile terminator;
 * on exit there will be a leftover tlb entry,
 * but it will soon get replaced by restore_tlb()
 */
int
cb_terminator(void)
{
	ctrm_t cterm;

	CB_VENTRY(cb_terminator);
	get_phys_data(&cterm, sizeof (cterm));
	if (cterm.magic != CPR_TERM_MAGIC) {
		prom_printf("\nbad term magic 0x%x, expect 0x%x\n",
		    cterm.magic, CPR_TERM_MAGIC);
		return (ERR);
	}
	cterm.tm_cprboot_start.tv_sec = cb_msec / 1000;
	cb_mapin((caddr_t)CB_DST_VIRT, cterm.pfn,
	    TTE8K, TTE_HWWR_INT, cb_dents - 1);
	cpr_update_terminator(&cterm, (caddr_t)CB_DST_VIRT);
	return (0);
}
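
/*
 * The comments above suggest these entry points are driven roughly in
 * this order during resume (the exact sequencing lives in cprboot's
 * main line, not in this file); illustrative sketch only:
 *
 *	if (cb_restore_kpages())	restore all kpages first
 *		...fail...
 *	if (cb_terminator())		then read/update the terminator
 *		...fail...
 *	restore_tlb() then replaces the leftover locked dtlb entry
 *	before control returns to the kernel.
 */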