xref: /titanic_54/usr/src/uts/sun4u/os/memscrub.c (revision a08365b4128b8204802f5346e697ee9995a4059f)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
57c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
67c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
77c478bd9Sstevel@tonic-gate  * with the License.
87c478bd9Sstevel@tonic-gate  *
97c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
107c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
117c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
127c478bd9Sstevel@tonic-gate  * and limitations under the License.
137c478bd9Sstevel@tonic-gate  *
147c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
157c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
167c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
177c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
187c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
197c478bd9Sstevel@tonic-gate  *
207c478bd9Sstevel@tonic-gate  * CDDL HEADER END
217c478bd9Sstevel@tonic-gate  */
227c478bd9Sstevel@tonic-gate /*
237c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
257c478bd9Sstevel@tonic-gate  */
267c478bd9Sstevel@tonic-gate 
277c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
287c478bd9Sstevel@tonic-gate 
297c478bd9Sstevel@tonic-gate /*
307c478bd9Sstevel@tonic-gate  * sun4u Memory Scrubbing
317c478bd9Sstevel@tonic-gate  *
327c478bd9Sstevel@tonic-gate  * On detection of a correctable memory ECC error, the sun4u kernel
337c478bd9Sstevel@tonic-gate  * returns the corrected data to the requester and re-writes it
347c478bd9Sstevel@tonic-gate  * to memory (DRAM).  So if the correctable error was transient,
357c478bd9Sstevel@tonic-gate  * the read has effectively been cleaned (scrubbed) from memory.
367c478bd9Sstevel@tonic-gate  *
377c478bd9Sstevel@tonic-gate  * Scrubbing thus reduces the likelihood that multiple transient errors
387c478bd9Sstevel@tonic-gate  * will occur in the same memory word, making uncorrectable errors due
397c478bd9Sstevel@tonic-gate  * to transients less likely.
407c478bd9Sstevel@tonic-gate  *
417c478bd9Sstevel@tonic-gate  * Thus is born the desire that every memory location be periodically
427c478bd9Sstevel@tonic-gate  * accessed.
437c478bd9Sstevel@tonic-gate  *
447c478bd9Sstevel@tonic-gate  * This file implements a memory scrubbing thread.  This scrubber
457c478bd9Sstevel@tonic-gate  * guarantees that all of physical memory is accessed periodically
467c478bd9Sstevel@tonic-gate  * (memscrub_period_sec -- 12 hours).
477c478bd9Sstevel@tonic-gate  *
487c478bd9Sstevel@tonic-gate  * It attempts to do this as unobtrusively as possible.  The thread
497c478bd9Sstevel@tonic-gate  * schedules itself to wake up at an interval such that if it reads
507c478bd9Sstevel@tonic-gate  * memscrub_span_pages (8MB) on each wakeup, it will read all of physical
517c478bd9Sstevel@tonic-gate  * memory in memscrub_period_sec (12 hours).
527c478bd9Sstevel@tonic-gate  *
537c478bd9Sstevel@tonic-gate  * The scrubber uses the block load hardware to read memory @ 268MB/s,
547c478bd9Sstevel@tonic-gate  * so it reads spans of 8MB in 0.03 seconds.  Unlike the original sun4d
557c478bd9Sstevel@tonic-gate  * scrubber the sun4u scrubber does not read ahead if the system is idle
567c478bd9Sstevel@tonic-gate  * because we can read memory very efficiently.
577c478bd9Sstevel@tonic-gate  *
587c478bd9Sstevel@tonic-gate  * The scrubber maintains a private copy of the phys_install memory list
597c478bd9Sstevel@tonic-gate  * to keep track of what memory should be scrubbed.
607c478bd9Sstevel@tonic-gate  *
617c478bd9Sstevel@tonic-gate  * The global routines memscrub_add_span() and memscrub_delete_span() are
627c478bd9Sstevel@tonic-gate  * used to add and delete from this list.  If hotplug memory is later
637c478bd9Sstevel@tonic-gate  * supported these two routines can be used to notify the scrubber of
647c478bd9Sstevel@tonic-gate  * memory configuration changes.
657c478bd9Sstevel@tonic-gate  *
667c478bd9Sstevel@tonic-gate  * The following parameters can be set via /etc/system
677c478bd9Sstevel@tonic-gate  *
687c478bd9Sstevel@tonic-gate  * memscrub_span_pages = MEMSCRUB_DFL_SPAN_PAGES (8MB)
697c478bd9Sstevel@tonic-gate  * memscrub_period_sec = MEMSCRUB_DFL_PERIOD_SEC (12 hours)
707c478bd9Sstevel@tonic-gate  * memscrub_thread_pri = MEMSCRUB_DFL_THREAD_PRI (MINCLSYSPRI)
717c478bd9Sstevel@tonic-gate  * memscrub_delay_start_sec = (5 minutes)
727c478bd9Sstevel@tonic-gate  * memscrub_verbose = (0)
737c478bd9Sstevel@tonic-gate  * memscrub_override_ticks = (1 tick)
747c478bd9Sstevel@tonic-gate  * disable_memscrub = (0)
757c478bd9Sstevel@tonic-gate  * pause_memscrub = (0)
767c478bd9Sstevel@tonic-gate  * read_all_memscrub = (0)
777c478bd9Sstevel@tonic-gate  *
787c478bd9Sstevel@tonic-gate  * The scrubber will print NOTICE messages of what it is doing if
797c478bd9Sstevel@tonic-gate  * "memscrub_verbose" is set.
807c478bd9Sstevel@tonic-gate  *
817c478bd9Sstevel@tonic-gate  * If the scrubber's sleep time calculation drops to zero ticks,
827c478bd9Sstevel@tonic-gate  * memscrub_override_ticks will be used as the sleep time instead. The
837c478bd9Sstevel@tonic-gate  * sleep time should only drop to zero on a system with over 32.95
847c478bd9Sstevel@tonic-gate  * terabytes of memory, or where the default scrubber parameters have
857c478bd9Sstevel@tonic-gate  * been adjusted. For example, reducing memscrub_span_pages or
867c478bd9Sstevel@tonic-gate  * memscrub_period_sec causes the sleep time to drop to zero with less
877c478bd9Sstevel@tonic-gate  * memory. Note that since the sleep time is calculated in clock ticks,
887c478bd9Sstevel@tonic-gate  * using hires clock ticks allows for more memory before the sleep time
897c478bd9Sstevel@tonic-gate  * becomes zero.
907c478bd9Sstevel@tonic-gate  *
917c478bd9Sstevel@tonic-gate  * The scrubber will exit (or never be started) if it finds the variable
927c478bd9Sstevel@tonic-gate  * "disable_memscrub" set.
937c478bd9Sstevel@tonic-gate  *
947c478bd9Sstevel@tonic-gate  * The scrubber will pause (not read memory) when "pause_memscrub"
957c478bd9Sstevel@tonic-gate  * is set.  It will check the state of pause_memscrub at each wakeup
967c478bd9Sstevel@tonic-gate  * period.  The scrubber will not make up for lost time.  If you
977c478bd9Sstevel@tonic-gate  * pause the scrubber for a prolonged period of time you can use
987c478bd9Sstevel@tonic-gate  * the "read_all_memscrub" switch (see below) to catch up. In addition,
997c478bd9Sstevel@tonic-gate  * pause_memscrub is used internally by the post memory DR callbacks.
1007c478bd9Sstevel@tonic-gate  * It is set for the small period of time during which the callbacks
1017c478bd9Sstevel@tonic-gate  * are executing. This ensures "memscrub_lock" will be released,
1027c478bd9Sstevel@tonic-gate  * allowing the callbacks to finish.
1037c478bd9Sstevel@tonic-gate  *
1047c478bd9Sstevel@tonic-gate  * The scrubber will read all memory if "read_all_memscrub" is set.
1057c478bd9Sstevel@tonic-gate  * The normal span read will also occur during the wakeup.
1067c478bd9Sstevel@tonic-gate  *
1077c478bd9Sstevel@tonic-gate  * MEMSCRUB_MIN_PAGES (32MB) is the minimum amount of memory a system
1087c478bd9Sstevel@tonic-gate  * must have before we'll start the scrubber.
1097c478bd9Sstevel@tonic-gate  *
1107c478bd9Sstevel@tonic-gate  * MEMSCRUB_DFL_SPAN_PAGES (8MB) is based on the guess that 0.03 sec
1117c478bd9Sstevel@tonic-gate  * is a "good" amount of minimum time for the thread to run at a time.
1127c478bd9Sstevel@tonic-gate  *
1137c478bd9Sstevel@tonic-gate  * MEMSCRUB_DFL_PERIOD_SEC (12 hours) is nearly a total guess --
1147c478bd9Sstevel@tonic-gate  * twice the frequency the hardware folk estimated would be necessary.
1157c478bd9Sstevel@tonic-gate  *
1167c478bd9Sstevel@tonic-gate  * MEMSCRUB_DFL_THREAD_PRI (MINCLSYSPRI) is based on the assumption
1177c478bd9Sstevel@tonic-gate  * that the scrubber should get its fair share of time (since it
1187c478bd9Sstevel@tonic-gate  * is short).  At a priority of 0 the scrubber will be starved.
1197c478bd9Sstevel@tonic-gate  */
1207c478bd9Sstevel@tonic-gate 
1217c478bd9Sstevel@tonic-gate #include <sys/systm.h>		/* timeout, types, t_lock */
1227c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
1237c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>	/* MIN */
1247c478bd9Sstevel@tonic-gate #include <sys/memlist.h>	/* memlist */
1257c478bd9Sstevel@tonic-gate #include <sys/mem_config.h>	/* memory add/delete */
1267c478bd9Sstevel@tonic-gate #include <sys/kmem.h>		/* KMEM_NOSLEEP */
1277c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>		/* ncpus_online */
1287c478bd9Sstevel@tonic-gate #include <sys/debug.h>		/* ASSERTs */
1297c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>	/* lddphys */
1307c478bd9Sstevel@tonic-gate #include <sys/cpu_module.h>	/* vtag_flushpage */
1317c478bd9Sstevel@tonic-gate #include <sys/kstat.h>
1327c478bd9Sstevel@tonic-gate #include <sys/atomic.h>		/* atomic_add_32 */
1337c478bd9Sstevel@tonic-gate 
1347c478bd9Sstevel@tonic-gate #include <vm/hat.h>
1357c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
1367c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h>	/* XXX FIXME - delete */
1377c478bd9Sstevel@tonic-gate 
1387c478bd9Sstevel@tonic-gate #include <sys/time.h>
1397c478bd9Sstevel@tonic-gate #include <sys/callb.h>		/* CPR callback */
1407c478bd9Sstevel@tonic-gate #include <sys/ontrap.h>
1417c478bd9Sstevel@tonic-gate 
1427c478bd9Sstevel@tonic-gate /*
1437c478bd9Sstevel@tonic-gate  * Should really have paddr_t defined, but it is broken.  Use
1447c478bd9Sstevel@tonic-gate  * ms_paddr_t in the meantime to make the code cleaner
1457c478bd9Sstevel@tonic-gate  */
1467c478bd9Sstevel@tonic-gate typedef uint64_t ms_paddr_t;
1477c478bd9Sstevel@tonic-gate 
1487c478bd9Sstevel@tonic-gate /*
1497c478bd9Sstevel@tonic-gate  * Global Routines:
1507c478bd9Sstevel@tonic-gate  */
1517c478bd9Sstevel@tonic-gate int memscrub_add_span(pfn_t pfn, pgcnt_t pages);
1527c478bd9Sstevel@tonic-gate int memscrub_delete_span(pfn_t pfn, pgcnt_t pages);
1537c478bd9Sstevel@tonic-gate int memscrub_init(void);
1547c478bd9Sstevel@tonic-gate 
1557c478bd9Sstevel@tonic-gate /*
1567c478bd9Sstevel@tonic-gate  * Global Data:
1577c478bd9Sstevel@tonic-gate  */
1587c478bd9Sstevel@tonic-gate 
1597c478bd9Sstevel@tonic-gate /*
1607c478bd9Sstevel@tonic-gate  * scrub if we have at least this many pages
1617c478bd9Sstevel@tonic-gate  */
1627c478bd9Sstevel@tonic-gate #define	MEMSCRUB_MIN_PAGES (32 * 1024 * 1024 / PAGESIZE)
1637c478bd9Sstevel@tonic-gate 
1647c478bd9Sstevel@tonic-gate /*
1657c478bd9Sstevel@tonic-gate  * scan all of physical memory at least once every MEMSCRUB_PERIOD_SEC
1667c478bd9Sstevel@tonic-gate  */
1677c478bd9Sstevel@tonic-gate #define	MEMSCRUB_DFL_PERIOD_SEC	(12 * 60 * 60)	/* 12 hours */
1687c478bd9Sstevel@tonic-gate 
1697c478bd9Sstevel@tonic-gate /*
1707c478bd9Sstevel@tonic-gate  * scan at least MEMSCRUB_DFL_SPAN_PAGES each iteration
1717c478bd9Sstevel@tonic-gate  */
1727c478bd9Sstevel@tonic-gate #define	MEMSCRUB_DFL_SPAN_PAGES	((8 * 1024 * 1024) / PAGESIZE)
1737c478bd9Sstevel@tonic-gate 
1747c478bd9Sstevel@tonic-gate /*
1757c478bd9Sstevel@tonic-gate  * almost anything is higher priority than scrubbing
1767c478bd9Sstevel@tonic-gate  */
1777c478bd9Sstevel@tonic-gate #define	MEMSCRUB_DFL_THREAD_PRI	MINCLSYSPRI
1787c478bd9Sstevel@tonic-gate 
1797c478bd9Sstevel@tonic-gate /*
1807c478bd9Sstevel@tonic-gate  * size used when scanning memory
1817c478bd9Sstevel@tonic-gate  */
1827c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BLOCK_SIZE		256
1837c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BLOCK_SIZE_SHIFT	8 	/* log2(MEMSCRUB_BLOCK_SIZE) */
1847c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BLOCKS_PER_PAGE	(PAGESIZE >> MEMSCRUB_BLOCK_SIZE_SHIFT)
1857c478bd9Sstevel@tonic-gate 
1867c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BPP4M		MMU_PAGESIZE4M >> MEMSCRUB_BLOCK_SIZE_SHIFT
1877c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BPP512K	MMU_PAGESIZE512K >> MEMSCRUB_BLOCK_SIZE_SHIFT
1887c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BPP64K		MMU_PAGESIZE64K >> MEMSCRUB_BLOCK_SIZE_SHIFT
1897c478bd9Sstevel@tonic-gate #define	MEMSCRUB_BPP		MMU_PAGESIZE >> MEMSCRUB_BLOCK_SIZE_SHIFT
1907c478bd9Sstevel@tonic-gate 
1917c478bd9Sstevel@tonic-gate /*
1927c478bd9Sstevel@tonic-gate  * This message indicates that we have exceeded the limitations of
1937c478bd9Sstevel@tonic-gate  * the memscrubber. See the comments above regarding what would
1947c478bd9Sstevel@tonic-gate  * cause the sleep time to become zero. In DEBUG mode, this message
1957c478bd9Sstevel@tonic-gate  * is logged on the console and in the messages file. In non-DEBUG
1967c478bd9Sstevel@tonic-gate  * mode, it is only logged in the messages file.
1977c478bd9Sstevel@tonic-gate  */
1987c478bd9Sstevel@tonic-gate #ifdef DEBUG
1997c478bd9Sstevel@tonic-gate #define	MEMSCRUB_OVERRIDE_MSG	"Memory scrubber sleep time is zero " \
2007c478bd9Sstevel@tonic-gate 	"seconds, consuming entire CPU."
2017c478bd9Sstevel@tonic-gate #else
2027c478bd9Sstevel@tonic-gate #define	MEMSCRUB_OVERRIDE_MSG	"!Memory scrubber sleep time is zero " \
2037c478bd9Sstevel@tonic-gate 	"seconds, consuming entire CPU."
2047c478bd9Sstevel@tonic-gate #endif /* DEBUG */
2057c478bd9Sstevel@tonic-gate 
2067c478bd9Sstevel@tonic-gate /*
2077c478bd9Sstevel@tonic-gate  * we can patch these defaults in /etc/system if necessary
2087c478bd9Sstevel@tonic-gate  */
2097c478bd9Sstevel@tonic-gate uint_t disable_memscrub = 0;
2107c478bd9Sstevel@tonic-gate uint_t pause_memscrub = 0;
2117c478bd9Sstevel@tonic-gate uint_t read_all_memscrub = 0;
2127c478bd9Sstevel@tonic-gate uint_t memscrub_verbose = 0;
2137c478bd9Sstevel@tonic-gate uint_t memscrub_all_idle = 0;
2147c478bd9Sstevel@tonic-gate uint_t memscrub_span_pages = MEMSCRUB_DFL_SPAN_PAGES;
2157c478bd9Sstevel@tonic-gate uint_t memscrub_period_sec = MEMSCRUB_DFL_PERIOD_SEC;
2167c478bd9Sstevel@tonic-gate uint_t memscrub_thread_pri = MEMSCRUB_DFL_THREAD_PRI;
2177c478bd9Sstevel@tonic-gate uint_t memscrub_delay_start_sec = 5 * 60;
2187c478bd9Sstevel@tonic-gate uint_t memscrub_override_ticks = 1;
2197c478bd9Sstevel@tonic-gate 
2207c478bd9Sstevel@tonic-gate /*
2217c478bd9Sstevel@tonic-gate  * Static Routines
2227c478bd9Sstevel@tonic-gate  */
2237c478bd9Sstevel@tonic-gate static void memscrubber(void);
2247c478bd9Sstevel@tonic-gate static void memscrub_cleanup(void);
2257c478bd9Sstevel@tonic-gate static int memscrub_add_span_gen(pfn_t, pgcnt_t, struct memlist **, uint_t *);
2267c478bd9Sstevel@tonic-gate static int memscrub_verify_span(ms_paddr_t *addrp, pgcnt_t *pagesp);
2277c478bd9Sstevel@tonic-gate static void memscrub_scan(uint_t blks, ms_paddr_t src);
2287c478bd9Sstevel@tonic-gate 
2297c478bd9Sstevel@tonic-gate /*
2307c478bd9Sstevel@tonic-gate  * Static Data
2317c478bd9Sstevel@tonic-gate  */
2327c478bd9Sstevel@tonic-gate 
2337c478bd9Sstevel@tonic-gate static struct memlist *memscrub_memlist;
2347c478bd9Sstevel@tonic-gate static uint_t memscrub_phys_pages;
2357c478bd9Sstevel@tonic-gate 
2367c478bd9Sstevel@tonic-gate static kcondvar_t memscrub_cv;
2377c478bd9Sstevel@tonic-gate static kmutex_t memscrub_lock;
2387c478bd9Sstevel@tonic-gate /*
2397c478bd9Sstevel@tonic-gate  * memscrub_lock protects memscrub_memlist, interval_ticks, cprinfo, ...
2407c478bd9Sstevel@tonic-gate  */
2417c478bd9Sstevel@tonic-gate static void memscrub_init_mem_config(void);
2427c478bd9Sstevel@tonic-gate static void memscrub_uninit_mem_config(void);
2437c478bd9Sstevel@tonic-gate 
2447c478bd9Sstevel@tonic-gate /*
2457c478bd9Sstevel@tonic-gate  * Keep track of some interesting statistics
2467c478bd9Sstevel@tonic-gate  */
/*
 * Scrubber statistics, exported through the "memscrub_kstat" kstat
 * created in memscrub_init().  All counters are 32-bit named kstats;
 * the kstat is virtual, so ks_data points directly at this struct.
 */
static struct memscrub_kstats {
	kstat_named_t	done_early;	/* ahead of schedule */
	kstat_named_t	early_sec;	/* by cumulative num secs */
	kstat_named_t	done_late;	/* behind schedule */
	kstat_named_t	late_sec;	/* by cumulative num secs */
	kstat_named_t	interval_ticks;	/* num ticks between intervals */
	kstat_named_t	force_run;	/* forced to run, non-timeout */
	kstat_named_t	errors_found;	/* num errors found by memscrub */
} memscrub_counts = {
	{ "done_early",		KSTAT_DATA_UINT32 },
	{ "early_sec", 		KSTAT_DATA_UINT32 },
	{ "done_late", 		KSTAT_DATA_UINT32 },
	{ "late_sec",		KSTAT_DATA_UINT32 },
	{ "interval_ticks",	KSTAT_DATA_UINT32 },
	{ "force_run",		KSTAT_DATA_UINT32 },
	{ "errors_found",	KSTAT_DATA_UINT32 },
};
2647c478bd9Sstevel@tonic-gate static struct kstat *memscrub_ksp = (struct kstat *)NULL;
2657c478bd9Sstevel@tonic-gate 
2667c478bd9Sstevel@tonic-gate static timeout_id_t memscrub_tid = 0;	/* keep track of timeout id */
2677c478bd9Sstevel@tonic-gate 
2687c478bd9Sstevel@tonic-gate /*
2697c478bd9Sstevel@tonic-gate  * create memscrub_memlist from phys_install list
2707c478bd9Sstevel@tonic-gate  * initialize locks, set memscrub_phys_pages.
2717c478bd9Sstevel@tonic-gate  */
int
memscrub_init(void)
{
	struct memlist *src;

	/*
	 * only startup the scrubber if we have a minimum
	 * number of pages (MEMSCRUB_MIN_PAGES, 32MB).  On smaller
	 * systems none of the setup below runs and we return success
	 * without ever starting a scrubber thread.
	 */
	if (physinstalled >= MEMSCRUB_MIN_PAGES) {

		/*
		 * initialize locks
		 */
		mutex_init(&memscrub_lock, NULL, MUTEX_DRIVER, NULL);
		cv_init(&memscrub_cv, NULL, CV_DRIVER, NULL);

		/*
		 * copy phys_install to memscrub_memlist, one span at a
		 * time.  On any failure, undo everything done so far and
		 * report the error to the caller.
		 */
		for (src = phys_install; src; src = src->next) {
			if (memscrub_add_span(
			    (pfn_t)(src->address >> PAGESHIFT),
			    (pgcnt_t)(src->size >> PAGESHIFT))) {
				memscrub_cleanup();
				return (-1);
			}
		}

		/*
		 * initialize kstats.  A kstat failure is not fatal --
		 * the scrubber runs without statistics.
		 */
		memscrub_ksp = kstat_create("unix", 0, "memscrub_kstat",
			"misc", KSTAT_TYPE_NAMED,
			sizeof (memscrub_counts) / sizeof (kstat_named_t),
			KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE);

		if (memscrub_ksp) {
			memscrub_ksp->ks_data = (void *)&memscrub_counts;
			kstat_install(memscrub_ksp);
		} else {
			cmn_err(CE_NOTE, "Memscrubber cannot create kstats\n");
		}

		/*
		 * create memscrubber thread
		 */
		(void) thread_create(NULL, 0, (void (*)())memscrubber,
		    NULL, 0, &p0, TS_RUN, memscrub_thread_pri);

		/*
		 * We don't want call backs changing the list
		 * if there is no thread running, so the DR callbacks are
		 * registered only after the thread exists.  We do not
		 * attempt to deal with stopping/starting scrubbing
		 * on memory size changes.
		 */
		memscrub_init_mem_config();
	}

	/* 0: scrubber started, or not needed on this configuration */
	return (0);
}
3337c478bd9Sstevel@tonic-gate 
3347c478bd9Sstevel@tonic-gate static void
3357c478bd9Sstevel@tonic-gate memscrub_cleanup(void)
3367c478bd9Sstevel@tonic-gate {
3377c478bd9Sstevel@tonic-gate 	memscrub_uninit_mem_config();
3387c478bd9Sstevel@tonic-gate 	while (memscrub_memlist) {
3397c478bd9Sstevel@tonic-gate 		(void) memscrub_delete_span(
3407c478bd9Sstevel@tonic-gate 			(pfn_t)(memscrub_memlist->address >> PAGESHIFT),
3417c478bd9Sstevel@tonic-gate 			(pgcnt_t)(memscrub_memlist->size >> PAGESHIFT));
3427c478bd9Sstevel@tonic-gate 	}
3437c478bd9Sstevel@tonic-gate 	if (memscrub_ksp)
3447c478bd9Sstevel@tonic-gate 		kstat_delete(memscrub_ksp);
3457c478bd9Sstevel@tonic-gate 	cv_destroy(&memscrub_cv);
3467c478bd9Sstevel@tonic-gate 	mutex_destroy(&memscrub_lock);
3477c478bd9Sstevel@tonic-gate }
3487c478bd9Sstevel@tonic-gate 
3497c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
3507c478bd9Sstevel@tonic-gate static void
3517c478bd9Sstevel@tonic-gate memscrub_printmemlist(char *title, struct memlist *listp)
3527c478bd9Sstevel@tonic-gate {
3537c478bd9Sstevel@tonic-gate 	struct memlist *list;
3547c478bd9Sstevel@tonic-gate 
3557c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "%s:\n", title);
3567c478bd9Sstevel@tonic-gate 
3577c478bd9Sstevel@tonic-gate 	for (list = listp; list; list = list->next) {
3587c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT, "addr = 0x%llx, size = 0x%llx\n",
3597c478bd9Sstevel@tonic-gate 		    list->address, list->size);
3607c478bd9Sstevel@tonic-gate 	}
3617c478bd9Sstevel@tonic-gate }
3627c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
3637c478bd9Sstevel@tonic-gate 
/*
 * timeout(9F) callback: wake the memscrubber thread sleeping in
 * cv_wait().  Also called directly from memscrub_run().
 */
/* ARGSUSED */
static void
memscrub_wakeup(void *c)
{
	/*
	 * grab mutex to guarantee that our wakeup call
	 * arrives after we go to sleep -- so we can't sleep forever.
	 */
	mutex_enter(&memscrub_lock);
	cv_signal(&memscrub_cv);
	mutex_exit(&memscrub_lock);
}
3767c478bd9Sstevel@tonic-gate 
3777c478bd9Sstevel@tonic-gate /*
3787c478bd9Sstevel@tonic-gate  * provide an interface external to the memscrubber
3797c478bd9Sstevel@tonic-gate  * which will force the memscrub thread to run vs.
3807c478bd9Sstevel@tonic-gate  * waiting for the timeout, if one is set
3817c478bd9Sstevel@tonic-gate  */
void
memscrub_run(void)
{
	memscrub_counts.force_run.value.ui32++;
	if (memscrub_tid) {
		/*
		 * Cancel the pending wakeup and signal the scrubber
		 * immediately.  NOTE(review): memscrub_tid is read here
		 * without memscrub_lock; if the timeout fires between the
		 * test and the untimeout(), untimeout() is a no-op and the
		 * extra cv_signal() is harmless -- presumably this benign
		 * race is intentional, but confirm.
		 */
		(void) untimeout(memscrub_tid);
		memscrub_wakeup((void *)NULL);
	}
}
3917c478bd9Sstevel@tonic-gate 
3927c478bd9Sstevel@tonic-gate /*
3937c478bd9Sstevel@tonic-gate  * this calculation doesn't account for the time
3947c478bd9Sstevel@tonic-gate  * that the actual scan consumes -- so we'd fall
3957c478bd9Sstevel@tonic-gate  * slightly behind schedule with this interval.
3967c478bd9Sstevel@tonic-gate  * It's very small.
3977c478bd9Sstevel@tonic-gate  */
3987c478bd9Sstevel@tonic-gate 
3997c478bd9Sstevel@tonic-gate static uint_t
4007c478bd9Sstevel@tonic-gate compute_interval_ticks(void)
4017c478bd9Sstevel@tonic-gate {
4027c478bd9Sstevel@tonic-gate 	/*
4037c478bd9Sstevel@tonic-gate 	 * We use msp_safe mpp_safe below to insure somebody
4047c478bd9Sstevel@tonic-gate 	 * doesn't set memscrub_span_pages or memscrub_phys_pages
4057c478bd9Sstevel@tonic-gate 	 * to 0 on us.
4067c478bd9Sstevel@tonic-gate 	 */
4077c478bd9Sstevel@tonic-gate 	static uint_t msp_safe, mpp_safe;
4087c478bd9Sstevel@tonic-gate 	static uint_t interval_ticks, period_ticks;
4097c478bd9Sstevel@tonic-gate 	msp_safe = memscrub_span_pages;
4107c478bd9Sstevel@tonic-gate 	mpp_safe = memscrub_phys_pages;
4117c478bd9Sstevel@tonic-gate 
4127c478bd9Sstevel@tonic-gate 	period_ticks = memscrub_period_sec * hz;
4137c478bd9Sstevel@tonic-gate 	interval_ticks = period_ticks;
4147c478bd9Sstevel@tonic-gate 
4157c478bd9Sstevel@tonic-gate 	ASSERT(mutex_owned(&memscrub_lock));
4167c478bd9Sstevel@tonic-gate 
4177c478bd9Sstevel@tonic-gate 	if ((msp_safe != 0) && (mpp_safe != 0)) {
4187c478bd9Sstevel@tonic-gate 		if (memscrub_phys_pages <= msp_safe) {
4197c478bd9Sstevel@tonic-gate 			interval_ticks = period_ticks;
4207c478bd9Sstevel@tonic-gate 		} else {
4217c478bd9Sstevel@tonic-gate 			interval_ticks = (period_ticks /
4227c478bd9Sstevel@tonic-gate 			    (mpp_safe / msp_safe));
4237c478bd9Sstevel@tonic-gate 		}
4247c478bd9Sstevel@tonic-gate 	}
4257c478bd9Sstevel@tonic-gate 	return (interval_ticks);
4267c478bd9Sstevel@tonic-gate }
4277c478bd9Sstevel@tonic-gate 
4287c478bd9Sstevel@tonic-gate void
4297c478bd9Sstevel@tonic-gate memscrubber(void)
4307c478bd9Sstevel@tonic-gate {
4317c478bd9Sstevel@tonic-gate 	ms_paddr_t address, addr;
4327c478bd9Sstevel@tonic-gate 	time_t deadline;
4337c478bd9Sstevel@tonic-gate 	pgcnt_t pages;
4347c478bd9Sstevel@tonic-gate 	uint_t reached_end = 1;
4357c478bd9Sstevel@tonic-gate 	uint_t paused_message = 0;
4367c478bd9Sstevel@tonic-gate 	uint_t interval_ticks = 0;
4377c478bd9Sstevel@tonic-gate 	uint_t sleep_warn_printed = 0;
4387c478bd9Sstevel@tonic-gate 	callb_cpr_t cprinfo;
4397c478bd9Sstevel@tonic-gate 
4407c478bd9Sstevel@tonic-gate 	/*
4417c478bd9Sstevel@tonic-gate 	 * notify CPR of our existence
4427c478bd9Sstevel@tonic-gate 	 */
4437c478bd9Sstevel@tonic-gate 	CALLB_CPR_INIT(&cprinfo, &memscrub_lock, callb_generic_cpr, "memscrub");
4447c478bd9Sstevel@tonic-gate 
4457c478bd9Sstevel@tonic-gate 	mutex_enter(&memscrub_lock);
4467c478bd9Sstevel@tonic-gate 
4477c478bd9Sstevel@tonic-gate 	if (memscrub_memlist == NULL) {
4487c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "memscrub_memlist not initialized.");
4497c478bd9Sstevel@tonic-gate 		goto memscrub_exit;
4507c478bd9Sstevel@tonic-gate 	}
4517c478bd9Sstevel@tonic-gate 
4527c478bd9Sstevel@tonic-gate 	address = memscrub_memlist->address;
4537c478bd9Sstevel@tonic-gate 
4547c478bd9Sstevel@tonic-gate 	deadline = gethrestime_sec() + memscrub_delay_start_sec;
4557c478bd9Sstevel@tonic-gate 
4567c478bd9Sstevel@tonic-gate 	for (;;) {
4577c478bd9Sstevel@tonic-gate 		if (disable_memscrub)
4587c478bd9Sstevel@tonic-gate 			break;
4597c478bd9Sstevel@tonic-gate 
4607c478bd9Sstevel@tonic-gate 		/*
4617c478bd9Sstevel@tonic-gate 		 * compute interval_ticks
4627c478bd9Sstevel@tonic-gate 		 */
4637c478bd9Sstevel@tonic-gate 		interval_ticks = compute_interval_ticks();
4647c478bd9Sstevel@tonic-gate 
4657c478bd9Sstevel@tonic-gate 		/*
4667c478bd9Sstevel@tonic-gate 		 * If the calculated sleep time is zero, and pause_memscrub
4677c478bd9Sstevel@tonic-gate 		 * has been set, make sure we sleep so that another thread
4687c478bd9Sstevel@tonic-gate 		 * can acquire memscrub_lock.
4697c478bd9Sstevel@tonic-gate 		 */
4707c478bd9Sstevel@tonic-gate 		if (interval_ticks == 0 && pause_memscrub) {
4717c478bd9Sstevel@tonic-gate 			interval_ticks = hz;
4727c478bd9Sstevel@tonic-gate 		}
4737c478bd9Sstevel@tonic-gate 
4747c478bd9Sstevel@tonic-gate 		/*
4757c478bd9Sstevel@tonic-gate 		 * And as a fail safe, under normal non-paused operation, do
4767c478bd9Sstevel@tonic-gate 		 * not allow the sleep time to be zero.
4777c478bd9Sstevel@tonic-gate 		 */
4787c478bd9Sstevel@tonic-gate 		if (interval_ticks == 0) {
4797c478bd9Sstevel@tonic-gate 			interval_ticks = memscrub_override_ticks;
4807c478bd9Sstevel@tonic-gate 			if (!sleep_warn_printed) {
4817c478bd9Sstevel@tonic-gate 				cmn_err(CE_NOTE, MEMSCRUB_OVERRIDE_MSG);
4827c478bd9Sstevel@tonic-gate 				sleep_warn_printed = 1;
4837c478bd9Sstevel@tonic-gate 			}
4847c478bd9Sstevel@tonic-gate 		}
4857c478bd9Sstevel@tonic-gate 
4867c478bd9Sstevel@tonic-gate 		memscrub_counts.interval_ticks.value.ui32 = interval_ticks;
4877c478bd9Sstevel@tonic-gate 
4887c478bd9Sstevel@tonic-gate 		/*
4897c478bd9Sstevel@tonic-gate 		 * Did we just reach the end of memory? If we are at the
4907c478bd9Sstevel@tonic-gate 		 * end of memory, delay end of memory processing until
4917c478bd9Sstevel@tonic-gate 		 * pause_memscrub is not set.
4927c478bd9Sstevel@tonic-gate 		 */
4937c478bd9Sstevel@tonic-gate 		if (reached_end && !pause_memscrub) {
4947c478bd9Sstevel@tonic-gate 			time_t now = gethrestime_sec();
4957c478bd9Sstevel@tonic-gate 
4967c478bd9Sstevel@tonic-gate 			if (now >= deadline) {
4977c478bd9Sstevel@tonic-gate 				memscrub_counts.done_late.value.ui32++;
4987c478bd9Sstevel@tonic-gate 				memscrub_counts.late_sec.value.ui32 +=
4997c478bd9Sstevel@tonic-gate 					(now - deadline);
5007c478bd9Sstevel@tonic-gate 				/*
5017c478bd9Sstevel@tonic-gate 				 * past deadline, start right away
5027c478bd9Sstevel@tonic-gate 				 */
5037c478bd9Sstevel@tonic-gate 				interval_ticks = 0;
5047c478bd9Sstevel@tonic-gate 
5057c478bd9Sstevel@tonic-gate 				deadline = now + memscrub_period_sec;
5067c478bd9Sstevel@tonic-gate 			} else {
5077c478bd9Sstevel@tonic-gate 				/*
5087c478bd9Sstevel@tonic-gate 				 * we finished ahead of schedule.
5097c478bd9Sstevel@tonic-gate 				 * wait till previous deadline before re-start.
5107c478bd9Sstevel@tonic-gate 				 */
5117c478bd9Sstevel@tonic-gate 				interval_ticks = (deadline - now) * hz;
5127c478bd9Sstevel@tonic-gate 				memscrub_counts.done_early.value.ui32++;
5137c478bd9Sstevel@tonic-gate 				memscrub_counts.early_sec.value.ui32 +=
5147c478bd9Sstevel@tonic-gate 					(deadline - now);
5157c478bd9Sstevel@tonic-gate 				deadline += memscrub_period_sec;
5167c478bd9Sstevel@tonic-gate 			}
5177c478bd9Sstevel@tonic-gate 			reached_end = 0;
5187c478bd9Sstevel@tonic-gate 			sleep_warn_printed = 0;
5197c478bd9Sstevel@tonic-gate 		}
5207c478bd9Sstevel@tonic-gate 
5217c478bd9Sstevel@tonic-gate 		if (interval_ticks != 0) {
5227c478bd9Sstevel@tonic-gate 			/*
5237c478bd9Sstevel@tonic-gate 			 * it is safe from our standpoint for CPR to
5247c478bd9Sstevel@tonic-gate 			 * suspend the system
5257c478bd9Sstevel@tonic-gate 			 */
5267c478bd9Sstevel@tonic-gate 			CALLB_CPR_SAFE_BEGIN(&cprinfo);
5277c478bd9Sstevel@tonic-gate 
5287c478bd9Sstevel@tonic-gate 			/*
5297c478bd9Sstevel@tonic-gate 			 * hit the snooze bar
5307c478bd9Sstevel@tonic-gate 			 */
5317c478bd9Sstevel@tonic-gate 			memscrub_tid = timeout(memscrub_wakeup, NULL,
5327c478bd9Sstevel@tonic-gate 			    interval_ticks);
5337c478bd9Sstevel@tonic-gate 
5347c478bd9Sstevel@tonic-gate 			/*
5357c478bd9Sstevel@tonic-gate 			 * go to sleep
5367c478bd9Sstevel@tonic-gate 			 */
5377c478bd9Sstevel@tonic-gate 			cv_wait(&memscrub_cv, &memscrub_lock);
5387c478bd9Sstevel@tonic-gate 
5397c478bd9Sstevel@tonic-gate 			/*
5407c478bd9Sstevel@tonic-gate 			 * at this point, no timeout should be set
5417c478bd9Sstevel@tonic-gate 			 */
5427c478bd9Sstevel@tonic-gate 			memscrub_tid = 0;
5437c478bd9Sstevel@tonic-gate 
5447c478bd9Sstevel@tonic-gate 			/*
5457c478bd9Sstevel@tonic-gate 			 * we need to goto work and will be modifying
5467c478bd9Sstevel@tonic-gate 			 * our internal state and mapping/unmapping
5477c478bd9Sstevel@tonic-gate 			 * TTEs
5487c478bd9Sstevel@tonic-gate 			 */
5497c478bd9Sstevel@tonic-gate 			CALLB_CPR_SAFE_END(&cprinfo, &memscrub_lock);
5507c478bd9Sstevel@tonic-gate 		}
5517c478bd9Sstevel@tonic-gate 
5527c478bd9Sstevel@tonic-gate 
5537c478bd9Sstevel@tonic-gate 		if (memscrub_phys_pages == 0) {
5547c478bd9Sstevel@tonic-gate 			cmn_err(CE_WARN, "Memory scrubber has 0 pages to read");
5557c478bd9Sstevel@tonic-gate 			goto memscrub_exit;
5567c478bd9Sstevel@tonic-gate 		}
5577c478bd9Sstevel@tonic-gate 
5587c478bd9Sstevel@tonic-gate 		if (!pause_memscrub) {
5597c478bd9Sstevel@tonic-gate 			if (paused_message) {
5607c478bd9Sstevel@tonic-gate 				paused_message = 0;
5617c478bd9Sstevel@tonic-gate 				if (memscrub_verbose)
5627c478bd9Sstevel@tonic-gate 					cmn_err(CE_NOTE, "Memory scrubber "
5637c478bd9Sstevel@tonic-gate 					    "resuming");
5647c478bd9Sstevel@tonic-gate 			}
5657c478bd9Sstevel@tonic-gate 
5667c478bd9Sstevel@tonic-gate 			if (read_all_memscrub) {
5677c478bd9Sstevel@tonic-gate 				if (memscrub_verbose)
5687c478bd9Sstevel@tonic-gate 					cmn_err(CE_NOTE, "Memory scrubber "
5697c478bd9Sstevel@tonic-gate 					    "reading all memory per request");
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate 				addr = memscrub_memlist->address;
5727c478bd9Sstevel@tonic-gate 				reached_end = 0;
5737c478bd9Sstevel@tonic-gate 				while (!reached_end) {
5747c478bd9Sstevel@tonic-gate 					if (disable_memscrub)
5757c478bd9Sstevel@tonic-gate 						break;
5767c478bd9Sstevel@tonic-gate 					pages = memscrub_phys_pages;
5777c478bd9Sstevel@tonic-gate 					reached_end = memscrub_verify_span(
5787c478bd9Sstevel@tonic-gate 					    &addr, &pages);
5797c478bd9Sstevel@tonic-gate 					memscrub_scan(pages *
5807c478bd9Sstevel@tonic-gate 					    MEMSCRUB_BLOCKS_PER_PAGE, addr);
5817c478bd9Sstevel@tonic-gate 					addr += ((uint64_t)pages * PAGESIZE);
5827c478bd9Sstevel@tonic-gate 				}
5837c478bd9Sstevel@tonic-gate 				read_all_memscrub = 0;
5847c478bd9Sstevel@tonic-gate 			}
5857c478bd9Sstevel@tonic-gate 
5867c478bd9Sstevel@tonic-gate 			/*
5877c478bd9Sstevel@tonic-gate 			 * read 1 span
5887c478bd9Sstevel@tonic-gate 			 */
5897c478bd9Sstevel@tonic-gate 			pages = memscrub_span_pages;
5907c478bd9Sstevel@tonic-gate 
5917c478bd9Sstevel@tonic-gate 			if (disable_memscrub)
5927c478bd9Sstevel@tonic-gate 				break;
5937c478bd9Sstevel@tonic-gate 
5947c478bd9Sstevel@tonic-gate 			/*
5957c478bd9Sstevel@tonic-gate 			 * determine physical address range
5967c478bd9Sstevel@tonic-gate 			 */
5977c478bd9Sstevel@tonic-gate 			reached_end = memscrub_verify_span(&address,
5987c478bd9Sstevel@tonic-gate 			    &pages);
5997c478bd9Sstevel@tonic-gate 
6007c478bd9Sstevel@tonic-gate 			memscrub_scan(pages * MEMSCRUB_BLOCKS_PER_PAGE,
6017c478bd9Sstevel@tonic-gate 			    address);
6027c478bd9Sstevel@tonic-gate 
6037c478bd9Sstevel@tonic-gate 			address += ((uint64_t)pages * PAGESIZE);
6047c478bd9Sstevel@tonic-gate 		}
6057c478bd9Sstevel@tonic-gate 
6067c478bd9Sstevel@tonic-gate 		if (pause_memscrub && !paused_message) {
6077c478bd9Sstevel@tonic-gate 			paused_message = 1;
6087c478bd9Sstevel@tonic-gate 			if (memscrub_verbose)
6097c478bd9Sstevel@tonic-gate 				cmn_err(CE_NOTE, "Memory scrubber paused");
6107c478bd9Sstevel@tonic-gate 		}
6117c478bd9Sstevel@tonic-gate 	}
6127c478bd9Sstevel@tonic-gate 
6137c478bd9Sstevel@tonic-gate memscrub_exit:
6147c478bd9Sstevel@tonic-gate 	cmn_err(CE_NOTE, "Memory scrubber exiting");
6157c478bd9Sstevel@tonic-gate 	CALLB_CPR_EXIT(&cprinfo);
6167c478bd9Sstevel@tonic-gate 	memscrub_cleanup();
6177c478bd9Sstevel@tonic-gate 	thread_exit();
6187c478bd9Sstevel@tonic-gate 	/* NOTREACHED */
6197c478bd9Sstevel@tonic-gate }
6207c478bd9Sstevel@tonic-gate 
6217c478bd9Sstevel@tonic-gate /*
6227c478bd9Sstevel@tonic-gate  * condition address and size
6237c478bd9Sstevel@tonic-gate  * such that they span legal physical addresses.
6247c478bd9Sstevel@tonic-gate  *
6257c478bd9Sstevel@tonic-gate  * when appropriate, address will be rounded up to start of next
6267c478bd9Sstevel@tonic-gate  * struct memlist, and pages will be rounded down to the end of the
6277c478bd9Sstevel@tonic-gate  * memlist size.
6287c478bd9Sstevel@tonic-gate  *
6297c478bd9Sstevel@tonic-gate  * returns 1 if reached end of list, else returns 0.
6307c478bd9Sstevel@tonic-gate  */
6317c478bd9Sstevel@tonic-gate static int
6327c478bd9Sstevel@tonic-gate memscrub_verify_span(ms_paddr_t *addrp, pgcnt_t *pagesp)
6337c478bd9Sstevel@tonic-gate {
6347c478bd9Sstevel@tonic-gate 	struct memlist *mlp;
6357c478bd9Sstevel@tonic-gate 	ms_paddr_t address = *addrp;
6367c478bd9Sstevel@tonic-gate 	uint64_t bytes = (uint64_t)*pagesp * PAGESIZE;
6377c478bd9Sstevel@tonic-gate 	uint64_t bytes_remaining;
6387c478bd9Sstevel@tonic-gate 	int reached_end = 0;
6397c478bd9Sstevel@tonic-gate 
6407c478bd9Sstevel@tonic-gate 	ASSERT(mutex_owned(&memscrub_lock));
6417c478bd9Sstevel@tonic-gate 
6427c478bd9Sstevel@tonic-gate 	/*
6437c478bd9Sstevel@tonic-gate 	 * find memlist struct that contains addrp
6447c478bd9Sstevel@tonic-gate 	 * assumes memlist is sorted by ascending address.
6457c478bd9Sstevel@tonic-gate 	 */
6467c478bd9Sstevel@tonic-gate 	for (mlp = memscrub_memlist; mlp != NULL; mlp = mlp->next) {
6477c478bd9Sstevel@tonic-gate 		/*
6487c478bd9Sstevel@tonic-gate 		 * if before this chunk, round up to beginning
6497c478bd9Sstevel@tonic-gate 		 */
6507c478bd9Sstevel@tonic-gate 		if (address < mlp->address) {
6517c478bd9Sstevel@tonic-gate 			address = mlp->address;
6527c478bd9Sstevel@tonic-gate 			break;
6537c478bd9Sstevel@tonic-gate 		}
6547c478bd9Sstevel@tonic-gate 		/*
6557c478bd9Sstevel@tonic-gate 		 * if before end of chunk, then we found it
6567c478bd9Sstevel@tonic-gate 		 */
6577c478bd9Sstevel@tonic-gate 		if (address < (mlp->address + mlp->size))
6587c478bd9Sstevel@tonic-gate 			break;
6597c478bd9Sstevel@tonic-gate 
6607c478bd9Sstevel@tonic-gate 		/* else go to next struct memlist */
6617c478bd9Sstevel@tonic-gate 	}
6627c478bd9Sstevel@tonic-gate 	/*
6637c478bd9Sstevel@tonic-gate 	 * if we hit end of list, start at beginning
6647c478bd9Sstevel@tonic-gate 	 */
6657c478bd9Sstevel@tonic-gate 	if (mlp == NULL) {
6667c478bd9Sstevel@tonic-gate 		mlp = memscrub_memlist;
6677c478bd9Sstevel@tonic-gate 		address = mlp->address;
6687c478bd9Sstevel@tonic-gate 	}
6697c478bd9Sstevel@tonic-gate 
6707c478bd9Sstevel@tonic-gate 	/*
6717c478bd9Sstevel@tonic-gate 	 * now we have legal address, and its mlp, condition bytes
6727c478bd9Sstevel@tonic-gate 	 */
6737c478bd9Sstevel@tonic-gate 	bytes_remaining = (mlp->address + mlp->size) - address;
6747c478bd9Sstevel@tonic-gate 
6757c478bd9Sstevel@tonic-gate 	if (bytes > bytes_remaining)
6767c478bd9Sstevel@tonic-gate 		bytes = bytes_remaining;
6777c478bd9Sstevel@tonic-gate 
6787c478bd9Sstevel@tonic-gate 	/*
6797c478bd9Sstevel@tonic-gate 	 * will this span take us to end of list?
6807c478bd9Sstevel@tonic-gate 	 */
6817c478bd9Sstevel@tonic-gate 	if ((mlp->next == NULL) &&
6827c478bd9Sstevel@tonic-gate 	    ((mlp->address + mlp->size) == (address + bytes)))
6837c478bd9Sstevel@tonic-gate 		reached_end = 1;
6847c478bd9Sstevel@tonic-gate 
6857c478bd9Sstevel@tonic-gate 	/* return values */
6867c478bd9Sstevel@tonic-gate 	*addrp = address;
6877c478bd9Sstevel@tonic-gate 	*pagesp = bytes / PAGESIZE;
6887c478bd9Sstevel@tonic-gate 
6897c478bd9Sstevel@tonic-gate 	return (reached_end);
6907c478bd9Sstevel@tonic-gate }
6917c478bd9Sstevel@tonic-gate 
6927c478bd9Sstevel@tonic-gate /*
6937c478bd9Sstevel@tonic-gate  * add a span to the memscrub list
6947c478bd9Sstevel@tonic-gate  * add to memscrub_phys_pages
6957c478bd9Sstevel@tonic-gate  */
6967c478bd9Sstevel@tonic-gate int
6977c478bd9Sstevel@tonic-gate memscrub_add_span(pfn_t pfn, pgcnt_t pages)
6987c478bd9Sstevel@tonic-gate {
6997c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
7007c478bd9Sstevel@tonic-gate 	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
7017c478bd9Sstevel@tonic-gate 	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
7027c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
7037c478bd9Sstevel@tonic-gate 
7047c478bd9Sstevel@tonic-gate 	int retval;
7057c478bd9Sstevel@tonic-gate 
7067c478bd9Sstevel@tonic-gate 	mutex_enter(&memscrub_lock);
7077c478bd9Sstevel@tonic-gate 
7087c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
7097c478bd9Sstevel@tonic-gate 	memscrub_printmemlist("memscrub_memlist before", memscrub_memlist);
7107c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
7117c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "memscrub_add_span: address: 0x%llx"
7127c478bd9Sstevel@tonic-gate 	    " size: 0x%llx\n", address, bytes);
7137c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
7147c478bd9Sstevel@tonic-gate 
7157c478bd9Sstevel@tonic-gate 	retval = memscrub_add_span_gen(pfn, pages, &memscrub_memlist,
7167c478bd9Sstevel@tonic-gate 	    &memscrub_phys_pages);
7177c478bd9Sstevel@tonic-gate 
7187c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
7197c478bd9Sstevel@tonic-gate 	memscrub_printmemlist("memscrub_memlist after", memscrub_memlist);
7207c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
7217c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
7227c478bd9Sstevel@tonic-gate 
7237c478bd9Sstevel@tonic-gate 	mutex_exit(&memscrub_lock);
7247c478bd9Sstevel@tonic-gate 
7257c478bd9Sstevel@tonic-gate 	return (retval);
7267c478bd9Sstevel@tonic-gate }
7277c478bd9Sstevel@tonic-gate 
7287c478bd9Sstevel@tonic-gate static int
7297c478bd9Sstevel@tonic-gate memscrub_add_span_gen(
7307c478bd9Sstevel@tonic-gate 	pfn_t pfn,
7317c478bd9Sstevel@tonic-gate 	pgcnt_t pages,
7327c478bd9Sstevel@tonic-gate 	struct memlist **list,
7337c478bd9Sstevel@tonic-gate 	uint_t *npgs)
7347c478bd9Sstevel@tonic-gate {
7357c478bd9Sstevel@tonic-gate 	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
7367c478bd9Sstevel@tonic-gate 	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
7377c478bd9Sstevel@tonic-gate 	struct memlist *dst;
7387c478bd9Sstevel@tonic-gate 	struct memlist *prev, *next;
7397c478bd9Sstevel@tonic-gate 	int retval = 0;
7407c478bd9Sstevel@tonic-gate 
7417c478bd9Sstevel@tonic-gate 	/*
7427c478bd9Sstevel@tonic-gate 	 * allocate a new struct memlist
7437c478bd9Sstevel@tonic-gate 	 */
7447c478bd9Sstevel@tonic-gate 
7457c478bd9Sstevel@tonic-gate 	dst = (struct memlist *)
7467c478bd9Sstevel@tonic-gate 	    kmem_alloc(sizeof (struct memlist), KM_NOSLEEP);
7477c478bd9Sstevel@tonic-gate 
7487c478bd9Sstevel@tonic-gate 	if (dst == NULL) {
7497c478bd9Sstevel@tonic-gate 		retval = -1;
7507c478bd9Sstevel@tonic-gate 		goto add_done;
7517c478bd9Sstevel@tonic-gate 	}
7527c478bd9Sstevel@tonic-gate 
7537c478bd9Sstevel@tonic-gate 	dst->address = address;
7547c478bd9Sstevel@tonic-gate 	dst->size = bytes;
7557c478bd9Sstevel@tonic-gate 
7567c478bd9Sstevel@tonic-gate 	/*
7577c478bd9Sstevel@tonic-gate 	 * first insert
7587c478bd9Sstevel@tonic-gate 	 */
7597c478bd9Sstevel@tonic-gate 	if (*list == NULL) {
7607c478bd9Sstevel@tonic-gate 		dst->prev = NULL;
7617c478bd9Sstevel@tonic-gate 		dst->next = NULL;
7627c478bd9Sstevel@tonic-gate 		*list = dst;
7637c478bd9Sstevel@tonic-gate 
7647c478bd9Sstevel@tonic-gate 		goto add_done;
7657c478bd9Sstevel@tonic-gate 	}
7667c478bd9Sstevel@tonic-gate 
7677c478bd9Sstevel@tonic-gate 	/*
7687c478bd9Sstevel@tonic-gate 	 * insert into sorted list
7697c478bd9Sstevel@tonic-gate 	 */
7707c478bd9Sstevel@tonic-gate 	for (prev = NULL, next = *list;
7717c478bd9Sstevel@tonic-gate 	    next != NULL;
7727c478bd9Sstevel@tonic-gate 	    prev = next, next = next->next) {
7737c478bd9Sstevel@tonic-gate 		if (address > (next->address + next->size))
7747c478bd9Sstevel@tonic-gate 			continue;
7757c478bd9Sstevel@tonic-gate 
7767c478bd9Sstevel@tonic-gate 		/*
7777c478bd9Sstevel@tonic-gate 		 * else insert here
7787c478bd9Sstevel@tonic-gate 		 */
7797c478bd9Sstevel@tonic-gate 
7807c478bd9Sstevel@tonic-gate 		/*
7817c478bd9Sstevel@tonic-gate 		 * prepend to next
7827c478bd9Sstevel@tonic-gate 		 */
7837c478bd9Sstevel@tonic-gate 		if ((address + bytes) == next->address) {
7847c478bd9Sstevel@tonic-gate 			kmem_free(dst, sizeof (struct memlist));
7857c478bd9Sstevel@tonic-gate 
7867c478bd9Sstevel@tonic-gate 			next->address = address;
7877c478bd9Sstevel@tonic-gate 			next->size += bytes;
7887c478bd9Sstevel@tonic-gate 
7897c478bd9Sstevel@tonic-gate 			goto add_done;
7907c478bd9Sstevel@tonic-gate 		}
7917c478bd9Sstevel@tonic-gate 
7927c478bd9Sstevel@tonic-gate 		/*
7937c478bd9Sstevel@tonic-gate 		 * append to next
7947c478bd9Sstevel@tonic-gate 		 */
7957c478bd9Sstevel@tonic-gate 		if (address == (next->address + next->size)) {
7967c478bd9Sstevel@tonic-gate 			kmem_free(dst, sizeof (struct memlist));
7977c478bd9Sstevel@tonic-gate 
7987c478bd9Sstevel@tonic-gate 			if (next->next) {
7997c478bd9Sstevel@tonic-gate 				/*
8007c478bd9Sstevel@tonic-gate 				 * don't overlap with next->next
8017c478bd9Sstevel@tonic-gate 				 */
8027c478bd9Sstevel@tonic-gate 				if ((address + bytes) > next->next->address) {
8037c478bd9Sstevel@tonic-gate 					retval = -1;
8047c478bd9Sstevel@tonic-gate 					goto add_done;
8057c478bd9Sstevel@tonic-gate 				}
8067c478bd9Sstevel@tonic-gate 				/*
8077c478bd9Sstevel@tonic-gate 				 * concatenate next and next->next
8087c478bd9Sstevel@tonic-gate 				 */
8097c478bd9Sstevel@tonic-gate 				if ((address + bytes) == next->next->address) {
8107c478bd9Sstevel@tonic-gate 					struct memlist *mlp = next->next;
8117c478bd9Sstevel@tonic-gate 
8127c478bd9Sstevel@tonic-gate 					if (next == *list)
8137c478bd9Sstevel@tonic-gate 						*list = next->next;
8147c478bd9Sstevel@tonic-gate 
8157c478bd9Sstevel@tonic-gate 					mlp->address = next->address;
8167c478bd9Sstevel@tonic-gate 					mlp->size += next->size;
8177c478bd9Sstevel@tonic-gate 					mlp->size += bytes;
8187c478bd9Sstevel@tonic-gate 
8197c478bd9Sstevel@tonic-gate 					if (next->prev)
8207c478bd9Sstevel@tonic-gate 						next->prev->next = mlp;
8217c478bd9Sstevel@tonic-gate 					mlp->prev = next->prev;
8227c478bd9Sstevel@tonic-gate 
8237c478bd9Sstevel@tonic-gate 					kmem_free(next,
8247c478bd9Sstevel@tonic-gate 						sizeof (struct memlist));
8257c478bd9Sstevel@tonic-gate 					goto add_done;
8267c478bd9Sstevel@tonic-gate 				}
8277c478bd9Sstevel@tonic-gate 			}
8287c478bd9Sstevel@tonic-gate 
8297c478bd9Sstevel@tonic-gate 			next->size += bytes;
8307c478bd9Sstevel@tonic-gate 
8317c478bd9Sstevel@tonic-gate 			goto add_done;
8327c478bd9Sstevel@tonic-gate 		}
8337c478bd9Sstevel@tonic-gate 
8347c478bd9Sstevel@tonic-gate 		/* don't overlap with next */
8357c478bd9Sstevel@tonic-gate 		if ((address + bytes) > next->address) {
8367c478bd9Sstevel@tonic-gate 			retval = -1;
8377c478bd9Sstevel@tonic-gate 			kmem_free(dst, sizeof (struct memlist));
8387c478bd9Sstevel@tonic-gate 			goto add_done;
8397c478bd9Sstevel@tonic-gate 		}
8407c478bd9Sstevel@tonic-gate 
8417c478bd9Sstevel@tonic-gate 		/*
8427c478bd9Sstevel@tonic-gate 		 * insert before next
8437c478bd9Sstevel@tonic-gate 		 */
8447c478bd9Sstevel@tonic-gate 		dst->prev = prev;
8457c478bd9Sstevel@tonic-gate 		dst->next = next;
8467c478bd9Sstevel@tonic-gate 		next->prev = dst;
8477c478bd9Sstevel@tonic-gate 		if (prev == NULL) {
8487c478bd9Sstevel@tonic-gate 			*list = dst;
8497c478bd9Sstevel@tonic-gate 		} else {
8507c478bd9Sstevel@tonic-gate 			prev->next = dst;
8517c478bd9Sstevel@tonic-gate 		}
8527c478bd9Sstevel@tonic-gate 		goto add_done;
8537c478bd9Sstevel@tonic-gate 	}	/* end for */
8547c478bd9Sstevel@tonic-gate 
8557c478bd9Sstevel@tonic-gate 	/*
8567c478bd9Sstevel@tonic-gate 	 * end of list, prev is valid and next is NULL
8577c478bd9Sstevel@tonic-gate 	 */
8587c478bd9Sstevel@tonic-gate 	prev->next = dst;
8597c478bd9Sstevel@tonic-gate 	dst->prev = prev;
8607c478bd9Sstevel@tonic-gate 	dst->next = NULL;
8617c478bd9Sstevel@tonic-gate 
8627c478bd9Sstevel@tonic-gate add_done:
8637c478bd9Sstevel@tonic-gate 
8647c478bd9Sstevel@tonic-gate 	if (retval != -1)
8657c478bd9Sstevel@tonic-gate 		*npgs += pages;
8667c478bd9Sstevel@tonic-gate 
8677c478bd9Sstevel@tonic-gate 	return (retval);
8687c478bd9Sstevel@tonic-gate }
8697c478bd9Sstevel@tonic-gate 
8707c478bd9Sstevel@tonic-gate /*
8717c478bd9Sstevel@tonic-gate  * delete a span from the memscrub list
8727c478bd9Sstevel@tonic-gate  * subtract from memscrub_phys_pages
8737c478bd9Sstevel@tonic-gate  */
8747c478bd9Sstevel@tonic-gate int
8757c478bd9Sstevel@tonic-gate memscrub_delete_span(pfn_t pfn, pgcnt_t pages)
8767c478bd9Sstevel@tonic-gate {
8777c478bd9Sstevel@tonic-gate 	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
8787c478bd9Sstevel@tonic-gate 	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
8797c478bd9Sstevel@tonic-gate 	struct memlist *dst, *next;
8807c478bd9Sstevel@tonic-gate 	int retval = 0;
8817c478bd9Sstevel@tonic-gate 
8827c478bd9Sstevel@tonic-gate 	mutex_enter(&memscrub_lock);
8837c478bd9Sstevel@tonic-gate 
8847c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
8857c478bd9Sstevel@tonic-gate 	memscrub_printmemlist("memscrub_memlist Before", memscrub_memlist);
8867c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
8877c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "memscrub_delete_span: 0x%llx 0x%llx\n",
8887c478bd9Sstevel@tonic-gate 	    address, bytes);
8897c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
8907c478bd9Sstevel@tonic-gate 
8917c478bd9Sstevel@tonic-gate 	/*
8927c478bd9Sstevel@tonic-gate 	 * find struct memlist containing page
8937c478bd9Sstevel@tonic-gate 	 */
8947c478bd9Sstevel@tonic-gate 	for (next = memscrub_memlist; next != NULL; next = next->next) {
8957c478bd9Sstevel@tonic-gate 		if ((address >= next->address) &&
8967c478bd9Sstevel@tonic-gate 		    (address < next->address + next->size))
8977c478bd9Sstevel@tonic-gate 			break;
8987c478bd9Sstevel@tonic-gate 	}
8997c478bd9Sstevel@tonic-gate 
9007c478bd9Sstevel@tonic-gate 	/*
9017c478bd9Sstevel@tonic-gate 	 * if start address not in list
9027c478bd9Sstevel@tonic-gate 	 */
9037c478bd9Sstevel@tonic-gate 	if (next == NULL) {
9047c478bd9Sstevel@tonic-gate 		retval = -1;
9057c478bd9Sstevel@tonic-gate 		goto delete_done;
9067c478bd9Sstevel@tonic-gate 	}
9077c478bd9Sstevel@tonic-gate 
9087c478bd9Sstevel@tonic-gate 	/*
9097c478bd9Sstevel@tonic-gate 	 * error if size goes off end of this struct memlist
9107c478bd9Sstevel@tonic-gate 	 */
9117c478bd9Sstevel@tonic-gate 	if (address + bytes > next->address + next->size) {
9127c478bd9Sstevel@tonic-gate 		retval = -1;
9137c478bd9Sstevel@tonic-gate 		goto delete_done;
9147c478bd9Sstevel@tonic-gate 	}
9157c478bd9Sstevel@tonic-gate 
9167c478bd9Sstevel@tonic-gate 	/*
9177c478bd9Sstevel@tonic-gate 	 * pages at beginning of struct memlist
9187c478bd9Sstevel@tonic-gate 	 */
9197c478bd9Sstevel@tonic-gate 	if (address == next->address) {
9207c478bd9Sstevel@tonic-gate 		/*
9217c478bd9Sstevel@tonic-gate 		 * if start & size match, delete from list
9227c478bd9Sstevel@tonic-gate 		 */
9237c478bd9Sstevel@tonic-gate 		if (bytes == next->size) {
9247c478bd9Sstevel@tonic-gate 			if (next == memscrub_memlist)
9257c478bd9Sstevel@tonic-gate 				memscrub_memlist = next->next;
9267c478bd9Sstevel@tonic-gate 			if (next->prev != NULL)
9277c478bd9Sstevel@tonic-gate 				next->prev->next = next->next;
9287c478bd9Sstevel@tonic-gate 			if (next->next != NULL)
9297c478bd9Sstevel@tonic-gate 				next->next->prev = next->prev;
9307c478bd9Sstevel@tonic-gate 
9317c478bd9Sstevel@tonic-gate 			kmem_free(next, sizeof (struct memlist));
9327c478bd9Sstevel@tonic-gate 		} else {
9337c478bd9Sstevel@tonic-gate 		/*
9347c478bd9Sstevel@tonic-gate 		 * increment start address by bytes
9357c478bd9Sstevel@tonic-gate 		 */
9367c478bd9Sstevel@tonic-gate 			next->address += bytes;
9377c478bd9Sstevel@tonic-gate 			next->size -= bytes;
9387c478bd9Sstevel@tonic-gate 		}
9397c478bd9Sstevel@tonic-gate 		goto delete_done;
9407c478bd9Sstevel@tonic-gate 	}
9417c478bd9Sstevel@tonic-gate 
9427c478bd9Sstevel@tonic-gate 	/*
9437c478bd9Sstevel@tonic-gate 	 * pages at end of struct memlist
9447c478bd9Sstevel@tonic-gate 	 */
9457c478bd9Sstevel@tonic-gate 	if (address + bytes == next->address + next->size) {
9467c478bd9Sstevel@tonic-gate 		/*
9477c478bd9Sstevel@tonic-gate 		 * decrement size by bytes
9487c478bd9Sstevel@tonic-gate 		 */
9497c478bd9Sstevel@tonic-gate 		next->size -= bytes;
9507c478bd9Sstevel@tonic-gate 		goto delete_done;
9517c478bd9Sstevel@tonic-gate 	}
9527c478bd9Sstevel@tonic-gate 
9537c478bd9Sstevel@tonic-gate 	/*
9547c478bd9Sstevel@tonic-gate 	 * delete a span in the middle of the struct memlist
9557c478bd9Sstevel@tonic-gate 	 */
9567c478bd9Sstevel@tonic-gate 	{
9577c478bd9Sstevel@tonic-gate 		/*
9587c478bd9Sstevel@tonic-gate 		 * create a new struct memlist
9597c478bd9Sstevel@tonic-gate 		 */
9607c478bd9Sstevel@tonic-gate 		dst = (struct memlist *)
9617c478bd9Sstevel@tonic-gate 		    kmem_alloc(sizeof (struct memlist), KM_NOSLEEP);
9627c478bd9Sstevel@tonic-gate 
9637c478bd9Sstevel@tonic-gate 		if (dst == NULL) {
9647c478bd9Sstevel@tonic-gate 			retval = -1;
9657c478bd9Sstevel@tonic-gate 			goto delete_done;
9667c478bd9Sstevel@tonic-gate 		}
9677c478bd9Sstevel@tonic-gate 
9687c478bd9Sstevel@tonic-gate 		/*
9697c478bd9Sstevel@tonic-gate 		 * existing struct memlist gets address
9707c478bd9Sstevel@tonic-gate 		 * and size up to pfn
9717c478bd9Sstevel@tonic-gate 		 */
9727c478bd9Sstevel@tonic-gate 		dst->address = address + bytes;
9737c478bd9Sstevel@tonic-gate 		dst->size = (next->address + next->size) - dst->address;
9747c478bd9Sstevel@tonic-gate 		next->size = address - next->address;
9757c478bd9Sstevel@tonic-gate 
9767c478bd9Sstevel@tonic-gate 		/*
9777c478bd9Sstevel@tonic-gate 		 * new struct memlist gets address starting
9787c478bd9Sstevel@tonic-gate 		 * after pfn, until end
9797c478bd9Sstevel@tonic-gate 		 */
9807c478bd9Sstevel@tonic-gate 
9817c478bd9Sstevel@tonic-gate 		/*
9827c478bd9Sstevel@tonic-gate 		 * link in new memlist after old
9837c478bd9Sstevel@tonic-gate 		 */
9847c478bd9Sstevel@tonic-gate 		dst->next = next->next;
9857c478bd9Sstevel@tonic-gate 		dst->prev = next;
9867c478bd9Sstevel@tonic-gate 
9877c478bd9Sstevel@tonic-gate 		if (next->next != NULL)
9887c478bd9Sstevel@tonic-gate 			next->next->prev = dst;
9897c478bd9Sstevel@tonic-gate 		next->next = dst;
9907c478bd9Sstevel@tonic-gate 	}
9917c478bd9Sstevel@tonic-gate 
9927c478bd9Sstevel@tonic-gate delete_done:
9937c478bd9Sstevel@tonic-gate 	if (retval != -1) {
9947c478bd9Sstevel@tonic-gate 		memscrub_phys_pages -= pages;
9957c478bd9Sstevel@tonic-gate 		if (memscrub_phys_pages == 0)
9967c478bd9Sstevel@tonic-gate 			disable_memscrub = 1;
9977c478bd9Sstevel@tonic-gate 	}
9987c478bd9Sstevel@tonic-gate 
9997c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
10007c478bd9Sstevel@tonic-gate 	memscrub_printmemlist("memscrub_memlist After", memscrub_memlist);
10017c478bd9Sstevel@tonic-gate 	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
10027c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
10037c478bd9Sstevel@tonic-gate 
10047c478bd9Sstevel@tonic-gate 	mutex_exit(&memscrub_lock);
10057c478bd9Sstevel@tonic-gate 	return (retval);
10067c478bd9Sstevel@tonic-gate }
10077c478bd9Sstevel@tonic-gate 
10087c478bd9Sstevel@tonic-gate static void
10097c478bd9Sstevel@tonic-gate memscrub_scan(uint_t blks, ms_paddr_t src)
10107c478bd9Sstevel@tonic-gate {
10117c478bd9Sstevel@tonic-gate 	uint_t 		psz, bpp, pgsread;
10127c478bd9Sstevel@tonic-gate 	pfn_t		pfn;
10137c478bd9Sstevel@tonic-gate 	ms_paddr_t	pa;
10147c478bd9Sstevel@tonic-gate 	caddr_t		va;
10157c478bd9Sstevel@tonic-gate 	on_trap_data_t	otd;
10167c478bd9Sstevel@tonic-gate 
10177c478bd9Sstevel@tonic-gate 	extern void memscrub_read(caddr_t src, uint_t blks);
10187c478bd9Sstevel@tonic-gate 
10197c478bd9Sstevel@tonic-gate 	ASSERT(mutex_owned(&memscrub_lock));
10207c478bd9Sstevel@tonic-gate 
10217c478bd9Sstevel@tonic-gate 	pgsread = 0;
10227c478bd9Sstevel@tonic-gate 	pa = src;
10237c478bd9Sstevel@tonic-gate 
10247c478bd9Sstevel@tonic-gate 	while (blks != 0) {
10257c478bd9Sstevel@tonic-gate 		/* Ensure the PA is properly aligned */
10267c478bd9Sstevel@tonic-gate 		if (((pa & MMU_PAGEMASK4M) == pa) &&
10277c478bd9Sstevel@tonic-gate 			(blks >= MEMSCRUB_BPP4M)) {
10287c478bd9Sstevel@tonic-gate 			psz = MMU_PAGESIZE4M;
10297c478bd9Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP4M;
10307c478bd9Sstevel@tonic-gate 		} else if (((pa & MMU_PAGEMASK512K) == pa) &&
10317c478bd9Sstevel@tonic-gate 			(blks >= MEMSCRUB_BPP512K)) {
10327c478bd9Sstevel@tonic-gate 			psz = MMU_PAGESIZE512K;
10337c478bd9Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP512K;
10347c478bd9Sstevel@tonic-gate 		} else if (((pa & MMU_PAGEMASK64K) == pa) &&
10357c478bd9Sstevel@tonic-gate 			(blks >= MEMSCRUB_BPP64K)) {
10367c478bd9Sstevel@tonic-gate 			psz = MMU_PAGESIZE64K;
10377c478bd9Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP64K;
10387c478bd9Sstevel@tonic-gate 		} else if ((pa & MMU_PAGEMASK) == pa) {
10397c478bd9Sstevel@tonic-gate 			psz = MMU_PAGESIZE;
10407c478bd9Sstevel@tonic-gate 			bpp = MEMSCRUB_BPP;
10417c478bd9Sstevel@tonic-gate 		} else {
10427c478bd9Sstevel@tonic-gate 			if (memscrub_verbose) {
10437c478bd9Sstevel@tonic-gate 				cmn_err(CE_NOTE, "Memory scrubber ignoring "
10447c478bd9Sstevel@tonic-gate 				    "non-page aligned block starting at 0x%"
10457c478bd9Sstevel@tonic-gate 				    PRIx64, src);
10467c478bd9Sstevel@tonic-gate 			}
10477c478bd9Sstevel@tonic-gate 			return;
10487c478bd9Sstevel@tonic-gate 		}
10497c478bd9Sstevel@tonic-gate 		if (blks < bpp) bpp = blks;
10507c478bd9Sstevel@tonic-gate 
10517c478bd9Sstevel@tonic-gate #ifdef MEMSCRUB_DEBUG
10527c478bd9Sstevel@tonic-gate 		cmn_err(CE_NOTE, "Going to run psz=%x, "
10537c478bd9Sstevel@tonic-gate 		    "bpp=%x pa=%llx\n", psz, bpp, pa);
10547c478bd9Sstevel@tonic-gate #endif /* MEMSCRUB_DEBUG */
10557c478bd9Sstevel@tonic-gate 
10567c478bd9Sstevel@tonic-gate 		/*
10577c478bd9Sstevel@tonic-gate 		 * MEMSCRUBBASE is a 4MB aligned page in the
10587c478bd9Sstevel@tonic-gate 		 * kernel so that we can quickly map the PA
10597c478bd9Sstevel@tonic-gate 		 * to a VA for the block loads performed in
10607c478bd9Sstevel@tonic-gate 		 * memscrub_read.
10617c478bd9Sstevel@tonic-gate 		 */
10627c478bd9Sstevel@tonic-gate 		pfn = mmu_btop(pa);
10637c478bd9Sstevel@tonic-gate 		va = (caddr_t)MEMSCRUBBASE;
10647c478bd9Sstevel@tonic-gate 		hat_devload(kas.a_hat, va, psz, pfn, PROT_READ,
10657c478bd9Sstevel@tonic-gate 			HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
10667c478bd9Sstevel@tonic-gate 
10677c478bd9Sstevel@tonic-gate 		/*
10687c478bd9Sstevel@tonic-gate 		 * Can't allow the memscrubber to migrate across CPUs as
10697c478bd9Sstevel@tonic-gate 		 * we need to know whether CEEN is enabled for the current
10707c478bd9Sstevel@tonic-gate 		 * CPU to enable us to scrub the memory. Don't use
10717c478bd9Sstevel@tonic-gate 		 * kpreempt_disable as the time we take to scan a span (even
10727c478bd9Sstevel@tonic-gate 		 * without cpu_check_ce having to manually cpu_check_block)
10737c478bd9Sstevel@tonic-gate 		 * is too long to hold a higher priority thread (eg, RT)
10747c478bd9Sstevel@tonic-gate 		 * off cpu.
10757c478bd9Sstevel@tonic-gate 		 */
10767c478bd9Sstevel@tonic-gate 		thread_affinity_set(curthread, CPU_CURRENT);
10777c478bd9Sstevel@tonic-gate 
10787c478bd9Sstevel@tonic-gate 		/*
10797c478bd9Sstevel@tonic-gate 		 * Protect read scrub from async faults.  For now, we simply
10807c478bd9Sstevel@tonic-gate 		 * maintain a count of such faults caught.
10817c478bd9Sstevel@tonic-gate 		 */
10827c478bd9Sstevel@tonic-gate 
10837c478bd9Sstevel@tonic-gate 		if (!on_trap(&otd, OT_DATA_EC)) {
10847c478bd9Sstevel@tonic-gate 			memscrub_read(va, bpp);
10857c478bd9Sstevel@tonic-gate 			/*
10867c478bd9Sstevel@tonic-gate 			 * Check if CEs require logging
10877c478bd9Sstevel@tonic-gate 			 */
10887c478bd9Sstevel@tonic-gate 			cpu_check_ce(SCRUBBER_CEEN_CHECK,
10897c478bd9Sstevel@tonic-gate 			    (uint64_t)pa, va, psz);
1090*a08365b4Srjnoe 			no_trap();
10917c478bd9Sstevel@tonic-gate 			thread_affinity_clear(curthread);
10927c478bd9Sstevel@tonic-gate 		} else {
10937c478bd9Sstevel@tonic-gate 			no_trap();
10947c478bd9Sstevel@tonic-gate 			thread_affinity_clear(curthread);
10957c478bd9Sstevel@tonic-gate 
10967c478bd9Sstevel@tonic-gate 			/*
10977c478bd9Sstevel@tonic-gate 			 * Got an async error..
10987c478bd9Sstevel@tonic-gate 			 * Try rescanning it at MMU_PAGESIZE
10997c478bd9Sstevel@tonic-gate 			 * granularity if we were trying to
11007c478bd9Sstevel@tonic-gate 			 * read at a larger page size.
11017c478bd9Sstevel@tonic-gate 			 * This is to ensure we continue to
11027c478bd9Sstevel@tonic-gate 			 * scan the rest of the span.
11037c478bd9Sstevel@tonic-gate 			 */
11047c478bd9Sstevel@tonic-gate 			if (psz > MMU_PAGESIZE) {
11057c478bd9Sstevel@tonic-gate 			    caddr_t vaddr = va;
11067c478bd9Sstevel@tonic-gate 			    ms_paddr_t paddr = pa;
11077c478bd9Sstevel@tonic-gate 			    int tmp = 0;
11087c478bd9Sstevel@tonic-gate 			    for (; tmp < bpp; tmp += MEMSCRUB_BPP) {
11097c478bd9Sstevel@tonic-gate 				thread_affinity_set(curthread, CPU_CURRENT);
1110*a08365b4Srjnoe 				if (!on_trap(&otd, OT_DATA_EC)) {
11117c478bd9Sstevel@tonic-gate 				    memscrub_read(vaddr, MEMSCRUB_BPP);
11127c478bd9Sstevel@tonic-gate 				    cpu_check_ce(SCRUBBER_CEEN_CHECK,
11137c478bd9Sstevel@tonic-gate 					(uint64_t)paddr, vaddr, MMU_PAGESIZE);
1114*a08365b4Srjnoe 				    no_trap();
1115*a08365b4Srjnoe 				} else {
1116*a08365b4Srjnoe 				    no_trap();
1117*a08365b4Srjnoe 				    memscrub_counts.errors_found.value.ui32++;
1118*a08365b4Srjnoe 				}
11197c478bd9Sstevel@tonic-gate 				thread_affinity_clear(curthread);
11207c478bd9Sstevel@tonic-gate 				vaddr += MMU_PAGESIZE;
11217c478bd9Sstevel@tonic-gate 				paddr += MMU_PAGESIZE;
11227c478bd9Sstevel@tonic-gate 			    }
11237c478bd9Sstevel@tonic-gate 			}
11247c478bd9Sstevel@tonic-gate 		}
11257c478bd9Sstevel@tonic-gate 		hat_unload(kas.a_hat, va, psz, HAT_UNLOAD_UNLOCK);
11267c478bd9Sstevel@tonic-gate 
11277c478bd9Sstevel@tonic-gate 		blks -= bpp;
11287c478bd9Sstevel@tonic-gate 		pa += psz;
11297c478bd9Sstevel@tonic-gate 		pgsread++;
11307c478bd9Sstevel@tonic-gate 	}
11317c478bd9Sstevel@tonic-gate 	if (memscrub_verbose) {
11327c478bd9Sstevel@tonic-gate 		cmn_err(CE_NOTE, "Memory scrubber read 0x%x pages starting "
11337c478bd9Sstevel@tonic-gate 		    "at 0x%" PRIx64, pgsread, src);
11347c478bd9Sstevel@tonic-gate 	}
11357c478bd9Sstevel@tonic-gate }
11367c478bd9Sstevel@tonic-gate 
11377c478bd9Sstevel@tonic-gate /*
11387c478bd9Sstevel@tonic-gate  * The memory add/delete callback mechanism does not pass in the
11397c478bd9Sstevel@tonic-gate  * page ranges. The phys_install list has been updated though, so
11407c478bd9Sstevel@tonic-gate  * create a new scrub list from it.
11417c478bd9Sstevel@tonic-gate  */
11427c478bd9Sstevel@tonic-gate 
11437c478bd9Sstevel@tonic-gate static int
11447c478bd9Sstevel@tonic-gate new_memscrub()
11457c478bd9Sstevel@tonic-gate {
11467c478bd9Sstevel@tonic-gate 	struct memlist *src, *list, *old_list;
11477c478bd9Sstevel@tonic-gate 	uint_t npgs;
11487c478bd9Sstevel@tonic-gate 
11497c478bd9Sstevel@tonic-gate 	/*
11507c478bd9Sstevel@tonic-gate 	 * copy phys_install to memscrub_memlist
11517c478bd9Sstevel@tonic-gate 	 */
11527c478bd9Sstevel@tonic-gate 	list = NULL;
11537c478bd9Sstevel@tonic-gate 	npgs = 0;
11547c478bd9Sstevel@tonic-gate 	memlist_read_lock();
11557c478bd9Sstevel@tonic-gate 	for (src = phys_install; src; src = src->next) {
11567c478bd9Sstevel@tonic-gate 		if (memscrub_add_span_gen((pfn_t)(src->address >> PAGESHIFT),
11577c478bd9Sstevel@tonic-gate 		    (pgcnt_t)(src->size >> PAGESHIFT), &list, &npgs)) {
11587c478bd9Sstevel@tonic-gate 			memlist_read_unlock();
11597c478bd9Sstevel@tonic-gate 			while (list) {
11607c478bd9Sstevel@tonic-gate 				struct memlist *el;
11617c478bd9Sstevel@tonic-gate 
11627c478bd9Sstevel@tonic-gate 				el = list;
11637c478bd9Sstevel@tonic-gate 				list = list->next;
11647c478bd9Sstevel@tonic-gate 				kmem_free(el, sizeof (struct memlist));
11657c478bd9Sstevel@tonic-gate 			}
11667c478bd9Sstevel@tonic-gate 			return (-1);
11677c478bd9Sstevel@tonic-gate 		}
11687c478bd9Sstevel@tonic-gate 	}
11697c478bd9Sstevel@tonic-gate 	memlist_read_unlock();
11707c478bd9Sstevel@tonic-gate 
11717c478bd9Sstevel@tonic-gate 	mutex_enter(&memscrub_lock);
11727c478bd9Sstevel@tonic-gate 	memscrub_phys_pages = npgs;
11737c478bd9Sstevel@tonic-gate 	old_list = memscrub_memlist;
11747c478bd9Sstevel@tonic-gate 	memscrub_memlist = list;
11757c478bd9Sstevel@tonic-gate 	mutex_exit(&memscrub_lock);
11767c478bd9Sstevel@tonic-gate 
11777c478bd9Sstevel@tonic-gate 	while (old_list) {
11787c478bd9Sstevel@tonic-gate 		struct memlist *el;
11797c478bd9Sstevel@tonic-gate 
11807c478bd9Sstevel@tonic-gate 		el = old_list;
11817c478bd9Sstevel@tonic-gate 		old_list = old_list->next;
11827c478bd9Sstevel@tonic-gate 		kmem_free(el, sizeof (struct memlist));
11837c478bd9Sstevel@tonic-gate 	}
11847c478bd9Sstevel@tonic-gate 	return (0);
11857c478bd9Sstevel@tonic-gate }
11867c478bd9Sstevel@tonic-gate 
/*
 * DR (dynamic reconfiguration) callback, invoked after memory has been
 * added.  phys_install has already been updated by the framework, so
 * pause the scrubber and rebuild the scrub list from it.
 */
/*ARGSUSED*/
static void
memscrub_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	/*
	 * We increment pause_memscrub before entering new_memscrub(). This
	 * will force the memscrubber to sleep, allowing the DR callback
	 * thread to acquire memscrub_lock in new_memscrub(). The use of
	 * atomic_add_32() allows concurrent memory DR operations to use the
	 * callbacks safely.
	 */
	atomic_add_32(&pause_memscrub, 1);
	ASSERT(pause_memscrub != 0);

	/*
	 * "Don't care" if we are not scrubbing new memory.
	 */
	(void) new_memscrub();

	/* Restore the pause setting. */
	atomic_add_32(&pause_memscrub, -1);
}
12117c478bd9Sstevel@tonic-gate 
12127c478bd9Sstevel@tonic-gate /*ARGSUSED*/
12137c478bd9Sstevel@tonic-gate static int
12147c478bd9Sstevel@tonic-gate memscrub_mem_config_pre_del(
12157c478bd9Sstevel@tonic-gate 	void *arg,
12167c478bd9Sstevel@tonic-gate 	pgcnt_t delta_pages)
12177c478bd9Sstevel@tonic-gate {
12187c478bd9Sstevel@tonic-gate 	/* Nothing to do. */
12197c478bd9Sstevel@tonic-gate 	return (0);
12207c478bd9Sstevel@tonic-gate }
12217c478bd9Sstevel@tonic-gate 
/*
 * DR callback invoked after a memory delete has completed (or been
 * cancelled).  phys_install reflects the final state, so pause the
 * scrubber and rebuild the scrub list; deleted memory may already be
 * physically disconnected, so scrubbing it must stop immediately.
 */
/*ARGSUSED*/
static void
memscrub_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	/*
	 * We increment pause_memscrub before entering new_memscrub(). This
	 * will force the memscrubber to sleep, allowing the DR callback
	 * thread to acquire memscrub_lock in new_memscrub(). The use of
	 * atomic_add_32() allows concurrent memory DR operations to use the
	 * callbacks safely.
	 */
	atomic_add_32(&pause_memscrub, 1);
	ASSERT(pause_memscrub != 0);

	/*
	 * Must stop scrubbing deleted memory as it may be disconnected.
	 * If the list rebuild fails we cannot know what is safe to scrub,
	 * so shut the scrubber down entirely.
	 */
	if (new_memscrub()) {
		disable_memscrub = 1;
	}

	/* Restore the pause setting. */
	atomic_add_32(&pause_memscrub, -1);
}
12497c478bd9Sstevel@tonic-gate 
/*
 * Callback vector registered with the kphysm memory DR framework.
 * Entry order must match kphysm_setup_vector_t.
 */
static kphysm_setup_vector_t memscrub_mem_config_vec = {
	KPHYSM_SETUP_VERSION,		/* NOTE(review): field is the interface version */
	memscrub_mem_config_post_add,	/* post_add */
	memscrub_mem_config_pre_del,	/* pre_del */
	memscrub_mem_config_post_del,	/* post_del */
};
12567c478bd9Sstevel@tonic-gate 
12577c478bd9Sstevel@tonic-gate static void
12587c478bd9Sstevel@tonic-gate memscrub_init_mem_config()
12597c478bd9Sstevel@tonic-gate {
12607c478bd9Sstevel@tonic-gate 	int ret;
12617c478bd9Sstevel@tonic-gate 
12627c478bd9Sstevel@tonic-gate 	ret = kphysm_setup_func_register(&memscrub_mem_config_vec,
12637c478bd9Sstevel@tonic-gate 	    (void *)NULL);
12647c478bd9Sstevel@tonic-gate 	ASSERT(ret == 0);
12657c478bd9Sstevel@tonic-gate }
12667c478bd9Sstevel@tonic-gate 
12677c478bd9Sstevel@tonic-gate static void
12687c478bd9Sstevel@tonic-gate memscrub_uninit_mem_config()
12697c478bd9Sstevel@tonic-gate {
12707c478bd9Sstevel@tonic-gate 	/* This call is OK if the register call was not done. */
12717c478bd9Sstevel@tonic-gate 	kphysm_setup_func_unregister(&memscrub_mem_config_vec, (void *)NULL);
12727c478bd9Sstevel@tonic-gate }
1273