/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#define	_SCM_

#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/cred.h>
#include <sys/ddi.h>
#include <sys/nsc_thread.h>

#include "sd_bcache.h"
#include "sd_misc.h"
#include "sd_trace.h"
#include "sd_ft.h"
#include "sd_io.h"
#include "sd_bio.h"
#include "sd_pcu.h"
#include "sd_tdaemon.h"
#include "sdbc_ioctl.h"
#include <sys/ncall/ncall.h>
#include <sys/nsctl/nsctl.h>
#include <sys/nsctl/nsvers.h>

#include <sys/sdt.h>		/* dtrace is S10 or later */

#include <sys/unistat/spcs_s.h>
#include <sys/unistat/spcs_s_k.h>
#include <sys/unistat/spcs_errors.h>
static dev_info_t *dev_dip;
dev_info_t *sdbc_get_dip();


/*
 *  A global variable to set the threshold for large writes to
 *  be in write through mode when NVRAM is present. This should
 *  solve the NVRAM bandwidth problem.
 */

int sdbc_wrthru_len;
nsc_size_t sdbc_max_fbas = _SD_MAX_FBAS;
int sdbc_max_devs = 0;

krwlock_t sdbc_queue_lock;

static int _sd_debug_level = 0;

static kmutex_t _sd_block_lk;

#define	REGISTER_SVC(X, Y) (ncall_register_svc(X, Y))
#define	UNREGISTER_SVC(X) (ncall_unregister_svc(X))

const int sdbc_major_rev = ISS_VERSION_MAJ;
const int sdbc_minor_rev = ISS_VERSION_MIN;
const int sdbc_micro_rev = ISS_VERSION_MIC;
const int sdbc_baseline_rev = ISS_VERSION_NUM;
static char sdbc_version[16];

static int _sdbc_attached = 0;

static int _sdbc_print(dev_t dev, char *s);
static int sdbcunload(void);
static int sdbcload(void);
static int sdbcopen(dev_t *devp, int flag, int otyp, cred_t *crp);
static int sdbcclose(dev_t dev, int flag, int otyp, cred_t *crp);
static int sdbcioctl(dev_t dev, int cmd, void *arg, int mode, cred_t *crp,
    int *rvp);
static int _sdbc_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int _sdbc_probe(dev_info_t *dip);
static int _sdbc_attach(dev_info_t *, ddi_attach_cmd_t);
static int _sdbc_detach(dev_info_t *, ddi_detach_cmd_t);
static int _sdbc_reset(dev_info_t *, ddi_reset_cmd_t);

#ifdef sun
/*
 * Solaris specific driver module interface code.
 */

#ifdef USES_SOFT_STATE
struct	sdbc_state {
	dev_info_t	*dip;		/* everyone would need a devinfo */
};

static	void	*sdbc_statep;		/* for soft state routines */
#endif /* USES_SOFT_STATE */

static	struct	cb_ops sdbc_cb_ops = {
	sdbcopen,	/* open */
	sdbcclose,	/* close */
	nodev,		/* not a block driver, strategy not an entry point */
	_sdbc_print,	/* print routine */
	nodev,		/* no dump routine */
	nodev,		/* read */
	nodev,		/* write */
	(int (*) ()) sdbcioctl,	/* ioctl */
	nodev,		/* no devmap routine */
	nodev,		/* no mmap routine */
	nodev,		/* no segmap routine */
	nochpoll,	/* no chpoll routine */
	ddi_prop_op,
	0,		/* not a STREAMS driver, no cb_str routine */
	D_NEW | D_MP,	/* safe for multi-thread/multi-processor */
};


static	struct	dev_ops sdbc_ops = {
	DEVO_REV,			/* Driver build version */
	0,				/* device reference count */
	_sdbc_getinfo,
	nulldev,
	_sdbc_probe,
	_sdbc_attach,
	_sdbc_detach,
	_sdbc_reset,
	&sdbc_cb_ops,
	(struct bus_ops *)NULL
};

static struct modldrv sdbc_ldrv = {
	&mod_driverops,
	"nws:Storage Cache:" ISS_VERSION_STR,
	&sdbc_ops
};

static	struct modlinkage sdbc_modlinkage = {
	MODREV_1,
	&sdbc_ldrv,
	NULL
};

/*
 * dynmem interface
 */
static int mutex_and_condvar_flag;

/*
 * Solaris module load time code
 */
int
_init(void)
{

	int err;

	mutex_and_condvar_flag = 0;

#ifdef USES_SOFT_STATE
	ddi_soft_state_init(&sdbc_statep, sizeof (struct sdbc_state),
	    MAX_INSTANCES);
#endif /* USES_SOFT_STATE */

	/*
	 * It is "load" time; call the UnixWare equivalent.
	 */
	err = sdbcload();
	if (!err)
		err = mod_install(&sdbc_modlinkage);

	if (err) {
		(void) sdbcunload();
#ifdef USES_SOFT_STATE
		ddi_soft_state_fini(&sdbc_statep);
#endif /* USES_SOFT_STATE */
	}

	if (!err) {
		mutex_and_condvar_flag = 1;
		mutex_init(&dynmem_processing_dm.thread_dm_lock, "dynmem",
		    MUTEX_DRIVER, NULL);
		cv_init(&dynmem_processing_dm.thread_dm_cv, "dynmem",
		    CV_DRIVER, NULL);
	}

	return (err);

}
/*
 * Solaris module unload time code
 */

int
_fini(void)
{
	int err;

	if (_sd_cache_initialized) {
		return (EBUSY);
	} else if (_sd_ioset &&
	    (_sd_ioset->set_nlive || _sd_ioset->set_nthread)) {
		cmn_err(CE_WARN, "!sdbc:_fini() %d threads still "
		    "active; %d threads in set\n", _sd_ioset->set_nlive,
		    _sd_ioset->set_nthread);
		return (EBUSY);
	}
	if ((err = mod_remove(&sdbc_modlinkage)) == 0) {
		DTRACE_PROBE2(_sdbc_fini_mod_remove_succeeded,
		    int, err,
		    struct modlinkage *, &sdbc_modlinkage);
		err = sdbcunload();
#ifdef USES_SOFT_STATE
		ddi_soft_state_fini(&sdbc_statep);
#endif /* USES_SOFT_STATE */

		if (mutex_and_condvar_flag) {
			cv_destroy(&dynmem_processing_dm.thread_dm_cv);
			mutex_destroy(&dynmem_processing_dm.thread_dm_lock);
			mutex_and_condvar_flag = 0;
		}
	}

	return (err);
}

/*
 * Solaris module info code
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&sdbc_modlinkage, modinfop));
}

/*ARGSUSED*/
static int
_sdbc_probe(dev_info_t *dip)
{
	return (DDI_PROBE_SUCCESS);
}

/*
 * Attach an instance of the device. This happens before an open
 * can succeed.
 */
static int
_sdbc_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	_dm_process_vars_t local_dm_process_vars;
	struct buf bp;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 *  Get the threshold value for setting large writes in
	 *  write through mode (when NVRAM is present)
	 */

	sdbc_wrthru_len =  ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_wrthru_thresh", 64);

	/* Get sdbc_max_fbas from sdbc.conf */
	sdbc_max_fbas =  ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_max_fbas",
	    _SD_MAX_FBAS);

	bp.b_bcount = (size_t)FBA_SIZE(sdbc_max_fbas);
	minphys(&bp); /* clamps value to maxphys */

	sdbc_max_fbas = FBA_NUM(bp.b_bcount);

	if (sdbc_max_fbas > _SD_MAX_FBAS) {
		cmn_err(CE_WARN,
		    "!_sdbc_attach: sdbc_max_fbas set to %d", _SD_MAX_FBAS);
		sdbc_max_fbas = _SD_MAX_FBAS;
	}

	/*
	 * -get the maximum list length for multipage dynmem
	 * -time between aging
	 * -number of agings before dealloc
	 * -what to report D0=shutdown, D1=thread variables
	 */
	dynmem_processing_dm.max_dyn_list = MAX_DYN_LIST_DEFAULT;
	dynmem_processing_dm.monitor_dynmem_process =
	    MONITOR_DYNMEM_PROCESS_DEFAULT;
	dynmem_processing_dm.cache_aging_ct1 = CACHE_AGING_CT_DEFAULT;
	dynmem_processing_dm.cache_aging_ct2 = CACHE_AGING_CT_DEFAULT;
	dynmem_processing_dm.cache_aging_ct3 = CACHE_AGING_CT_DEFAULT;
	dynmem_processing_dm.cache_aging_sec1 = CACHE_AGING_SEC1_DEFAULT;
	dynmem_processing_dm.cache_aging_sec2 = CACHE_AGING_SEC2_DEFAULT;
	dynmem_processing_dm.cache_aging_sec3 = CACHE_AGING_SEC3_DEFAULT;
	dynmem_processing_dm.cache_aging_pcnt1 = CACHE_AGING_PCNT1_DEFAULT;
	dynmem_processing_dm.cache_aging_pcnt2 = CACHE_AGING_PCNT2_DEFAULT;
	dynmem_processing_dm.max_holds_pcnt = MAX_HOLDS_PCNT_DEFAULT;
	dynmem_processing_dm.process_directive = PROCESS_DIRECTIVE_DEFAULT;

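	/*
	 * Each of the defaults above may be overridden from sdbc.conf
	 * through the properties read below, e.g. (illustrative values
	 * only):
	 *
	 *	sdbc_max_dyn_list=8;
	 *	sdbc_monitor_dynmem=0;
	 */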
	local_dm_process_vars.max_dyn_list = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_max_dyn_list",
	    MAX_DYN_LIST_DEFAULT);

	local_dm_process_vars.monitor_dynmem_process =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_monitor_dynmem",
	    MONITOR_DYNMEM_PROCESS_DEFAULT);

	local_dm_process_vars.cache_aging_ct1 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_ct1",
	    CACHE_AGING_CT_DEFAULT);

	local_dm_process_vars.cache_aging_ct2 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_ct2",
	    CACHE_AGING_CT_DEFAULT);

	local_dm_process_vars.cache_aging_ct3 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_ct3",
	    CACHE_AGING_CT_DEFAULT);

	local_dm_process_vars.cache_aging_sec1 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_sec1",
	    CACHE_AGING_SEC1_DEFAULT);

	local_dm_process_vars.cache_aging_sec2 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_sec2",
	    CACHE_AGING_SEC2_DEFAULT);

	local_dm_process_vars.cache_aging_sec3 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_sec3",
	    CACHE_AGING_SEC3_DEFAULT);

	local_dm_process_vars.cache_aging_pcnt1 =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_pcnt1",
	    CACHE_AGING_PCNT1_DEFAULT);

	local_dm_process_vars.cache_aging_pcnt2 =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_cache_aging_pcnt2",
	    CACHE_AGING_PCNT2_DEFAULT);

	local_dm_process_vars.process_directive =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_process_directive",
	    PROCESS_DIRECTIVE_DEFAULT);

	local_dm_process_vars.max_holds_pcnt = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "sdbc_max_holds_pcnt",
	    MAX_HOLDS_PCNT_DEFAULT);

	(void) sdbc_edit_xfer_process_vars_dm(&local_dm_process_vars);

#define	MINOR_NAME	"c,sdbc"		/* character device */
#define	MINOR_NUMBER	0
#ifdef MINOR_NAME
	if (ddi_create_minor_node(dip, MINOR_NAME, S_IFCHR,
	    MINOR_NUMBER, DDI_PSEUDO, 0) != DDI_SUCCESS) {
		/* free anything we allocated here */
		return (DDI_FAILURE);
	}
#endif /* MINOR_NAME */

	/* Announce presence of the device */
	ddi_report_dev(dip);
	dev_dip = dip;
	/* mark the device as attached, opens may proceed */
	_sdbc_attached = 1;

	rw_init(&sdbc_queue_lock, NULL, RW_DRIVER, NULL);

	return (DDI_SUCCESS);
}

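/*
 * Detach an instance of the device. Refused while the cache is
 * still configured.
 */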
/*ARGSUSED*/
static int
_sdbc_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	if (cmd == DDI_DETACH) {
		/*
		 * Check first if the cache is still in use
		 * and if it is, prevent the detach.
		 */
		if (_sd_cache_initialized)
			return (EBUSY);

		_sdbc_attached = 0;

		rw_destroy(&sdbc_queue_lock);
		dev_dip = NULL;

		return (DDI_SUCCESS);
	} else
		return (DDI_FAILURE);
}

/*ARGSUSED*/
static int
_sdbc_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	return (DDI_SUCCESS);
}

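/*
 * getinfo(9E) entry point: map a dev_t to its instance number or to
 * the devinfo node for that instance.
 */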
/*ARGSUSED*/
static int
_sdbc_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	dev_t dev;
#ifdef USES_SOFT_STATE
	struct sdbc_state *xsp;
	int instance;
#endif /* USES_SOFT_STATE */
	int rc;

	switch (cmd) {
		case DDI_INFO_DEVT2INSTANCE:
			dev = (dev_t)arg;
			/* The "instance" number is the minor number */
			*result = (void *)(unsigned long)getminor(dev);
			rc = DDI_SUCCESS;
			break;

		case DDI_INFO_DEVT2DEVINFO:
			dev = (dev_t)arg;
#ifdef USES_SOFT_STATE
			/* the instance number is the minor number */
			instance = getminor(dev);
			xsp = ddi_get_soft_state(sdbc_statep, instance);
			if (xsp == NULL)
				return (DDI_FAILURE);
			*result = (void *) xsp->dip;
#else
			*result = (void *) NULL;
#endif /* USES_SOFT_STATE */
			rc = DDI_SUCCESS;
			break;

		default:
			rc = DDI_FAILURE;
			break;
	}
	return (rc);
}

/*ARGSUSED*/
int
_sdbc_print(dev_t dev, char *s)
{
	cmn_err(CE_WARN, "!sdbc(_sdbc_print) %s", s);
	return (0);
}
#else
MOD_DRV_WRAPPER(sdbc, sdbcload, sdbcunload, NULL, "Storage Device Block Cache");
#endif /* sun */

static int sdbc_inited;

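/*
 * sdbcinit - one-time module initialization: register the ncall
 * services, set up the global locks and condition variables, and
 * load the I/O buffer, handle, trace, fault-tolerance, test daemon
 * and hash subcomponents.
 */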
static int
sdbcinit(void)
{
	int rc;

	sdbc_inited = 0;

	(void) strncpy(sdbc_version, _VERSION_, sizeof (sdbc_version));

	mutex_init(&_sd_cache_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&_sdbc_config_lock, NULL, MUTEX_DRIVER, NULL);

#ifdef m88k
	REGISTER_SVC(SD_DUAL_WRITE,	r_sd_ifs_write);
	REGISTER_SVC(SD_DUAL_READ,	r_sd_ifs_read);
	REGISTER_SVC(SD_SET_CD,		r_sd_set_cd);
	REGISTER_SVC(SD_GETSIZE,	r_sd_getsize);
	REGISTER_SVC(SD_DUAL_OPEN,	r_sd_ifs_open);
	REGISTER_SVC(SD_REMOTE_FLUSH,	r_sd_remote_flush);
	REGISTER_SVC(SD_SGREMOTE_FLUSH,	r_sd_sgremote_flush);
	REGISTER_SVC(SD_DISK_IO,	r_sd_disk_io);
	REGISTER_SVC(SD_GET_BMAP,	r_rem_get_bmap);

	if ((rc = hpf_register_module("SDBC", _sd_hpf_stats)) != 0)
		return (rc);
#endif
	REGISTER_SVC(SD_ENABLE,		r_sd_ifs_cache_enable);
	REGISTER_SVC(SD_DISABLE,	r_sd_ifs_cache_disable);
	REGISTER_SVC(SD_CD_DISCARD,	r_cd_discard);

	cv_init(&_sd_flush_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&_sd_block_lk, NULL, MUTEX_DRIVER, NULL);

	sdbc_max_devs = nsc_max_devices();

	/*
	 * Initialize the bitmap array that would be useful in determining
	 * if the mask is not fragmented, instead of determining this
	 * at run time. Also initialize a lookup array for each mask, with
	 * the starting position, the length, and the mask subset.
	 */
	_sd_init_contig_bmap();
	_sd_init_lookup_map();

	if ((rc = _sdbc_iobuf_load()) != 0)
		return (rc);
	if ((rc = _sdbc_handles_load()) != 0)
		return (rc);
	if ((rc = _sdbc_tr_load()) != 0)
		return (rc);
	if ((rc = _sdbc_ft_load()) != 0)
		return (rc);
	if ((rc = _sdbc_tdaemon_load()) != 0)
		return (rc);
	if ((rc = _sdbc_hash_load()) != 0)
		return (rc);
#ifdef DEBUG
	_sdbc_ioj_load();
#endif
	sdbc_inited = 1;

	return (0);
}

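/*
 * sdbcunload - undo sdbcinit. Fails with EEXIST while the cache is
 * still configured; otherwise unregisters the ncall services and
 * unloads the subcomponents loaded by sdbcinit.
 */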
static int
sdbcunload(void)
{
	if (_sd_cache_initialized) {
		cmn_err(CE_WARN,
		    "!sdbc(sdbcunload) cannot unload module - cache in use!");
		return (EEXIST);
	}
#ifdef m88k
	UNREGISTER_SVC(SD_DUAL_WRITE);
	UNREGISTER_SVC(SD_DUAL_READ);
	UNREGISTER_SVC(SD_SET_CD);
	UNREGISTER_SVC(SD_GETSIZE);
	UNREGISTER_SVC(SD_DUAL_OPEN);
	UNREGISTER_SVC(SD_REMOTE_FLUSH);
	UNREGISTER_SVC(SD_SGREMOTE_FLUSH);
	UNREGISTER_SVC(SD_DISK_IO);
	UNREGISTER_SVC(SD_GET_BMAP);

	(void) hpf_unregister_module("SDBC");
#endif
	UNREGISTER_SVC(SD_ENABLE);
	UNREGISTER_SVC(SD_DISABLE);
	UNREGISTER_SVC(SD_CD_DISCARD);

	cv_destroy(&_sd_flush_cv);
	mutex_destroy(&_sd_block_lk);

	_sdbc_hash_unload();
	_sdbc_ft_unload();
	_sdbc_tr_unload();
	_sdbc_tdaemon_unload();
	_sdbc_handles_unload();
	_sdbc_iobuf_unload();
#ifdef DEBUG
	_sdbc_ioj_unload();
#endif

	mutex_destroy(&_sd_cache_lock);
	mutex_destroy(&_sdbc_config_lock);

	/*
	 * Normally we would unregister memory at deconfig time.
	 * However when chasing things like memory leaks it is
	 * useful to defer until unload time.
	 */
	if (_sdbc_memtype_deconfigure_delayed)
		_sdbc_memtype_deconfigure();

	return (0);
}


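/*
 * sdbcload - module load time initialization; backs out with
 * sdbcunload() if sdbcinit() fails.
 */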
static int
sdbcload(void)
{
	int err;

	if ((err = sdbcinit()) != 0) {
		(void) sdbcunload();
		return (err);
	}
	return (0);
}


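/*
 * sdbcopen - open entry point. Succeeds only once the module has
 * been initialized and attached and this node has a valid system id.
 */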
/* ARGSUSED */

static int
sdbcopen(dev_t *devp, int flag, int otyp, cred_t *crp)
{
	int nd = nsc_node_id();

	/*
	 * If we were statically linked in then returning an error out
	 * of sdbcinit won't prevent someone from coming thru here.
	 * We must prevent them from getting any further.
	 */
	if (!sdbc_inited)
		return (EINVAL);

	if (nd < nsc_min_nodeid) {
		cmn_err(CE_WARN,
		    "!sdbc(sdbcopen) open failed, systemid (%d) must be >= %d",
		    nd, nsc_min_nodeid);
		return (EINVAL);
	}
	if (!_sdbc_attached)
		return (ENXIO);

	return (0);
}


/* ARGSUSED */

static int
sdbcclose(dev_t dev, int flag, int otyp, cred_t *crp)
{
	return (0);
}

#ifdef _MULTI_DATAMODEL
static int
convert_ioctl_args(int cmd, void *arg, int mode, _sdbc_ioctl_t *args)
/*
 * convert_ioctl_args - Do a case by case conversion of an ILP32 ioctl
 * structure to an LP64 structure.
 * The main concern here is whether to sign-extend or not. The rule
 * is that pointers are not sign extended; the rest are obvious.
 * Since most everything is sign-extended, the definition of
 * _sdbc_ioctl32_t uses signed fields.
 *
 */
{
	_sdbc_ioctl32_t args32;

	if (ddi_copyin(arg, &args32, sizeof (_sdbc_ioctl32_t), mode))
		return (EFAULT);

	bzero((void *) args, sizeof (_sdbc_ioctl_t));

	switch (cmd) {

	case SDBC_UNUSED_1:
	case SDBC_UNUSED_2:
	case SDBC_UNUSED_3:
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		cmn_err(CE_WARN,
		    "!sdbc(convert_ioctl_args) obsolete sdbc ioctl used");
		return (EINVAL);

	case SDBC_ADUMP:
		args->arg0 = args32.arg0; /* cd */
		args->arg1 = (uint32_t)args32.arg1; /* &tt */
		args->arg2 = (uint32_t)args32.arg2; /* NULL (buf) */
		args->arg3 = args32.arg3; /*  size of buf */
		args->arg4 = args32.arg4; /* flag */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_TEST_INIT:
		args->arg0 = (uint32_t)args32.arg0; /* fname (char *) */
		args->arg1 = args32.arg1; /* index */
		args->arg2 = args32.arg2; /* len */
		args->arg3 = args32.arg3; /* track size */
		args->arg4 = args32.arg4; /* flag */
		break;

	case SDBC_TEST_START:
		args->arg0 = args32.arg0; /* num */
		args->arg1 = args32.arg1; /* type */
		args->arg2 = args32.arg2; /* loops */
		args->arg3 = args32.arg3; /* from */
		args->arg4 = args32.arg4; /* seed */
		break;

	case SDBC_TEST_END:
		break;

	case SDBC_ENABLE:
	case SDBC_VERSION:
		args->arg0 = (uint32_t)args32.arg0; /* pointer */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_DISABLE:
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_GET_CLUSTER_SIZE:
		args->arg0 = (uint32_t)args32.arg0; /* (int * ) */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	/* get the gl_file data */
	case SDBC_GET_CLUSTER_DATA:
		/* pointer to array[2*cluster_size] */
		args->arg0 = (uint32_t)args32.arg0;
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	/*  get the size of the global info pages for each board */
	case SDBC_GET_GLMUL_SIZES:
		args->arg0 = (uint32_t)args32.arg0; /* int[CACHE_MEM_PAD] * */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	/* get the global info about write blocks */
	case SDBC_GET_GLMUL_INFO:
		/* pointer to array[2*(sum of GLMUL_SIZES)] */
		args->arg0 = (uint32_t)args32.arg0;
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_SET_CD_HINT:
		args->arg0 = args32.arg0; /* cd */
		args->arg1 = args32.arg1; /* hint */
		args->arg2 = args32.arg2; /* flag */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_GET_CD_HINT:
		args->arg0 = args32.arg0;
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_SET_NODE_HINT:
		args->arg0 = args32.arg0; /* hint */
		args->arg1 = args32.arg1; /* flag */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_GET_NODE_HINT:
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_STATS:
		args->arg0 = (uint32_t)args32.arg0; /* (_sd_stats_t *) */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_ZAP_STATS:
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_GET_CD_BLK:
		args->arg0 = args32.arg0; /* cd */
		args->arg1 = (uint32_t)args32.arg1; /* blk */
		args->arg2 = (uint32_t)args32.arg2; /* (addr[5] *) */
		break;

	case SDBC_GET_CONFIG:
		args->arg0 = (uint32_t)args32.arg0; /* (_sdbc_config_t *) */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_SET_CONFIG:
		args->arg0 = (uint32_t)args32.arg0; /* (_sdbc_config_t *) */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_MAXFILES:
		args->arg0 = (uint32_t)args32.arg0; /* (int * ) */
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

#ifdef DEBUG
	/* toggle flusher flag for testing */
	case SDBC_TOGGLE_FLUSH:
		args->sdbc_ustatus = (spcs_s_info_t)args32.sdbc_ustatus;
		break;

	case SDBC_INJ_IOERR: /* cd, errnum */
		args->arg0 = args32.arg0; /* cd */
		args->arg1 = args32.arg1; /* i/o error number */
		args->arg2 = args32.arg2; /* countdown to issuing error */
		break;

	/* clear injected i/o errors */
	case SDBC_CLR_IOERR: /* cd */
		args->arg0 = args32.arg0; /* cd */
		break;
#endif /* DEBUG */
	default:
		return (EINVAL);
	}

	return (0);
}
#endif /* _MULTI_DATAMODEL */

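/*
 * sdbc_get_cd_blk - worker for the SDBC_GET_CD_BLK ioctl: copy the
 * cache entry information, the cache block contents and, if present,
 * the safestore copy of the write data out to the caller.
 */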
static int
sdbc_get_cd_blk(_sdbc_ioctl_t *args, int mode)
{

	_sd_cctl_t *cc_ent;
	caddr_t data;
	char *taddr;
	intptr_t addr[5];
#ifdef _MULTI_DATAMODEL
	uint32_t addr_32[5];
#endif /* _MULTI_DATAMODEL */
	char *lookup_file = NULL;
	int rc;
	sdbc_info_t info;
	nsc_off_t fba_pos;	/* disk block number */

	if (_sd_cache_initialized == 0) {
		return (EINVAL);
	}

	/* copyin the block number */
	if (ddi_copyin((void *)args->arg1, &fba_pos, sizeof (nsc_off_t),
	    mode)) {
		return (EFAULT);
	}

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		if (ddi_copyin((void *)args->arg2, addr_32, sizeof (addr_32),
		    mode)) {
			return (EFAULT);
		}
		addr[0] = addr_32[0]; /* (sdbc_info_t *) */
		addr[1] = addr_32[1]; /* (char *) cdata */
		addr[2] = addr_32[2]; /* ( int * ) cblk_size */
		addr[3] = addr_32[3]; /* ( char * ) filename */
		addr[4] = addr_32[4]; /* ( char *) wdata */
	} else {
		if (ddi_copyin((void *)args->arg2, addr, sizeof (addr), mode)) {
			return (EFAULT);
		}
	}
#else /* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)args->arg2, addr, sizeof (addr), mode)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	(void) copyout(&CACHE_BLOCK_SIZE, (void *)addr[2], sizeof (int));

	if (_sd_get_cd_blk((int)args->arg0, FBA_TO_BLK_NUM(fba_pos),
	    &cc_ent, &data, &lookup_file)) {
		if (lookup_file != NULL)
			(void) copyout(lookup_file, (void *)addr[3],
			    NSC_MAXPATH);
		return (ENOENT);
	}
	rc = 0;
	taddr = NULL;

	info.ci_write = cc_ent->cc_write ? 1 : 0;
	info.ci_dirty = cc_ent->cc_dirty;
	info.ci_valid = cc_ent->cc_valid;
	info.ci_cd = CENTRY_CD(cc_ent);
	info.ci_dblk = BLK_TO_FBA_NUM(CENTRY_BLK(cc_ent));
	(void) copyout(lookup_file, (void *)addr[3], NSC_MAXPATH);
	(void) copyout(&info, (void *)addr[0], sizeof (sdbc_info_t));

	(void) copyout(data, (void *)addr[1], CACHE_BLOCK_SIZE);

	/* get the write data if any */
	if (cc_ent->cc_write) {

		if (sdbc_safestore) {
			cmn_err(CE_WARN,
			    "!sdbc(sdbc_get_cd_blk) cc_write 0x%p sc-res 0x%p",
			    (void *)cc_ent->cc_write,
			    (void *)cc_ent->cc_write->sc_res);

			if ((taddr = kmem_alloc(CACHE_BLOCK_SIZE,
			    KM_NOSLEEP)) == NULL) {
				cmn_err(CE_WARN,
				    "!sdbc(sdbc_get_cd_blk) kmem_alloc failed."
				    " cannot get write data");
				info.ci_write = NULL;
				rc = EFAULT;
			} else if (SSOP_READ_CBLOCK(sdbc_safestore,
			    cc_ent->cc_write->sc_res, taddr,
			    CACHE_BLOCK_SIZE, 0) == SS_ERR) {

				cmn_err(CE_WARN, "!sdbc(sdbc_get_cd_blk) "
				    "safestore read failed");
				rc = EFAULT;

			} else if (copyout(taddr, (void *)addr[4],
			    CACHE_BLOCK_SIZE)) {
				cmn_err(CE_WARN,
				    "!sdbc(sdbc_get_cd_blk) copyout failed."
				    " cannot get write data");
				rc = EFAULT;
			}
		}

	}

	if (taddr)
		kmem_free(taddr, CACHE_BLOCK_SIZE);

	return (rc);
}

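/*
 * sdbcioctl - ioctl entry point; converts ILP32 arguments where
 * necessary and dispatches the SDBC_* commands.
 */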
/* ARGSUSED */
static int
sdbcioctl(dev_t dev, int cmd, void *arg, int mode, cred_t *crp, int *rvp)
{
	int rc = 0;
	_sdbc_ioctl_t args;
	int convert_32 = 0;
	spcs_s_info_t kstatus;

	*rvp = 0;

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		convert_32 = 1;
		if ((rc = convert_ioctl_args(cmd, arg, mode, &args)) != 0)
			return (rc);
	} else {
		if (ddi_copyin(arg, &args, sizeof (_sdbc_ioctl_t), mode)) {
			return (EFAULT);
		}
	}
#else /* _MULTI_DATAMODEL */
	if (ddi_copyin(arg, &args, sizeof (_sdbc_ioctl_t), mode)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	kstatus = spcs_s_kcreate();
	if (!kstatus)
		return (ENOMEM);

	switch (cmd) {

	case SDBC_UNUSED_1:
	case SDBC_UNUSED_2:
	case SDBC_UNUSED_3:

		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
		    SDBC_EOBSOLETE));

	case SDBC_ADUMP:
		rc = _sd_adump(&args, rvp);
		break;

	case SDBC_TEST_INIT:
		rc = _sd_test_init(&args);
		break;

	case SDBC_TEST_START:
		rc = _sd_test_start(&args, rvp);
		break;

	case SDBC_TEST_END:
		rc = _sd_test_end();
		break;

	case SDBC_ENABLE:
		mutex_enter(&_sdbc_config_lock);
		rc = _sdbc_configure((_sd_cache_param_t *)args.arg0,
		    NULL, kstatus);
		if (rc && rc != EALREADY && rc != SDBC_ENONETMEM) {
			(void) _sdbc_deconfigure(kstatus);
			mutex_exit(&_sdbc_config_lock);
			return (spcs_s_ocopyoutf
			    (&kstatus, args.sdbc_ustatus, rc));
		}
		mutex_exit(&_sdbc_config_lock);
		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));

	case SDBC_DISABLE:
		mutex_enter(&_sdbc_config_lock);
		if (_sd_cache_initialized == 0) {

			mutex_exit(&_sdbc_config_lock);
			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    SDBC_EDISABLE));
		}
		rc = _sdbc_deconfigure(kstatus);
		mutex_exit(&_sdbc_config_lock);
		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));

	case SDBC_GET_CLUSTER_SIZE:
		if (_sd_cache_initialized == 0) {

			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    SDBC_ECLUSTER_SIZE));
		}

		rc = sd_get_file_info_size((void *)args.arg0);
		break;

	/* get the gl_file data */
	case SDBC_GET_CLUSTER_DATA:
		if (_sd_cache_initialized == 0) {

			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    SDBC_ECLUSTER_DATA));
		}
		rc = sd_get_file_info_data((void *)args.arg0);
		break;

	/*  get the size of the global info pages for each board */
	case SDBC_GET_GLMUL_SIZES:
		if (_sd_cache_initialized == 0) {
			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    SDBC_EGLMUL_SIZE));
		}
		rc = sd_get_glmul_sizes((void *)args.arg0);
		break;

	/* get the global info about write blocks */
	case SDBC_GET_GLMUL_INFO:
		if (_sd_cache_initialized == 0) {

			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    SDBC_EGLMUL_INFO));

		}
		rc = sd_get_glmul_info((void *)args.arg0);
		break;

	case SDBC_SET_CD_HINT:
		if (_sd_cache_initialized == 0)
			return (spcs_s_ocopyoutf(&kstatus,
			    args.sdbc_ustatus, EINVAL));
		rc = ((args.arg2) ?
		    _sd_set_hint((int)args.arg0, (uint_t)args.arg1) :
		    _sd_clear_hint((int)args.arg0, (uint_t)args.arg1));
		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));

	case SDBC_GET_CD_HINT:
		{
			uint_t hint;

			if (_sd_cache_initialized == 0)
				return (spcs_s_ocopyoutf(&kstatus,
				    args.sdbc_ustatus, EINVAL));
			if ((rc = _sd_get_cd_hint((int)args.arg0, &hint)) == 0)
				*rvp = hint;
			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    rc));
		}

	case SDBC_SET_NODE_HINT:
		rc = ((args.arg1) ? _sd_set_node_hint((uint_t)args.arg0) :
		    _sd_clear_node_hint((uint_t)args.arg0));
		if (rc)
			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    rc));
		/* FALLTHRU */
	case SDBC_GET_NODE_HINT:
		{
			uint_t hint;
			if ((rc = _sd_get_node_hint(&hint)) == 0)
				*rvp = hint;
			return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
			    rc));
		}

	case SDBC_STATS:
		rc = _sd_get_stats((void *)args.arg0, convert_32);
		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));

	case SDBC_ZAP_STATS:
		_sd_zap_stats();
		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, 0));

	case SDBC_GET_CD_BLK:
		if (_sd_cache_initialized == 0)
			return (spcs_s_ocopyoutf(&kstatus,
			    args.sdbc_ustatus, EINVAL));
		rc = sdbc_get_cd_blk(&args, mode);
		break;

	case SDBC_GET_CONFIG:
		{
		_sdbc_config_t sdbc_config_info;

		if (ddi_copyin((void *)args.arg0,
		    &sdbc_config_info,
		    sizeof (_sdbc_config_t),
		    mode)) {
			spcs_s_kfree(kstatus);
			return (EFAULT);
		}
		rc = _sdbc_get_config(&sdbc_config_info);
		(void) ddi_copyout(&sdbc_config_info,
		    (void *)args.arg0,
		    sizeof (_sdbc_config_t),
		    mode);
		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
		}

	case SDBC_SET_CONFIG:
	{
		_sdbc_config_t mgmt_config_info;

		if (ddi_copyin((void *)args.arg0,
		    &mgmt_config_info,
		    sizeof (_sdbc_config_t),
		    mode)) {
			spcs_s_kfree(kstatus);
			return (EFAULT);
		}

		rc = _sdbc_configure(NULL, &mgmt_config_info, kstatus);
		if (rc && rc != EALREADY) {
			(void) _sdbc_deconfigure(kstatus);
			return (spcs_s_ocopyoutf
			    (&kstatus, args.sdbc_ustatus, rc));
		}

		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus, rc));
	}

	case SDBC_MAXFILES:
		if (copyout(&sdbc_max_devs, (void *)args.arg0,
		    sizeof (sdbc_max_devs)))
			rc = EFAULT;
		else
			rc = 0;

		break;

	case SDBC_VERSION:
	{
		cache_version_t cache_version;

		cache_version.major = sdbc_major_rev;
		cache_version.minor = sdbc_minor_rev;
		cache_version.micro = sdbc_micro_rev;
		cache_version.baseline = sdbc_baseline_rev;

		if (ddi_copyout(&cache_version, (void *)args.arg0,
		    sizeof (cache_version_t), mode)) {
			rc = EFAULT;
			break;
		}

		break;
	}


#ifdef DEBUG
	/* toggle flusher flag for testing */
	case SDBC_TOGGLE_FLUSH:
		_sdbc_flush_flag ^= 1;
		*rvp = _sdbc_flush_flag;
		rc = 0;

		return (spcs_s_ocopyoutf(&kstatus, args.sdbc_ustatus,
		    SDBC_ETOGGLE_FLUSH, _sdbc_flush_flag ? "on" : "off"));


	/* inject i/o errors */
	case SDBC_INJ_IOERR: /* cd, errnum */
		if (_sd_cache_initialized == 0)
			return (spcs_s_ocopyoutf(&kstatus,
			    args.sdbc_ustatus, EINVAL));
		rc = _sdbc_inject_ioerr(args.arg0, args.arg1, args.arg2);
		break;

	/* clear injected i/o errors */
	case SDBC_CLR_IOERR: /* cd */
		if (_sd_cache_initialized == 0)
			return (spcs_s_ocopyoutf(&kstatus,
			    args.sdbc_ustatus, EINVAL));
		rc = _sdbc_clear_ioerr(args.arg0);
		break;

#endif /* DEBUG */
	default:
		_sd_print(3, "!SDBC unknown ioctl: 0x%x unsupported", cmd);
		rc = EINVAL;
		break;
	}

	spcs_s_kfree(kstatus);
	return (rc);
}


/*
 * _sd_timed_block - sleep waiting for ticks time delay.
 * ticks - # of ticks to sleep
 * cvp - pointer to the cv we wait on while we delay.
 *
 * NO spin locks can be held at entry!
 *
 */
void
_sd_timed_block(clock_t ticks, kcondvar_t *cvp)
{
	mutex_enter(&_sd_block_lk);
	(void) cv_reltimedwait(cvp, &_sd_block_lk, ticks, TR_CLOCK_TICK);
	mutex_exit(&_sd_block_lk);
}


/*
 * _sd_unblock - awake a sleeper waiting on cv pointed to by cvp.
 *
 * NO spin locks can be held at entry as we may sleep.
 *
 */
void
_sd_unblock(kcondvar_t *cvp)
{

	mutex_enter(&_sd_block_lk);
	cv_broadcast(cvp);
	mutex_exit(&_sd_block_lk);
}

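/*
 * _sd_data_log - trace the first two words of each FBA in the given
 * range of a cache block; compiled in only when _SD_FBA_DATA_LOG is
 * defined.
 */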
/* ARGSUSED */
void
_sd_data_log(int num, _sd_cctl_t *centry, nsc_off_t st, nsc_size_t len)
{
#if defined(_SD_FBA_DATA_LOG)
	nsc_size_t i;
	nsc_off_t blk;

	blk = BLK_TO_FBA_NUM(CENTRY_BLK(centry));
	for (i = st; i < (st + len); i++)
		SDTRACE(num, CENTRY_CD(centry), 1, blk + i,
		    *(int *)(centry->cc_data + FBA_SIZE(i)),
		    *(int *)(centry->cc_data + FBA_SIZE(i) + 4));
#endif /* _SD_FBA_DATA_LOG */
}

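/*
 * _sd_data_log_chain - apply DATA_LOG over a chain of cache blocks
 * covering fba_pos for fba_len FBAs; compiled in only when
 * _SD_FBA_DATA_LOG is defined.
 */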
/* ARGSUSED */
void
_sd_data_log_chain(int num, _sd_cctl_t *centry, nsc_off_t fba_pos,
    nsc_size_t fba_len)
{
#if defined(_SD_FBA_DATA_LOG)
	sdbc_cblk_fba_t st_cblk_len;	/* FBA len of starting cache block */
	sdbc_cblk_fba_t end_cblk_len;	/* FBA len of ending cache block */
	sdbc_cblk_fba_t st_cblk_off;	/* FBA offset into starting cblock */

	while (CENTRY_BLK(centry) != FBA_TO_BLK_NUM(fba_pos))
		centry = centry->cc_chain;

	st_cblk_off = BLK_FBA_OFF(fba_pos);
	st_cblk_len = BLK_FBAS - st_cblk_off;
	if (st_cblk_len >= fba_len) {
		end_cblk_len = 0;
		st_cblk_len = fba_len;
	} else {
		end_cblk_len = BLK_FBA_OFF(fba_pos + fba_len);
	}

	DATA_LOG(num, centry, st_cblk_off, st_cblk_len);

	fba_len -= st_cblk_len;
	centry = centry->cc_chain;

	while (fba_len > end_cblk_len) {
		DATA_LOG(num, centry, 0, BLK_FBAS);
		fba_len -= BLK_FBAS;
		centry = centry->cc_chain;
	}
	if (end_cblk_len) DATA_LOG(num, centry, 0, end_cblk_len);
#endif /* _SD_FBA_DATA_LOG */
}


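/*
 * _sd_zap_stats - reset the global and per-device cache statistics.
 */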
void
_sd_zap_stats(void)
{
	int i;

	if (_sd_cache_stats == NULL)
		return;

	_sd_cache_stats->st_rdhits = 0;
	_sd_cache_stats->st_rdmiss = 0;
	_sd_cache_stats->st_wrhits = 0;
	_sd_cache_stats->st_wrmiss = 0;
	_sd_lru_q.sq_noreq_stat = 0;
	_sd_lru_q.sq_req_stat = 0;

	for (i = 0; i < sdbc_max_devs; i++) {
		_sd_cache_stats->st_shared[i].sh_cache_read  = 0;
		_sd_cache_stats->st_shared[i].sh_cache_write = 0;
		_sd_cache_stats->st_shared[i].sh_disk_read   = 0;
		_sd_cache_stats->st_shared[i].sh_disk_write  = 0;
	}
}


/*
 * Return the cache sizes used by the Sense Subsystem Status CCW
 */
int
_sd_cache_sizes(int *asize, int *wsize)
{
	int	psize;

	*asize = 0;
	*wsize = 0;

	/*
	 * add in the total cache size and the
	 * non-volatile (battery-backed) cache size.
	 */
	if (_sd_net_config.sn_configured) {
		psize = _sd_net_config.sn_psize;
		*asize += (_sd_net_config.sn_cpages * psize);
		*wsize += (safestore_config.ssc_wsize);
	}

	return (0);
}


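/*
 * _sd_print - conditional diagnostic message; emitted only when
 * level is at or below _sd_debug_level.
 */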
/*PRINTFLIKE2*/
void
_sd_print(int level, char *fmt, ...)
{
	va_list adx;
	if (level <= _sd_debug_level) {
		va_start(adx, fmt);
		vcmn_err(CE_NOTE, fmt, adx);
		va_end(adx);

	}
}


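/*
 * _sd_get_cd_blk - look up the cache entry for (cd, cblk) in the
 * hash table; on a hit return 0 along with the entry, its data
 * address and the device's file name, otherwise return -1.
 */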
int
_sd_get_cd_blk(int cd, nsc_off_t cblk, _sd_cctl_t **cc, caddr_t *data,
    char **filename)
{
	_sd_cctl_t *cc_ent;

	if (FILE_OPENED(cd) != 0) {
		*filename = _sd_cache_files[cd].cd_info->sh_filename;
		if (cc_ent = (_sd_cctl_t *)
		    _sd_hash_search(cd, cblk, _sd_htable)) {
			*cc = cc_ent;
			*data = (caddr_t)cc_ent->cc_data;
			return (0);
		}
	}
	return (-1);
}

/*
 * Central dynamic memory processing vars edit routine:
 * take a local copy as input and transfer it to the globals.
 *
 * sec1, sec2, sec3
 * range check 1 to 255 (arbitrary, but in any case must be <= 2000 due to
 *	32bit signed int limits in later calc)
 * aging_ct
 * range check 1 to 255 (only 8 bits reserved for aging ctr)
 *
 */
int
sdbc_edit_xfer_process_vars_dm(_dm_process_vars_t *process_vars)
{
	if (process_vars->max_dyn_list > 0)
		dynmem_processing_dm.max_dyn_list = process_vars->max_dyn_list;

	/* no edit on monitor_dynmem_process */
	dynmem_processing_dm.monitor_dynmem_process =
	    process_vars->monitor_dynmem_process;
	/* no edit on process_directive */
	dynmem_processing_dm.process_directive =
	    process_vars->process_directive;

	if (process_vars->cache_aging_ct1 > 0 &&
	    process_vars->cache_aging_ct1 <= CACHE_AGING_CT_MAX)
		dynmem_processing_dm.cache_aging_ct1 =
		    process_vars->cache_aging_ct1;
	if (process_vars->cache_aging_ct2 > 0 &&
	    process_vars->cache_aging_ct2 <= CACHE_AGING_CT_MAX)
		dynmem_processing_dm.cache_aging_ct2 =
		    process_vars->cache_aging_ct2;
	if (process_vars->cache_aging_ct3 > 0 &&
	    process_vars->cache_aging_ct3 <= CACHE_AGING_CT_MAX)
		dynmem_processing_dm.cache_aging_ct3 =
		    process_vars->cache_aging_ct3;
	if (process_vars->cache_aging_sec1 > 0 &&
	    process_vars->cache_aging_sec1 <= CACHE_AGING_SEC1_MAX)
		dynmem_processing_dm.cache_aging_sec1 =
		    process_vars->cache_aging_sec1;
	if (process_vars->cache_aging_sec2 > 0 &&
	    process_vars->cache_aging_sec2 <= CACHE_AGING_SEC2_MAX)
		dynmem_processing_dm.cache_aging_sec2 =
		    process_vars->cache_aging_sec2;
	if (process_vars->cache_aging_sec3 > 0 &&
	    process_vars->cache_aging_sec3 <= CACHE_AGING_SEC3_MAX)
		dynmem_processing_dm.cache_aging_sec3 =
		    process_vars->cache_aging_sec3;
	if (process_vars->cache_aging_pcnt1 >= 0 &&
	    process_vars->cache_aging_pcnt1 <= CACHE_AGING_PCNT1_MAX)
		dynmem_processing_dm.cache_aging_pcnt1 =
		    process_vars->cache_aging_pcnt1;
	if (process_vars->cache_aging_pcnt2 >= 0 &&
	    process_vars->cache_aging_pcnt2 <= CACHE_AGING_PCNT2_MAX)
		dynmem_processing_dm.cache_aging_pcnt2 =
		    process_vars->cache_aging_pcnt2;
	if (process_vars->max_holds_pcnt >= 0 &&
	    process_vars->max_holds_pcnt <= MAX_HOLDS_PCNT_MAX)
		dynmem_processing_dm.max_holds_pcnt =
		    process_vars->max_holds_pcnt;
	return (0);
}

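/*
 * sdbc_get_dip - return the devinfo pointer saved at attach time.
 */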
dev_info_t *
sdbc_get_dip()
{
	return (dev_dip);
}
