xref: /titanic_51/usr/src/uts/sun4u/starcat/io/iosram.c (revision 745c8c960c38d44c7ea8ade6e1f4e7ca3956eb09)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 /*
29  * IOSRAM leaf driver to the SBBC nexus driver.  This driver is used
30  * by Starcat domain software to read from and write to the IOSRAM.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/conf.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/ddi_impldefs.h>
38 #include <sys/obpdefs.h>
39 #include <sys/promif.h>
40 #include <sys/prom_plat.h>
41 #include <sys/cmn_err.h>
42 #include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
43 #include <sys/modctl.h>		/* for modldrv */
44 #include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
45 #include <sys/errno.h>
46 #include <sys/kmem.h>
47 #include <sys/kstat.h>
48 #include <sys/debug.h>
49 
50 #include <sys/axq.h>
51 #include <sys/iosramreg.h>
52 #include <sys/iosramio.h>
53 #include <sys/iosramvar.h>
54 
55 
56 #if defined(DEBUG)
57 int	iosram_debug = 0;
58 static void iosram_dprintf(const char *fmt, ...);
59 #define	DPRINTF(level, arg)	\
60 		{ if (iosram_debug >= level) iosram_dprintf arg; }
61 #else	/* !DEBUG */
62 #define	DPRINTF(level, arg)
63 #endif	/* !DEBUG */
64 
65 
66 /*
67  * IOSRAM module global state
68  */
69 static void	*iosramsoft_statep;	/* IOSRAM state pointer */
70 static kmutex_t	iosram_mutex;		/* mutex lock */
71 
72 static iosram_chunk_t	*chunks = NULL;	/* array of TOC entries */
73 static int	nchunks = 0;		/* # of TOC entries */
74 static iosram_chunk_t	*iosram_hashtab[IOSRAM_HASHSZ];	/* key hash table */
75 
76 static kcondvar_t	iosram_tswitch_wait;	/* tunnel switch wait cv */
77 static int	iosram_tswitch_wakeup = 0;	/* flag indicating one or */
78 						/* more threads waiting on */
79 						/* iosram_tswitch_wait cv */
80 static int	iosram_tswitch_active = 0;	/* tunnel switch active flag */
81 static int	iosram_tswitch_aborted = 0;	/* tunnel switch abort flag */
82 static clock_t	iosram_tswitch_tstamp = 0;	/* lbolt of last tswitch end */
83 static kcondvar_t	iosram_rw_wait;		/* read/write wait cv */
84 static int	iosram_rw_wakeup = 0;		/* flag indicating one or */
85 						/* more threads waiting on */
86 						/* iosram_rw_wait cv */
87 static int	iosram_rw_active = 0;		/* # threads accessing IOSRAM */
88 #if defined(DEBUG)
89 static int	iosram_rw_active_max = 0;
90 #endif
91 
92 static struct iosramsoft *iosram_new_master = NULL;	/* new tunnel target */
93 static struct iosramsoft *iosram_master = NULL;		/* master tunnel */
94 static struct iosramsoft *iosram_instances = NULL;	/* list of softstates */
95 
96 static ddi_acc_handle_t	iosram_handle = NULL;	/* master IOSRAM map handle */
97 
98 static void	(*iosram_hdrchange_handler)() = NULL;
99 
100 #if IOSRAM_STATS
101 static struct	iosram_stat iosram_stats;	/* IOSRAM statistics */
102 static void	iosram_print_stats();		/* forward declaration */
103 #endif /* IOSRAM_STATS */
104 
105 
106 #if IOSRAM_LOG
107 kmutex_t 	iosram_log_mutex;
108 int		iosram_log_level = 1;
109 int		iosram_log_print = 0;		/* print log when recorded */
110 uint32_t	iosram_logseq;
111 iosram_log_t	iosram_logbuf[IOSRAM_MAXLOG];
112 static void	iosram_print_log(int cnt);	/* forward declaration */
113 #endif	/* IOSRAM_LOG */
114 
115 
116 /* driver entry point fn definitions */
117 static int 	iosram_open(dev_t *, int, int, cred_t *);
118 static int	iosram_close(dev_t, int, int, cred_t *);
119 static int	iosram_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
120 
121 /* configuration entry point fn definitions */
122 static int 	iosram_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
123 static int	iosram_attach(dev_info_t *, ddi_attach_cmd_t);
124 static int	iosram_detach(dev_info_t *, ddi_detach_cmd_t);
125 
126 
127 /* forward declarations */
128 static iosram_chunk_t	*iosram_find_chunk(uint32_t key);
129 static void	iosram_set_master(struct iosramsoft *softp);
130 static int	iosram_is_chosen(struct iosramsoft *softp);
131 static int	iosram_tunnel_capable(struct iosramsoft *softp);
132 static int	iosram_read_toc(struct iosramsoft *softp);
133 static void	iosram_init_hashtab(void);
134 static void	iosram_update_addrs(struct iosramsoft *softp);
135 
136 static int	iosram_setup_map(struct iosramsoft *softp);
137 static void	iosram_remove_map(struct iosramsoft *softp);
138 static int	iosram_add_intr(iosramsoft_t *);
139 static int	iosram_remove_intr(iosramsoft_t *);
140 
141 static void	iosram_add_instance(struct iosramsoft *softp);
142 static void	iosram_remove_instance(int instance);
143 static int	iosram_switch_tunnel(iosramsoft_t *softp);
144 static void	iosram_abort_tswitch();
145 
146 #if defined(DEBUG)
147 /* forward declarations for debugging */
148 static int	iosram_get_keys(iosram_toc_entry_t *buf, uint32_t *len);
149 static void	iosram_print_cback();
150 static void	iosram_print_state(int);
151 static void	iosram_print_flags();
152 #endif
153 
154 
155 
156 /*
157  * cb_ops
158  */
159 static struct cb_ops iosram_cb_ops = {
160 	iosram_open,		/* cb_open */
161 	iosram_close,		/* cb_close */
162 	nodev,			/* cb_strategy */
163 	nodev,			/* cb_print */
164 	nodev,			/* cb_dump */
165 	nodev,			/* cb_read */
166 	nodev,			/* cb_write */
167 	iosram_ioctl,		/* cb_ioctl */
168 	nodev,			/* cb_devmap */
169 	nodev,			/* cb_mmap */
170 	nodev,			/* cb_segmap */
171 	nochpoll,		/* cb_chpoll */
172 	ddi_prop_op,		/* cb_prop_op */
173 	NULL,			/* cb_stream */
174 	(int)(D_NEW | D_MP | D_HOTPLUG)	/* cb_flag */
175 };
176 
177 /*
178  * Declare ops vectors for auto configuration.
179  */
180 struct dev_ops  iosram_ops = {
181 	DEVO_REV,		/* devo_rev */
182 	0,			/* devo_refcnt */
183 	iosram_getinfo,		/* devo_getinfo */
184 	nulldev,		/* devo_identify */
185 	nulldev,		/* devo_probe */
186 	iosram_attach,		/* devo_attach */
187 	iosram_detach,		/* devo_detach */
188 	nodev,			/* devo_reset */
189 	&iosram_cb_ops,		/* devo_cb_ops */
190 	(struct bus_ops *)NULL,	/* devo_bus_ops */
191 	nulldev,		/* devo_power */
192 	ddi_quiesce_not_supported,	/* devo_quiesce */
193 };
194 
195 /*
196  * Loadable module support.
197  */
198 extern struct mod_ops mod_driverops;
199 
200 static struct modldrv iosrammodldrv = {
201 	&mod_driverops,		/* type of module - driver */
202 	"IOSRAM Leaf driver",
203 	&iosram_ops,
204 };
205 
206 static struct modlinkage iosrammodlinkage = {
207 	MODREV_1,
208 	&iosrammodldrv,
209 	NULL
210 };
211 
212 
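/*
 * _init()
 *	Loadable module entry point.  Initialize the global mutexes, condition
 *	variables, and chunk hash table, then register the driver with the
 *	modload framework.
 */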
213 int
214 _init(void)
215 {
216 	int    error;
217 	int	i;
218 
219 	mutex_init(&iosram_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
220 	cv_init(&iosram_tswitch_wait, NULL, CV_DRIVER, NULL);
221 	cv_init(&iosram_rw_wait, NULL, CV_DRIVER, NULL);
222 #if defined(IOSRAM_LOG)
223 	mutex_init(&iosram_log_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
224 #endif
225 
226 	DPRINTF(1, ("_init:IOSRAM\n"));
227 
228 	for (i = 0; i < IOSRAM_HASHSZ; i++) {
229 		iosram_hashtab[i] = NULL;
230 	}
231 
232 	if ((error = ddi_soft_state_init(&iosramsoft_statep,
233 	    sizeof (struct iosramsoft), 1)) != 0) {
234 		goto failed;
235 	}
236 	if ((error = mod_install(&iosrammodlinkage)) != 0) {
237 		ddi_soft_state_fini(&iosramsoft_statep);
238 		goto failed;
239 	}
240 
241 	IOSRAMLOG(0, "_init:IOSRAM ... error:%d  statep:%p\n",
242 	    error, iosramsoft_statep, NULL, NULL);
243 
244 	return (error);
245 
246 failed:
247 	cv_destroy(&iosram_tswitch_wait);
248 	cv_destroy(&iosram_rw_wait);
249 	mutex_destroy(&iosram_mutex);
250 #if defined(IOSRAM_LOG)
251 	mutex_destroy(&iosram_log_mutex);
252 #endif
253 	IOSRAMLOG(0, "_init:IOSRAM ... error:%d  statep:%p\n",
254 	    error, iosramsoft_statep, NULL, NULL);
255 
256 	return (error);
257 }
258 
259 
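/*
 * _fini()
 *	Loadable module exit point.  In non-DEBUG builds the driver always
 *	returns EBUSY, so the module can only be unloaded in DEBUG builds.
 */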
260 int
261 _fini(void)
262 {
263 #ifndef DEBUG
264 	return (EBUSY);
265 #else /* !DEBUG */
266 	int    error;
267 
268 	if ((error = mod_remove(&iosrammodlinkage)) == 0) {
269 		ddi_soft_state_fini(&iosramsoft_statep);
270 
271 		cv_destroy(&iosram_tswitch_wait);
272 		cv_destroy(&iosram_rw_wait);
273 		mutex_destroy(&iosram_mutex);
274 #if defined(IOSRAM_LOG)
275 		mutex_destroy(&iosram_log_mutex);
276 #endif
277 	}
278 	DPRINTF(1, ("_fini:IOSRAM  error:%d\n", error));
279 
280 	return (error);
281 #endif /* !DEBUG */
282 }
283 
284 
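/*
 * _info()
 *	Loadable module information entry point.
 */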
285 int
286 _info(struct modinfo *modinfop)
287 {
288 	return (mod_info(&iosrammodlinkage, modinfop));
289 }
290 
291 
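/*
 * iosram_attach()
 *	Attach/resume entry point.  On DDI_ATTACH, allocate and initialize the
 *	per-instance soft state, add the instance to the iosram_instances
 *	list, and, if this is the chosen IOSRAM and no master exists yet, set
 *	it up as the master tunnel.  On DDI_RESUME, re-enable SBBC interrupts
 *	and retrigger any pending soft interrupt.
 */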
292 static int
293 iosram_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
294 {
295 	int	instance;
296 	int	propval;
297 	int	length;
298 	char	name[32];
299 	struct	iosramsoft *softp;
300 
301 	instance = ddi_get_instance(dip);
302 
303 	DPRINTF(1, ("iosram(%d): attach dip:%p\n", instance, dip));
304 
305 	IOSRAMLOG(1, "ATTACH: dip:%p instance %d ... start\n",
306 	    dip, instance, NULL, NULL);
307 	switch (cmd) {
308 	case DDI_ATTACH:
309 		break;
310 	case DDI_RESUME:
311 		if (!(softp = ddi_get_soft_state(iosramsoft_statep,
312 		    instance))) {
313 			return (DDI_FAILURE);
314 		}
315 		mutex_enter(&iosram_mutex);
316 		mutex_enter(&softp->intr_mutex);
317 		if (!softp->suspended) {
318 			mutex_exit(&softp->intr_mutex);
319 			mutex_exit(&iosram_mutex);
320 			return (DDI_FAILURE);
321 		}
322 		softp->suspended = 0;
323 
324 		/*
325 		 * If the SBBC region is mapped in, re-enable SBBC interrupts
326 		 * by restoring the value saved at suspend time.
327 		 */
328 		if (softp->sbbc_region) {
329 			ddi_put32(softp->sbbc_handle,
330 			    &(softp->sbbc_region->int_enable.reg),
331 			    softp->int_enable_sav);
332 		}
333 
334 		/*
335 		 * Trigger soft interrupt handler to process any pending
336 		 * interrupts.
337 		 */
338 		if (softp->intr_pending && !softp->intr_busy &&
339 		    (softp->softintr_id != NULL)) {
340 			ddi_trigger_softintr(softp->softintr_id);
341 		}
342 
343 		mutex_exit(&softp->intr_mutex);
344 		mutex_exit(&iosram_mutex);
345 
346 		return (DDI_SUCCESS);
347 
348 	default:
349 		return (DDI_FAILURE);
350 	}
351 
352 	if (ddi_soft_state_zalloc(iosramsoft_statep, instance) != 0) {
353 		return (DDI_FAILURE);
354 	}
355 
356 	if ((softp = ddi_get_soft_state(iosramsoft_statep, instance)) == NULL) {
357 		return (DDI_FAILURE);
358 	}
359 	softp->dip = dip;
360 	softp->instance = instance;
361 	softp->sbbc_region = NULL;
362 
363 	/*
364 	 * If this instance is not tunnel capable, we don't attach it.
365 	 */
366 	if (iosram_tunnel_capable(softp) == 0) {
367 		DPRINTF(1, ("iosram(%d): not tunnel_capable\n", instance));
368 		IOSRAMLOG(1, "ATTACH(%d): not tunnel_capable\n", instance, NULL,
369 		    NULL, NULL);
370 		goto attach_fail;
371 	}
372 
373 	/*
374 	 * Need to create an "interrupt-priorities" property to define the PIL
375 	 * to be used with the interrupt service routine.
376 	 */
377 	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
378 	    "interrupt-priorities", &length) == DDI_PROP_NOT_FOUND) {
379 		DPRINTF(1, ("iosram(%d): creating interrupt priority prop\n",
380 		    instance));
381 		propval = IOSRAM_PIL;
382 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, 0,
383 		    "interrupt-priorities", (caddr_t)&propval, sizeof (propval))
384 		    != DDI_PROP_SUCCESS) {
385 			cmn_err(CE_WARN,
386 			    "iosram_attach: failed to create property");
387 			goto attach_fail;
388 		}
389 	}
390 
391 	/*
392 	 * Get interrupt block cookie and initialize per-instance mutex
393 	 */
394 	if (ddi_get_iblock_cookie(softp->dip, 0, &softp->real_iblk)
395 	    != DDI_SUCCESS) {
396 		IOSRAMLOG(1, "ATTACH(%d): cannot get soft intr cookie\n",
397 		    instance, NULL, NULL, NULL);
398 		goto attach_fail;
399 	}
400 	mutex_init(&softp->intr_mutex, NULL, MUTEX_DRIVER,
401 	    (void *)softp->real_iblk);
402 
403 	/*
404 	 * Add this instance to the iosram_instances list so that it can be
405 	 * used as a tunnel target in the future.
406 	 */
407 	mutex_enter(&iosram_mutex);
408 	softp->state = IOSRAM_STATE_INIT;
409 	iosram_add_instance(softp);
410 
411 	/*
412 	 * If this is the chosen IOSRAM and there is no master IOSRAM yet, then
413 	 * let's set this instance as the master.
414 	 */
415 	if (iosram_master == NULL && iosram_is_chosen(softp)) {
416 		iosram_switch_tunnel(softp);
417 
418 		/*
419 		 * XXX Do we need to panic if unable to setup master IOSRAM?
420 		 */
421 		if (iosram_master == NULL) {
422 			cmn_err(CE_WARN,
423 			    "iosram(%d): can't setup master tunnel\n",
424 			    instance);
425 			softp->state = 0;
426 			iosram_remove_instance(softp->instance);
427 			mutex_exit(&iosram_mutex);
428 			mutex_destroy(&softp->intr_mutex);
429 			goto attach_fail;
430 		}
431 	}
432 
433 	mutex_exit(&iosram_mutex);
434 
435 	/*
436 	 * Create minor node
437 	 */
438 	(void) sprintf(name, "iosram%d", instance);
439 	if (ddi_create_minor_node(dip, name, S_IFCHR, instance, NULL, NULL) ==
440 	    DDI_FAILURE) {
441 		/*
442 		 * The minor node is only needed for debugging purposes, so
443 		 * there is no need to fail the attach request.  Simply log a
444 		 * message and continue.
445 		 */
446 		cmn_err(CE_NOTE, "!iosram(%d): can't create minor node\n",
447 		    instance);
448 	}
449 	ddi_report_dev(dip);
450 
451 	DPRINTF(1, ("iosram_attach(%d): success.\n", instance));
452 	IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... success  softp:%p\n",
453 	    dip, instance, softp, NULL);
454 
455 	return (DDI_SUCCESS);
456 
457 attach_fail:
458 	DPRINTF(1, ("iosram_attach(%d):failed.\n", instance));
459 	IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... failed.\n",
460 	    dip, instance, NULL, NULL);
461 
462 	ddi_soft_state_free(iosramsoft_statep, instance);
463 	return (DDI_FAILURE);
464 }
465 
466 
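/*
 * iosram_detach()
 *	Detach/suspend entry point.  On DDI_SUSPEND, save and disable the SBBC
 *	interrupt enable register.  On DDI_DETACH, switch the tunnel away from
 *	this instance if necessary and tear down its state; the detach fails
 *	if this instance is still the master or involved in a tunnel switch.
 */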
467 static int
468 iosram_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
469 {
470 	int			instance;
471 	struct iosramsoft	*softp;
472 
473 	instance = ddi_get_instance(dip);
474 	if (!(softp = ddi_get_soft_state(iosramsoft_statep, instance))) {
475 		return (DDI_FAILURE);
476 	}
477 
478 	IOSRAMLOG(1, "DETACH: dip:%p instance %d softp:%p\n",
479 	    dip, instance, softp, NULL);
480 
481 	switch (cmd) {
482 	case DDI_DETACH:
483 		break;
484 	case DDI_SUSPEND:
485 		mutex_enter(&iosram_mutex);
486 		mutex_enter(&softp->intr_mutex);
487 		if (softp->suspended) {
488 			mutex_exit(&softp->intr_mutex);
489 			mutex_exit(&iosram_mutex);
490 			return (DDI_FAILURE);
491 		}
492 		softp->suspended = 1;
493 		/*
494 		 * Disable SBBC interrupts if SBBC is mapped in
495 		 */
496 		if (softp->sbbc_region) {
497 			/* save current interrupt enable register */
498 			softp->int_enable_sav = ddi_get32(softp->sbbc_handle,
499 			    &(softp->sbbc_region->int_enable.reg));
500 			ddi_put32(softp->sbbc_handle,
501 			    &(softp->sbbc_region->int_enable.reg), 0x0);
502 		}
503 		mutex_exit(&softp->intr_mutex);
504 		mutex_exit(&iosram_mutex);
505 		return (DDI_SUCCESS);
506 
507 	default:
508 		return (DDI_FAILURE);
509 	}
510 
511 
512 	/*
513 	 * Indicate that this instance is being detached so that it does not
514 	 * become a tunnel switch target in the future.
515 	 */
516 	mutex_enter(&iosram_mutex);
517 	softp->state |= IOSRAM_STATE_DETACH;
518 
519 	/*
520 	 * If this instance is currently the master or the target of a tunnel
521 	 * switch, we must switch the tunnel away first, waiting as needed.
522 	 */
523 	if (iosram_master == softp || (softp->state & IOSRAM_STATE_TSWITCH)) {
524 		mutex_exit(&iosram_mutex);
525 		iosram_switchfrom(instance);
526 		mutex_enter(&iosram_mutex);
527 	}
528 
529 	/*
530 	 * If the tunnel switch is in progress and we are the master or target
531 	 * of tunnel relocation, then we can't detach this instance right now.
532 	 */
533 	if (softp->state & IOSRAM_STATE_TSWITCH) {
534 		softp->state &= ~IOSRAM_STATE_DETACH;
535 		mutex_exit(&iosram_mutex);
536 		return (DDI_FAILURE);
537 	}
538 
539 	/*
540 	 * We can't allow master IOSRAM to be detached as we won't be able to
541 	 * communicate otherwise.
542 	 */
543 	if (iosram_master == softp) {
544 		softp->state &= ~IOSRAM_STATE_DETACH;
545 		mutex_exit(&iosram_mutex);
546 		return (DDI_FAILURE);
547 	}
548 
549 	/*
550 	 * Now remove our instance from the iosram_instances list.
551 	 */
552 	iosram_remove_instance(instance);
553 	mutex_exit(&iosram_mutex);
554 
555 	/*
556 	 * Instances should only ever be mapped if they are the master and/or
557 	 * participating in a tunnel switch.  Neither should be the case here.
558 	 */
559 	ASSERT((softp->state & IOSRAM_STATE_MAPPED) == 0);
560 
561 	/*
562 	 * Destroy per-instance mutexes
563 	 */
564 	mutex_destroy(&softp->intr_mutex);
565 
566 	ddi_remove_minor_node(dip, NULL);
567 
568 	/*
569 	 * Finally remove our soft state structure
570 	 */
571 	ddi_soft_state_free(iosramsoft_statep, instance);
572 
573 	return (DDI_SUCCESS);
574 }
575 
576 
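/*
 * iosram_getinfo()
 *	Translate a dev_t into the corresponding dev_info pointer or instance
 *	number.
 */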
577 /* ARGSUSED0 */
578 static int
579 iosram_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
580 		void **result)
581 {
582 	dev_t			dev = (dev_t)arg;
583 	struct iosramsoft	*softp;
584 	int			instance, ret;
585 
586 	instance = getminor(dev);
587 
588 	IOSRAMLOG(2, "GETINFO: dip:%x instance %d dev:%x infocmd:%x\n",
589 	    dip, instance, dev, infocmd);
590 
591 	switch (infocmd) {
592 		case DDI_INFO_DEVT2DEVINFO:
593 			softp = ddi_get_soft_state(iosramsoft_statep, instance);
594 			if (softp == NULL) {
595 				*result = NULL;
596 				ret = DDI_FAILURE;
597 			} else {
598 				*result = softp->dip;
599 				ret = DDI_SUCCESS;
600 			}
601 			break;
602 		case DDI_INFO_DEVT2INSTANCE:
603 			*result = (void *)(uintptr_t)instance;
604 			ret = DDI_SUCCESS;
605 			break;
606 		default:
607 			ret = DDI_FAILURE;
608 			break;
609 	}
610 
611 	return (ret);
612 }
613 
614 
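/*
 * iosram_open()
 *	Device open entry point; simply verifies that the soft state for the
 *	minor number exists.
 */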
615 /*ARGSUSED1*/
616 static int
617 iosram_open(dev_t *dev, int flag, int otype, cred_t *credp)
618 {
619 	struct iosramsoft	*softp;
620 	int			instance;
621 
622 	instance = getminor(*dev);
623 	softp = ddi_get_soft_state(iosramsoft_statep, instance);
624 
625 	if (softp == NULL) {
626 		return (ENXIO);
627 	}
628 
629 	IOSRAMLOG(1, "OPEN: dev:%p otype:%x ... instance:%d softp:%p\n",
630 	    *dev, otype, softp->instance, softp);
631 
632 	return (0);
633 }
634 
635 
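/*
 * iosram_close()
 *	Device close entry point.
 */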
636 /*ARGSUSED1*/
637 static int
638 iosram_close(dev_t dev, int flag, int otype, cred_t *credp)
639 {
640 	struct iosramsoft	*softp;
641 	int			instance;
642 
643 	instance = getminor(dev);
644 	softp = ddi_get_soft_state(iosramsoft_statep, instance);
645 	if (softp == NULL) {
646 		return (ENXIO);
647 	}
648 
649 	IOSRAMLOG(1, "CLOSE: dev:%p otype:%x ... instance:%d softp:%p\n",
650 	    dev, otype, softp->instance, softp);
651 
652 	return (0);
653 }
654 
655 
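/*
 * iosram_rd(key, off, len, dptr)
 *	Read "len" bytes at offset "off" within the chunk identified by "key"
 *	into the buffer at "dptr".  Fails if there is no master IOSRAM, the
 *	chunk does not exist, the request is out of bounds, or a tunnel switch
 *	is in progress.
 */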
656 int
657 iosram_rd(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
658 {
659 	iosram_chunk_t		*chunkp;
660 	uint32_t		chunk_len;
661 	uint8_t			*iosramp;
662 	ddi_acc_handle_t	handle;
663 	int			boff;
664 	union {
665 		uchar_t	cbuf[UINT32SZ];
666 		uint32_t  data;
667 	} word;
668 
669 	int			error = 0;
670 	uint8_t			*buf = (uint8_t *)dptr;
671 
672 	/*
673 	 * We try to read from the IOSRAM using double word or word access
674 	 * provided both "off" and "buf" are (or can be) double word or word
675 	 * aligned.  Otherwise, we try to align "off" to a word boundary and
676 	 * then read data from the IOSRAM using word access, but store it
677 	 * into the buf buffer using byte access.
678 	 *
679 	 * If the leading/trailing portion of the IOSRAM data is not word
680 	 * aligned, it will always be copied using byte access.
681 	 */
682 	IOSRAMLOG(1, "RD: key: 0x%x off:%x len:%x buf:%p\n",
683 	    key, off, len, buf);
684 
685 	/*
686 	 * Acquire lock and look for the requested chunk.  If it exists, make
687 	 * sure the requested read is within the chunk's bounds and no tunnel
688 	 * switch is active.
689 	 */
690 	mutex_enter(&iosram_mutex);
691 	chunkp = iosram_find_chunk(key);
692 	chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;
693 
694 	if (iosram_master == NULL) {
695 		error = EIO;
696 	} else if (chunkp == NULL) {
697 		error = EINVAL;
698 	} else if ((off >= chunk_len) || (len > chunk_len) ||
699 	    ((off + len) > chunk_len)) {
700 		error = EMSGSIZE;
701 	} else if (iosram_tswitch_active) {
702 		error = EAGAIN;
703 	}
704 
705 	if (error) {
706 		mutex_exit(&iosram_mutex);
707 		return (error);
708 	}
709 
710 	/*
711 	 * Bump the reference count to indicate how many threads are accessing
712 	 * the IOSRAM, then release the lock.
713 	 */
714 	iosram_rw_active++;
715 #if defined(DEBUG)
716 	if (iosram_rw_active > iosram_rw_active_max) {
717 		iosram_rw_active_max = iosram_rw_active;
718 	}
719 #endif
720 	mutex_exit(&iosram_mutex);
721 
722 	IOSRAM_STAT(read);
723 	IOSRAM_STAT_ADD(bread, len);
724 
725 	/* Get starting address and map handle */
726 	iosramp = chunkp->basep + off;
727 	handle = iosram_handle;
728 
729 	/*
730 	 * Align the off to word boundary and then try reading/writing data
731 	 * using double word or word access.
732 	 */
733 	if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
734 		int	cnt = UINT32SZ - boff;
735 
736 		if (cnt > len) {
737 			cnt = len;
738 		}
739 		IOSRAMLOG(2,
740 		    "RD: align rep_get8(buf:%p sramp:%p cnt:%x) len:%x\n",
741 		    buf, iosramp, cnt, len);
742 		ddi_rep_get8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
743 		buf += cnt;
744 		iosramp += cnt;
745 		len -= cnt;
746 	}
747 
748 	if ((len >= UINT64SZ) &&
749 	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
750 		/*
751 		 * Both source and destination are double word aligned
752 		 */
753 		int cnt = len/UINT64SZ;
754 
755 		IOSRAMLOG(2,
756 		    "RD: rep_get64(buf:%p sramp:%p cnt:%x) len:%x\n",
757 		    buf, iosramp, cnt, len);
758 		ddi_rep_get64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
759 		    cnt, DDI_DEV_AUTOINCR);
760 		iosramp += cnt * UINT64SZ;
761 		buf += cnt * UINT64SZ;
762 		len -= cnt * UINT64SZ;
763 
764 		/*
765 		 * read remaining data using word and byte access
766 		 */
767 		if (len >= UINT32SZ) {
768 			IOSRAMLOG(2,
769 			    "RD: get32(buf:%p sramp:%p) len:%x\n",
770 			    buf, iosramp, len, NULL);
771 			*(uint32_t *)buf = ddi_get32(handle,
772 			    (uint32_t *)iosramp);
773 			iosramp += UINT32SZ;
774 			buf += UINT32SZ;
775 			len -= UINT32SZ;
776 		}
777 
778 		if (len != 0) {
779 			ddi_rep_get8(handle, buf, iosramp, len,
780 			    DDI_DEV_AUTOINCR);
781 		}
782 	} else if ((len >= UINT32SZ) &&
783 	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
784 		/*
785 		 * Both source and destination are word aligned
786 		 */
787 		int cnt = len/UINT32SZ;
788 
789 		IOSRAMLOG(2,
790 		    "RD: rep_get32(buf:%p sramp:%p cnt:%x) len:%x\n",
791 		    buf, iosramp, cnt, len);
792 		ddi_rep_get32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
793 		    cnt, DDI_DEV_AUTOINCR);
794 		iosramp += cnt * UINT32SZ;
795 		buf += cnt * UINT32SZ;
796 		len -= cnt * UINT32SZ;
797 
798 		/*
799 		 * copy the remainder using byte access
800 		 */
801 		if (len != 0) {
802 			ddi_rep_get8(handle, buf, iosramp, len,
803 			    DDI_DEV_AUTOINCR);
804 		}
805 	} else if (len != 0) {
806 		/*
807 		 * We know that "off" (i.e. iosramp) is at least word aligned,
808 		 * so we read the IOSRAM a word at a time and copy it into the
809 		 * buffer a byte at a time.
810 		 */
811 		ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);
812 
813 		IOSRAMLOG(2,
814 		    "RD: unaligned get32(buf:%p sramp:%p) len:%x\n",
815 		    buf, iosramp, len, NULL);
816 		for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
817 			word.data =  ddi_get32(handle, (uint32_t *)iosramp);
818 			*buf++ = word.cbuf[0];
819 			*buf++ = word.cbuf[1];
820 			*buf++ = word.cbuf[2];
821 			*buf++ = word.cbuf[3];
822 		}
823 
824 		/*
825 		 * copy the remaining data using byte access
826 		 */
827 		if (len != 0) {
828 			ddi_rep_get8(handle, buf, iosramp, len,
829 			    DDI_DEV_AUTOINCR);
830 		}
831 	}
832 
833 	/*
834 	 * Reacquire mutex lock, decrement refcnt and if refcnt is 0 and any
835 	 * threads are waiting for r/w activity to complete, wake them up.
836 	 */
837 	mutex_enter(&iosram_mutex);
838 	ASSERT(iosram_rw_active > 0);
839 
840 	if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
841 		iosram_rw_wakeup = 0;
842 		cv_broadcast(&iosram_rw_wait);
843 	}
844 	mutex_exit(&iosram_mutex);
845 
846 	return (error);
847 }
848 
849 
850 /*
851  * _iosram_write(key, off, len, dptr, force)
852  *	Internal common routine to write to the IOSRAM.
853  */
854 static int
855 _iosram_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr, int force)
856 {
857 	iosram_chunk_t		*chunkp;
858 	uint32_t		chunk_len;
859 	uint8_t			*iosramp;
860 	ddi_acc_handle_t	handle;
861 	int			boff;
862 	union {
863 		uint8_t	cbuf[UINT32SZ];
864 		uint32_t data;
865 	} word;
866 
867 	int			error = 0;
868 	uint8_t			*buf = (uint8_t *)dptr;
869 
870 	/*
871 	 * We try to write to the IOSRAM using double word or word access
872 	 * provided both "off" and "buf" are (or can be) double word or word
873 	 * aligned.  Otherwise, we try to align "off" to a word boundary and
874 	 * then write data to the IOSRAM using word access, but read data
875 	 * from the buf buffer using byte access.
876 	 *
877 	 * If the leading/trailing portion of the IOSRAM data is not word
878 	 * aligned, it will always be written using byte access.
879 	 */
880 	IOSRAMLOG(1, "WR: key: 0x%x off:%x len:%x buf:%p\n",
881 	    key, off, len, buf);
882 
883 	/*
884 	 * Acquire lock and look for the requested chunk.  If it exists, make
885 	 * sure the requested write is within the chunk's bounds and no tunnel
886 	 * switch is active.
887 	 */
888 	mutex_enter(&iosram_mutex);
889 	chunkp = iosram_find_chunk(key);
890 	chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;
891 
892 	if (iosram_master == NULL) {
893 		error = EIO;
894 	} else if (chunkp == NULL) {
895 		error = EINVAL;
896 	} else if ((off >= chunk_len) || (len > chunk_len) ||
897 	    ((off+len) > chunk_len)) {
898 		error = EMSGSIZE;
899 	} else if (iosram_tswitch_active && !force) {
900 		error = EAGAIN;
901 	}
902 
903 	if (error) {
904 		mutex_exit(&iosram_mutex);
905 		return (error);
906 	}
907 
908 	/*
909 	 * If this is a forced write and there's a tunnel switch in progress,
910 	 * abort the switch.
911 	 */
912 	if (iosram_tswitch_active && force) {
913 		cmn_err(CE_NOTE, "!iosram: Aborting tswitch on force_write");
914 		iosram_abort_tswitch();
915 	}
916 
917 	/*
918 	 * Bump the reference count to indicate how many threads are accessing
919 	 * the IOSRAM, then release the lock.
920 	 */
921 	iosram_rw_active++;
922 #if defined(DEBUG)
923 	if (iosram_rw_active > iosram_rw_active_max) {
924 		iosram_rw_active_max = iosram_rw_active;
925 	}
926 #endif
927 	mutex_exit(&iosram_mutex);
928 
929 
930 	IOSRAM_STAT(write);
931 	IOSRAM_STAT_ADD(bwrite, len);
932 
933 	/* Get starting address and map handle */
934 	iosramp = chunkp->basep + off;
935 	handle = iosram_handle;
936 
937 	/*
938 	 * Align the off to word boundary and then try reading/writing
939 	 * data using double word or word access.
940 	 */
941 	if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
942 		int	cnt = UINT32SZ - boff;
943 
944 		if (cnt > len) {
945 			cnt = len;
946 		}
947 		IOSRAMLOG(2,
948 		    "WR: align rep_put8(buf:%p sramp:%p cnt:%x) len:%x\n",
949 		    buf, iosramp, cnt, len);
950 		ddi_rep_put8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
951 		buf += cnt;
952 		iosramp += cnt;
953 		len -= cnt;
954 	}
955 
956 	if ((len >= UINT64SZ) &&
957 	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
958 		/*
959 		 * Both source and destination are double word aligned
960 		 */
961 		int cnt = len/UINT64SZ;
962 
963 		IOSRAMLOG(2,
964 		    "WR: rep_put64(buf:%p sramp:%p cnt:%x) len:%x\n",
965 		    buf, iosramp, cnt, len);
966 		ddi_rep_put64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
967 		    cnt, DDI_DEV_AUTOINCR);
968 		iosramp += cnt * UINT64SZ;
969 		buf += cnt * UINT64SZ;
970 		len -= cnt * UINT64SZ;
971 
972 		/*
973 		 * Copy the remaining data using word & byte access
974 		 */
975 		if (len >= UINT32SZ) {
976 			IOSRAMLOG(2,
977 			    "WR: put32(buf:%p sramp:%p) len:%x\n", buf, iosramp,
978 			    len, NULL);
979 			ddi_put32(handle, (uint32_t *)iosramp,
980 			    *(uint32_t *)buf);
981 			iosramp += UINT32SZ;
982 			buf += UINT32SZ;
983 			len -= UINT32SZ;
984 		}
985 
986 		if (len != 0) {
987 			ddi_rep_put8(handle, buf, iosramp, len,
988 			    DDI_DEV_AUTOINCR);
989 		}
990 	} else if ((len >= UINT32SZ) &&
991 	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
992 		/*
993 		 * Both source and destination are word aligned
994 		 */
995 		int cnt = len/UINT32SZ;
996 
997 		IOSRAMLOG(2,
998 		    "WR: rep_put32(buf:%p sramp:%p cnt:%x) len:%x\n",
999 		    buf, iosramp, cnt, len);
1000 		ddi_rep_put32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
1001 		    cnt, DDI_DEV_AUTOINCR);
1002 		iosramp += cnt * UINT32SZ;
1003 		buf += cnt * UINT32SZ;
1004 		len -= cnt * UINT32SZ;
1005 
1006 		/*
1007 		 * copy the remainder using byte access
1008 		 */
1009 		if (len != 0) {
1010 			ddi_rep_put8(handle, buf, iosramp, len,
1011 			    DDI_DEV_AUTOINCR);
1012 		}
1013 	} else if (len != 0) {
1014 		/*
1015 		 * We know that "off" is at least word aligned, so we read
1016 		 * data from the buf buffer a byte at a time and write it to
1017 		 * the IOSRAM a word at a time.
1018 		 */
1019 
1020 		ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);
1021 
1022 		IOSRAMLOG(2,
1023 		    "WR: unaligned put32(buf:%p sramp:%p) len:%x\n",
1024 		    buf, iosramp, len, NULL);
1025 		for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
1026 			word.cbuf[0] = *buf++;
1027 			word.cbuf[1] = *buf++;
1028 			word.cbuf[2] = *buf++;
1029 			word.cbuf[3] = *buf++;
1030 			ddi_put32(handle, (uint32_t *)iosramp, word.data);
1031 		}
1032 
1033 		/*
1034 		 * copy the remaining data using byte access
1035 		 */
1036 		if (len != 0) {
1037 			ddi_rep_put8(handle, buf, iosramp,
1038 			    len, DDI_DEV_AUTOINCR);
1039 		}
1040 	}
1041 
1042 	/*
1043 	 * Reacquire mutex lock, decrement refcnt and if refcnt is 0 and
1044 	 * any threads are waiting for r/w activity to complete, wake them up.
1045 	 */
1046 	mutex_enter(&iosram_mutex);
1047 	ASSERT(iosram_rw_active > 0);
1048 
1049 	if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
1050 		iosram_rw_wakeup = 0;
1051 		cv_broadcast(&iosram_rw_wait);
1052 	}
1053 	mutex_exit(&iosram_mutex);
1054 
1055 	return (error);
1056 }
1057 
1058 
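/*
 * iosram_force_write(key, off, len, dptr)
 *	Write to the IOSRAM even if a tunnel switch is in progress, aborting
 *	the switch if necessary.
 */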
1059 int
1060 iosram_force_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
1061 {
1062 	return (_iosram_write(key, off, len, dptr, 1 /* force */));
1063 }
1064 
1065 
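/*
 * iosram_wr(key, off, len, dptr)
 *	Normal (non-forced) write to the IOSRAM chunk identified by "key".
 */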
1066 int
1067 iosram_wr(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
1068 {
1069 	return (_iosram_write(key, off, len, dptr, 0));
1070 }
1071 
1072 
1073 /*
1074  * iosram_register(key, handler, arg)
1075  *	Register a handler and an arg for the specified chunk.  This handler
1076  *	will be invoked when an interrupt is received from the other side and
1077  *	the int_pending flag for the corresponding key is marked
1078  *	IOSRAM_INT_TO_DOM.
1079  */
1080 /* ARGSUSED */
1081 int
1082 iosram_register(uint32_t key, void (*handler)(), void *arg)
1083 {
1084 	struct iosram_chunk	*chunkp;
1085 	int			error = 0;
1086 
1087 	/*
1088 	 * Acquire lock and look for the requested chunk.  If it exists, and no
1089 	 * other callback is registered, proceed with the registration.
1090 	 */
1091 	mutex_enter(&iosram_mutex);
1092 	chunkp = iosram_find_chunk(key);
1093 
1094 	if (iosram_master == NULL) {
1095 		error = EIO;
1096 	} else if (chunkp == NULL) {
1097 		error = EINVAL;
1098 	} else if (chunkp->cback.handler != NULL) {
1099 		error = EBUSY;
1100 	} else {
1101 		chunkp->cback.busy = 0;
1102 		chunkp->cback.unregister = 0;
1103 		chunkp->cback.handler = handler;
1104 		chunkp->cback.arg = arg;
1105 	}
1106 	mutex_exit(&iosram_mutex);
1107 
1108 	IOSRAMLOG(1, "REG: key: 0x%x hdlr:%p arg:%p error:%d\n",
1109 	    key, handler, arg, error);
1110 
1111 	return (error);
1112 }
1113 
1114 
1115 /*
1116  * iosram_unregister()
1117  *	Unregister handler associated with the specified chunk.
1118  */
1119 int
1120 iosram_unregister(uint32_t key)
1121 {
1122 	struct iosram_chunk	*chunkp;
1123 	int			error = 0;
1124 
1125 	/*
1126 	 * Acquire lock and look for the requested chunk.  If it exists and has
1127 	 * a callback registered, unregister it.
1128 	 */
1129 	mutex_enter(&iosram_mutex);
1130 	chunkp = iosram_find_chunk(key);
1131 
1132 	if (iosram_master == NULL) {
1133 		error = EIO;
1134 	} else if (chunkp == NULL) {
1135 		error = EINVAL;
1136 	} else if (chunkp->cback.busy) {
1137 		/*
1138 		 * If the handler is already busy (being invoked), then we flag
1139 		 * it so it will be unregistered after the invocation completes.
1140 		 */
1141 		DPRINTF(1, ("IOSRAM(%d): unregister: delaying unreg k:0x%08x\n",
1142 		    iosram_master->instance, key));
1143 		chunkp->cback.unregister = 1;
1144 	} else if (chunkp->cback.handler != NULL) {
1145 		chunkp->cback.handler = NULL;
1146 		chunkp->cback.arg = NULL;
1147 	}
1148 	mutex_exit(&iosram_mutex);
1149 
1150 	IOSRAMLOG(1, "UNREG: key:%x error:%d\n", key, error, NULL, NULL);
1151 	return (error);
1152 }
1153 
1154 
1155 /*
1156  * iosram_get_flag():
1157  *	Get data_valid and/or int_pending flags associated with the
1158  *	specified key.
1159  */
1160 int
1161 iosram_get_flag(uint32_t key, uint8_t *data_valid, uint8_t *int_pending)
1162 {
1163 	iosram_chunk_t	*chunkp;
1164 	iosram_flags_t	flags;
1165 	int		error = 0;
1166 
1167 	/*
1168 	 * Acquire lock and look for the requested chunk.  If it exists, and no
1169 	 * tunnel switch is in progress, read the chunk's flags.
1170 	 */
1171 	mutex_enter(&iosram_mutex);
1172 	chunkp = iosram_find_chunk(key);
1173 
1174 	if (iosram_master == NULL) {
1175 		error = EIO;
1176 	} else if (chunkp == NULL) {
1177 		error = EINVAL;
1178 	} else if (iosram_tswitch_active) {
1179 		error = EAGAIN;
1180 	} else {
1181 		IOSRAM_STAT(getflag);
1182 
1183 		/*
1184 		 * Read the flags
1185 		 */
1186 		ddi_rep_get8(iosram_handle, (uint8_t *)&flags,
1187 		    (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
1188 		    DDI_DEV_AUTOINCR);
1189 
1190 		/*
1191 		 * Get each flag value that the caller is interested in.
1192 		 */
1193 		if (data_valid != NULL) {
1194 			*data_valid = flags.data_valid;
1195 		}
1196 
1197 		if (int_pending != NULL) {
1198 			*int_pending = flags.int_pending;
1199 		}
1200 	}
1201 	mutex_exit(&iosram_mutex);
1202 
1203 	IOSRAMLOG(1, "GetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
1204 	    key, flags.data_valid, flags.int_pending, error);
1205 	return (error);
1206 }
1207 
1208 
1209 /*
1210  * iosram_set_flag():
1211  *	Set data_valid and int_pending flags associated with the specified key.
1212  */
1213 int
1214 iosram_set_flag(uint32_t key, uint8_t data_valid, uint8_t int_pending)
1215 {
1216 	iosram_chunk_t	*chunkp;
1217 	iosram_flags_t	flags;
1218 	int		error = 0;
1219 
1220 	/*
1221 	 * Acquire lock and look for the requested chunk.  If it exists, and no
1222 	 * tunnel switch is in progress, write the chunk's flags.
1223 	 */
1224 	mutex_enter(&iosram_mutex);
1225 	chunkp = iosram_find_chunk(key);
1226 
1227 	if (iosram_master == NULL) {
1228 		error = EIO;
1229 	} else if ((chunkp == NULL) ||
1230 	    ((data_valid != IOSRAM_DATA_INVALID) &&
1231 	    (data_valid != IOSRAM_DATA_VALID)) ||
1232 	    ((int_pending != IOSRAM_INT_NONE) &&
1233 	    (int_pending != IOSRAM_INT_TO_SSC) &&
1234 	    (int_pending != IOSRAM_INT_TO_DOM))) {
1235 		error = EINVAL;
1236 	} else if (iosram_tswitch_active) {
1237 		error = EAGAIN;
1238 	} else {
1239 		IOSRAM_STAT(setflag);
1240 		flags.data_valid = data_valid;
1241 		flags.int_pending = int_pending;
1242 		ddi_rep_put8(iosram_handle, (uint8_t *)&flags,
1243 		    (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
1244 		    DDI_DEV_AUTOINCR);
1245 	}
1246 	mutex_exit(&iosram_mutex);
1247 
1248 	IOSRAMLOG(1, "SetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
1249 	    key, flags.data_valid, flags.int_pending, error);
1250 	return (error);
1251 }
1252 
1253 
1254 /*
1255  * iosram_ctrl()
1256  *	This function provides access to a variety of services not available
1257  *	through the basic API.
1258  */
1259 int
1260 iosram_ctrl(uint32_t key, uint32_t cmd, void *arg)
1261 {
1262 	struct iosram_chunk	*chunkp;
1263 	int			error = 0;
1264 
1265 	/*
1266 	 * Acquire lock and do some argument sanity checking.
1267 	 */
1268 	mutex_enter(&iosram_mutex);
1269 	chunkp = iosram_find_chunk(key);
1270 
1271 	if (iosram_master == NULL) {
1272 		error = EIO;
1273 	} else if (chunkp == NULL) {
1274 		error = EINVAL;
1275 	}
1276 
1277 	if (error != 0) {
1278 		mutex_exit(&iosram_mutex);
1279 		return (error);
1280 	}
1281 
1282 	/*
1283 	 * Arguments seem okay so far, so process the command.
1284 	 */
1285 	switch (cmd) {
1286 		case IOSRAM_CMD_CHUNKLEN:
1287 			/*
1288 			 * Return the length of the chunk indicated by the key.
1289 			 */
1290 			if (arg == NULL) {
1291 				error = EINVAL;
1292 				break;
1293 			}
1294 
1295 			*(uint32_t *)arg = chunkp->toc_data.len;
1296 			break;
1297 
1298 		default:
1299 			error = ENOTSUP;
1300 			break;
1301 	}
1302 
1303 	mutex_exit(&iosram_mutex);
1304 	return (error);
1305 }
1306 
1307 
1308 /*
1309  * iosram_hdr_ctrl()
1310  *	This function provides an interface for the Mailbox Protocol
1311  *	implementation to use when interacting with the IOSRAM header.
1312  */
1313 int
1314 iosram_hdr_ctrl(uint32_t cmd, void *arg)
1315 {
1316 	int	error = 0;
1317 
1318 	/*
1319 	 * Acquire lock and do some argument sanity checking.
1320 	 */
1321 	mutex_enter(&iosram_mutex);
1322 
1323 	if (iosram_master == NULL) {
1324 		error = EIO;
1325 	}
1326 
1327 	if (error != 0) {
1328 		mutex_exit(&iosram_mutex);
1329 		return (error);
1330 	}
1331 
1332 	switch (cmd) {
1333 		case IOSRAM_HDRCMD_GET_SMS_MBOX_VER:
1334 			/*
1335 			 * Return the value of the sms_mbox_version field.
1336 			 */
1337 			if (arg == NULL) {
1338 				error = EINVAL;
1339 				break;
1340 			}
1341 
1342 			*(uint32_t *)arg = IOSRAM_GET_HDRFIELD32(iosram_master,
1343 			    sms_mbox_version);
1344 			break;
1345 
1346 		case IOSRAM_HDRCMD_SET_OS_MBOX_VER:
1347 			/*
1348 			 * Set the value of the os_mbox_version field.
1349 			 */
1350 			IOSRAM_SET_HDRFIELD32(iosram_master, os_mbox_version,
1351 			    (uint32_t)(uintptr_t)arg);
1352 			IOSRAM_SET_HDRFIELD32(iosram_master, os_change_mask,
1353 			    IOSRAM_HDRFIELD_OS_MBOX_VER);
1354 			iosram_send_intr();
1355 			break;
1356 
1357 		case IOSRAM_HDRCMD_REG_CALLBACK:
1358 			iosram_hdrchange_handler = (void (*)())arg;
1359 			break;
1360 
1361 		default:
1362 			error = ENOTSUP;
1363 			break;
1364 	}
1365 
1366 	mutex_exit(&iosram_mutex);
1367 	return (error);
1368 }
1369 
1370 
1371 /*
1372  * iosram_softintr()
1373  *	IOSRAM soft interrupt handler
1374  */
1375 static uint_t
1376 iosram_softintr(caddr_t arg)
1377 {
1378 	uint32_t	hdr_changes;
1379 	iosramsoft_t	*softp = (iosramsoft_t *)arg;
1380 	iosram_chunk_t	*chunkp;
1381 	void		(*handler)();
1382 	int		i;
1383 	uint8_t		flag;
1384 
1385 	DPRINTF(1, ("iosram(%d): in iosram_softintr\n", softp->instance));
1386 
1387 	IOSRAMLOG(2, "SINTR arg/softp:%p  pending:%d busy:%d\n",
1388 	    arg, softp->intr_pending, softp->intr_busy, NULL);
1389 
1390 	mutex_enter(&iosram_mutex);
1391 	mutex_enter(&softp->intr_mutex);
1392 
1393 	/*
1394 	 * Do not process interrupt if interrupt handler is already running or
1395 	 * no interrupts are pending.
1396 	 */
1397 	if (softp->intr_busy || !softp->intr_pending) {
1398 		mutex_exit(&softp->intr_mutex);
1399 		mutex_exit(&iosram_mutex);
1400 		DPRINTF(1, ("IOSRAM(%d): softintr: busy=%d pending=%d\n",
1401 		    softp->instance, softp->intr_busy, softp->intr_pending));
1402 		return (softp->intr_pending ? DDI_INTR_CLAIMED :
1403 		    DDI_INTR_UNCLAIMED);
1404 	}
1405 
1406 	/*
1407 	 * It's possible for the SC to send an interrupt on the new master
1408 	 * before we are able to set our internal state.  If so, we'll retrigger
1409 	 * soft interrupt right after tunnel switch completion.
1410 	 */
1411 	if (softp->state & IOSRAM_STATE_TSWITCH) {
1412 		mutex_exit(&softp->intr_mutex);
1413 		mutex_exit(&iosram_mutex);
1414 		DPRINTF(1, ("IOSRAM(%d): softintr: doing switch "
1415 		    "state=0x%x\n", softp->instance, softp->state));
1416 		return (DDI_INTR_CLAIMED);
1417 	}
1418 
1419 	/*
1420 	 * Do not process interrupt if we are not the master.
1421 	 */
1422 	if (!(softp->state & IOSRAM_STATE_MASTER)) {
1423 		mutex_exit(&softp->intr_mutex);
1424 		mutex_exit(&iosram_mutex);
1425 		DPRINTF(1, ("IOSRAM(%d): softintr: not master, state=0x%x\n",
1426 		    softp->instance, softp->state));
1427 		return (DDI_INTR_CLAIMED);
1428 	}
1429 
1430 	IOSRAM_STAT(sintr_recv);
1431 
1432 	/*
1433 	 * If the driver is suspended, then we should not process any
1434 	 * interrupts.  Instead, we trigger a soft interrupt when the driver
1435 	 * resumes.
1436 	 */
1437 	if (softp->suspended) {
1438 		mutex_exit(&softp->intr_mutex);
1439 		mutex_exit(&iosram_mutex);
1440 		DPRINTF(1, ("IOSRAM(%d): softintr: suspended\n",
1441 		    softp->instance));
1442 		return (DDI_INTR_CLAIMED);
1443 	}
1444 
1445 	/*
1446 	 * Indicate that the IOSRAM interrupt handler is busy.  Note that this
1447 	 * includes incrementing the reader/writer count, since we don't want
1448 	 * any tunnel switches to start up while we're processing callbacks.
1449 	 */
1450 	softp->intr_busy = 1;
1451 	iosram_rw_active++;
1452 #if defined(DEBUG)
1453 	if (iosram_rw_active > iosram_rw_active_max) {
1454 		iosram_rw_active_max = iosram_rw_active;
1455 	}
1456 #endif
1457 
1458 	do {
1459 		DPRINTF(1, ("IOSRAM(%d): softintr: processing interrupt\n",
1460 		    softp->instance));
1461 
1462 		softp->intr_pending = 0;
1463 
1464 		mutex_exit(&softp->intr_mutex);
1465 
1466 		/*
1467 		 * Process changes to the IOSRAM header.
1468 		 */
1469 		hdr_changes = IOSRAM_GET_HDRFIELD32(iosram_master,
1470 		    sms_change_mask);
1471 		if (hdr_changes != 0) {
1472 			int	error;
1473 
1474 			IOSRAM_SET_HDRFIELD32(iosram_master, sms_change_mask,
1475 			    0);
1476 			if (hdr_changes & IOSRAM_HDRFIELD_TOC_INDEX) {
1477 				/*
1478 				 * XXX is it safe to temporarily release the
1479 				 * iosram_mutex here?
1480 				 */
1481 				mutex_exit(&iosram_mutex);
1482 				error = iosram_read_toc(iosram_master);
1483 				mutex_enter(&iosram_mutex);
1484 				if (error) {
1485 					cmn_err(CE_WARN, "iosram_read_toc: new"
1486 					    " TOC invalid; using old TOC.");
1487 				}
1488 				iosram_update_addrs(iosram_master);
1489 			}
1490 
1491 			if (iosram_hdrchange_handler != NULL) {
1492 				mutex_exit(&iosram_mutex);
1493 				iosram_hdrchange_handler();
1494 				mutex_enter(&iosram_mutex);
1495 			}
1496 		}
1497 
1498 		/*
1499 		 * Get data_valid/int_pending flags and generate a callback if
1500 		 * applicable.  For now, we read only those flags for which a
1501 		 * callback has been registered.  We can optimize reading of
1502 		 * flags by reading them all at once and then processing them
1503 		 * later.
1504 		 */
1505 		for (i = 0, chunkp = chunks; i < nchunks; i++,
1506 		    chunkp++) {
1507 #if DEBUG
1508 			flag =  ddi_get8(iosram_handle,
1509 			    &(chunkp->flagsp->int_pending));
1510 			DPRINTF(1, ("IOSRAM(%d): softintr chunk #%d "
1511 			    "flag=0x%x handler=%p\n",
1512 			    softp->instance, i, (int)flag,
1513 			    chunkp->cback.handler));
1514 #endif
1515 			if ((handler = chunkp->cback.handler) == NULL) {
1516 				continue;
1517 			}
1518 			flag = ddi_get8(iosram_handle,
1519 			    &(chunkp->flagsp->int_pending));
1520 			if (flag == IOSRAM_INT_TO_DOM) {
1521 				DPRINTF(1,
1522 				    ("IOSRAM(%d): softintr: invoking handler\n",
1523 				    softp->instance));
1524 				IOSRAMLOG(1,
1525 				    "SINTR invoking hdlr:%p arg:%p index:%d\n",
1526 				    handler, chunkp->cback.arg, i, NULL);
1527 				IOSRAM_STAT(callbacks);
1528 
1529 				ddi_put8(iosram_handle,
1530 				    &(chunkp->flagsp->int_pending),
1531 				    IOSRAM_INT_NONE);
1532 				chunkp->cback.busy = 1;
1533 				mutex_exit(&iosram_mutex);
1534 				(*handler)(chunkp->cback.arg);
1535 				mutex_enter(&iosram_mutex);
1536 				chunkp->cback.busy = 0;
1537 
1538 				/*
1539 				 * If iosram_unregister was called while the
1540 				 * callback was being invoked, complete the
1541 				 * unregistration here.
1542 				 */
1543 				if (chunkp->cback.unregister) {
1544 					DPRINTF(1, ("IOSRAM(%d): softintr: "
1545 					    "delayed unreg k:0x%08x\n",
1546 					    softp->instance,
1547 					    chunkp->toc_data.key));
1548 					chunkp->cback.handler = NULL;
1549 					chunkp->cback.arg = NULL;
1550 					chunkp->cback.unregister = 0;
1551 				}
1552 			}
1553 
1554 			/*
1555 			 * If there's a tunnel switch waiting to run, give it
1556 			 * higher priority than these callbacks by bailing out.
1557 			 * They'll still be invoked on the new master iosram
1558 			 * when the tunnel switch is done.
1559 			 */
1560 			if (iosram_tswitch_active) {
1561 				break;
1562 			}
1563 		}
1564 
1565 		mutex_enter(&softp->intr_mutex);
1566 
1567 	} while (softp->intr_pending && !softp->suspended &&
1568 	    !iosram_tswitch_active);
1569 
1570 	/*
1571 	 * Indicate IOSRAM interrupt handler is not BUSY any more
1572 	 */
1573 	softp->intr_busy = 0;
1574 
1575 	ASSERT(iosram_rw_active > 0);
1576 	if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
1577 		iosram_rw_wakeup = 0;
1578 		cv_broadcast(&iosram_rw_wait);
1579 	}
1580 
1581 	mutex_exit(&softp->intr_mutex);
1582 	mutex_exit(&iosram_mutex);
1583 
1584 	DPRINTF(1, ("iosram(%d): softintr exit\n", softp->instance));
1585 
1586 	return (DDI_INTR_CLAIMED);
1587 }
1588 
1589 
1590 /*
1591  * iosram_intr()
1592  *	IOSRAM real interrupt handler
1593  */
1594 static uint_t
1595 iosram_intr(caddr_t arg)
1596 {
1597 	iosramsoft_t	*softp = (iosramsoft_t *)arg;
1598 	int		result = DDI_INTR_UNCLAIMED;
1599 	uint32_t	int_status;
1600 
1601 	DPRINTF(2, ("iosram(%d): in iosram_intr\n", softp->instance));
1602 
1603 	mutex_enter(&softp->intr_mutex);
1604 
1605 	if (softp->sbbc_handle == NULL) {
1606 		/*
1607 		 * The SBBC register region is not mapped in.
1608 		 * Set the interrupt pending flag here, and process the
1609 		 * interrupt after the tunnel switch.
1610 		 */
1611 		DPRINTF(1, ("IOSRAM(%d): iosram_intr: SBBC not mapped\n",
1612 		    softp->instance));
1613 		softp->intr_pending = 1;
1614 		mutex_exit(&softp->intr_mutex);
1615 		return (DDI_INTR_UNCLAIMED);
1616 	}
1617 
1618 	int_status = ddi_get32(softp->sbbc_handle,
1619 	    &(softp->sbbc_region->int_status.reg));
1620 	DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n", int_status));
1621 
1622 	if (int_status & IOSRAM_SBBC_INT0) {
1623 		result = DDI_INTR_CLAIMED;
1624 		DPRINTF(1, ("iosram_intr: int0 detected!\n"));
1625 	}
1626 
1627 	if (int_status & IOSRAM_SBBC_INT1) {
1628 		result = DDI_INTR_CLAIMED;
1629 		DPRINTF(1, ("iosram_intr: int1 detected!\n"));
1630 	}
1631 
1632 	if (result == DDI_INTR_CLAIMED) {
1633 		ddi_put32(softp->sbbc_handle,
1634 		    &(softp->sbbc_region->int_status.reg), int_status);
1635 		int_status = ddi_get32(softp->sbbc_handle,
1636 		    &(softp->sbbc_region->int_status.reg));
1637 		DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n",
1638 		    int_status));
1639 
1640 		softp->intr_pending = 1;
1641 		/*
1642 		 * Trigger soft interrupt if not executing and
1643 		 * not suspended.
1644 		 */
1645 		if (!softp->intr_busy && !softp->suspended &&
1646 		    (softp->softintr_id != NULL)) {
1647 			DPRINTF(1, ("iosram(%d): trigger softint\n",
1648 			    softp->instance));
1649 			ddi_trigger_softintr(softp->softintr_id);
1650 		}
1651 	}
1652 
1653 	IOSRAM_STAT(intr_recv);
1654 
1655 	mutex_exit(&softp->intr_mutex);
1656 
1657 	IOSRAMLOG(2, "INTR arg/softp:%p  pending:%d busy:%d\n",
1658 	    arg, softp->intr_pending, softp->intr_busy, NULL);
1659 	DPRINTF(1, ("iosram(%d): iosram_intr exit\n", softp->instance));
1660 
1661 	return (result);
1662 }
1663 
1664 
1665 /*
1666  * iosram_send_intr()
1667  *	Send an interrupt to the SSP side via AXQ driver
1668  */
1669 int
1670 iosram_send_intr()
1671 {
1672 	IOSRAMLOG(1, "SendIntr called\n", NULL, NULL, NULL, NULL);
1673 	IOSRAM_STAT(intr_send);
1674 	DPRINTF(1, ("iosram iosram_send_intr invoked\n"));
1675 
1676 	return (axq_cpu2ssc_intr(0));
1677 }
1678 
1679 
1680 #if defined(DEBUG)
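/*
 * iosram_dummy_cback()
 *	Debug-only chunk callback registered via the IOSRAM_REG_CBACK ioctl.
 */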
1681 static void
1682 iosram_dummy_cback(void *arg)
1683 {
1684 	DPRINTF(1, ("iosram_dummy_cback invoked arg:%p\n", arg));
1685 }
1686 #endif /* DEBUG */
1687 
1688 
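/*
 * iosram_ioctl()
 *	Driver ioctl entry point.  All of the supported commands are only
 *	available in DEBUG builds and exist to exercise the IOSRAM interfaces
 *	from userland.
 */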
1689 /*ARGSUSED1*/
1690 static int
1691 iosram_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1692 		int *rvalp)
1693 {
1694 	struct iosramsoft	*softp;
1695 	int			error = DDI_SUCCESS;
1696 
1697 	softp = ddi_get_soft_state(iosramsoft_statep, getminor(dev));
1698 	if (softp == NULL) {
1699 		return (ENXIO);
1700 	}
1701 	IOSRAMLOG(1, "IOCTL: dev:%p cmd:%x arg:%p ... instance %d\n",
1702 	    dev, cmd, arg, softp->instance);
1703 
1704 	switch (cmd) {
1705 #if defined(DEBUG)
1706 	case IOSRAM_GET_FLAG:
1707 		{
1708 		iosram_io_t	req;
1709 		uint8_t		data_valid, int_pending;
1710 
1711 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1712 			return (EFAULT);
1713 		}
1714 
1715 		DPRINTF(2, ("IOSRAM_GET_FLAG(key:%x\n", req.key));
1716 
1717 		req.retval = iosram_get_flag(req.key, &data_valid,
1718 		    &int_pending);
1719 		req.data_valid = (uint32_t)data_valid;
1720 		req.int_pending = (uint32_t)int_pending;
1721 
1722 		if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1723 			DPRINTF(1,
1724 			    ("IOSRAM_GET_FLAG: can't copyout req.retval (%x)",
1725 			    req.retval));
1726 			error = EFAULT;
1727 		}
1728 
1729 		return (error);
1730 		}
1731 
1732 	case IOSRAM_SET_FLAG:
1733 		{
1734 		iosram_io_t	req;
1735 
1736 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1737 			return (EFAULT);
1738 		}
1739 
1740 		DPRINTF(2, ("IOSRAM_SET_FLAG(key:%x data_valid:%x "
1741 		    "int_pending:%x\n", req.key, req.data_valid,
1742 		    req.int_pending));
1743 
1744 		req.retval = iosram_set_flag(req.key, req.data_valid,
1745 		    req.int_pending);
1746 
1747 		if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1748 			DPRINTF(1, ("IOSRAM_SET_FLAG: can't copyout req.retval"
1749 			    " (%x)\n", req.retval));
1750 			error = EFAULT;
1751 		}
1752 
1753 		return (error);
1754 		}
1755 
1756 	case IOSRAM_RD:
1757 		{
1758 		caddr_t		bufp;
1759 		int		len;
1760 		iosram_io_t	req;
1761 
1762 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1763 			return (EFAULT);
1764 		}
1765 
1766 		DPRINTF(2, ("IOSRAM_RD(k:%x o:%x len:%x bufp:%p\n", req.key,
1767 		    req.off, req.len, (void *)(uintptr_t)req.bufp));
1768 
1769 		len = req.len;
1770 		bufp = kmem_alloc(len, KM_SLEEP);
1771 
1772 		req.retval = iosram_rd(req.key, req.off, req.len, bufp);
1773 
1774 		if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, len, mode)) {
1775 			DPRINTF(1, ("IOSRAM_RD: copyout(%p, %p,%x,%x) failed\n",
1776 			    bufp, (void *)(uintptr_t)req.bufp, len, mode));
1777 			error = EFAULT;
1778 		} else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1779 			DPRINTF(1, ("IOSRAM_RD: can't copyout retval (%x)\n",
1780 			    req.retval));
1781 			error = EFAULT;
1782 		}
1783 
1784 		kmem_free(bufp, len);
1785 		return (error);
1786 		}
1787 
1788 	case IOSRAM_WR:
1789 		{
1790 		caddr_t		bufp;
1791 		iosram_io_t	req;
1792 		int		len;
1793 
1794 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1795 			return (EFAULT);
1796 		}
1797 
1798 		DPRINTF(2, ("IOSRAM_WR(k:%x o:%x len:%x bufp:%p\n",
1799 		    req.key, req.off, req.len, req.bufp));
1800 		len = req.len;
1801 		bufp = kmem_alloc(len, KM_SLEEP);
1802 		if (ddi_copyin((void *)(uintptr_t)req.bufp, bufp, len, mode)) {
1803 			error = EFAULT;
1804 		} else {
1805 			req.retval = iosram_wr(req.key, req.off, req.len,
1806 			    bufp);
1807 
1808 			if (ddi_copyout(&req, (void *)arg, sizeof (req),
1809 			    mode)) {
1810 				error = EFAULT;
1811 			}
1812 		}
1813 		kmem_free(bufp, len);
1814 		return (error);
1815 		}
1816 
1817 	case IOSRAM_TOC:
1818 		{
1819 		caddr_t		bufp;
1820 		int		len;
1821 		iosram_io_t	req;
1822 
1823 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1824 			return (EFAULT);
1825 		}
1826 
1827 		DPRINTF(2, ("IOSRAM_TOC (req.bufp:%x req.len:%x) \n",
1828 		    req.bufp, req.len));
1829 
1830 		len = req.len;
1831 		bufp = kmem_alloc(len, KM_SLEEP);
1832 
1833 		req.retval = iosram_get_keys((iosram_toc_entry_t *)bufp,
1834 		    &req.len);
1835 
1836 		if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, req.len,
1837 		    mode)) {
1838 			DPRINTF(1,
1839 			    ("IOSRAM_TOC: copyout(%p, %p,%x,%x) failed\n",
1840 			    bufp, (void *)(uintptr_t)req.bufp, req.len, mode));
1841 			error = EFAULT;
1842 		} else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1843 			DPRINTF(1, ("IOSRAM_TOC: can't copyout retval (%x)\n",
1844 			    req.retval));
1845 			error = EFAULT;
1846 		}
1847 		kmem_free(bufp, len);
1848 		return (error);
1849 		}
1850 
1851 	case IOSRAM_SEND_INTR:
1852 		{
1853 		DPRINTF(2, ("IOSRAM_SEND_INTR\n"));
1854 
1855 		switch ((int)arg) {
1856 		case 0x11:
1857 		case 0x22:
1858 		case 0x44:
1859 		case 0x88:
1860 			ddi_put32(softp->sbbc_handle,
1861 			    &(softp->sbbc_region->int_enable.reg), (int)arg);
1862 			DPRINTF(1, ("Wrote 0x%x to int_enable.reg\n",
1863 			    (int)arg));
1864 			break;
1865 		case 0xBB:
1866 			ddi_put32(softp->sbbc_handle,
1867 			    &(softp->sbbc_region->p0_int_gen.reg), 1);
1868 			DPRINTF(1, ("Wrote 1 to p0_int_gen.reg\n"));
1869 			break;
1870 		default:
1871 			error = iosram_send_intr();
1872 		}
1873 
1874 		return (error);
1875 		}
1876 
1877 	case IOSRAM_PRINT_CBACK:
1878 		iosram_print_cback();
1879 		break;
1880 
1881 	case IOSRAM_PRINT_STATE:
1882 		iosram_print_state((int)arg);
1883 		break;
1884 
1885 #if IOSRAM_STATS
1886 	case IOSRAM_PRINT_STATS:
1887 		iosram_print_stats();
1888 		break;
1889 #endif
1890 
1891 #if IOSRAM_LOG
1892 	case IOSRAM_PRINT_LOG:
1893 		iosram_print_log((int)arg);
1894 		break;
1895 #endif
1896 
1897 	case IOSRAM_TUNNEL_SWITCH:
1898 		error = iosram_switchfrom((int)arg);
1899 		break;
1900 
1901 	case IOSRAM_PRINT_FLAGS:
1902 		iosram_print_flags();
1903 		break;
1904 
1905 	case IOSRAM_REG_CBACK:
1906 		{
1907 		iosram_io_t	req;
1908 
1909 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1910 			return (EFAULT);
1911 		}
1912 
1913 		DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key));
1914 
1915 		req.retval = iosram_register(req.key, iosram_dummy_cback,
1916 		    (void *)(uintptr_t)req.key);
1917 		if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1918 			error = EFAULT;
1919 		}
1920 
1921 		return (error);
1922 		}
1923 
1924 	case IOSRAM_UNREG_CBACK:
1925 		{
1926 		iosram_io_t	req;
1927 
1928 		if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1929 			return (EFAULT);
1930 		}
1931 
1932 		DPRINTF(2, ("IOSRAM_UNREG_CBACK(k:%x)\n", req.key));
1933 
1934 		req.retval = iosram_unregister(req.key);
1935 		if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1936 			error = EFAULT;
1937 		}
1938 
1939 		return (error);
1940 		}
1941 
1942 	case IOSRAM_SEMA_ACQUIRE:
1943 	{
1944 		DPRINTF(1, ("IOSRAM_SEMA_ACQUIRE\n"));
1945 		error = iosram_sema_acquire(NULL);
1946 		return (error);
1947 	}
1948 
1949 	case IOSRAM_SEMA_RELEASE:
1950 	{
1951 		DPRINTF(1, ("IOSRAM_SEMA_RELEASE\n"));
1952 		error = iosram_sema_release();
1953 		return (error);
1954 	}
1955 
1956 #endif /* DEBUG */
1957 
1958 	default:
1959 		DPRINTF(1, ("iosram_ioctl: Illegal command %x\n", cmd));
1960 		error = ENOTTY;
1961 	}
1962 
1963 	return (error);
1964 }
1965 
1966 
1967 /*
1968  * iosram_switch_tunnel(softp)
1969  *	Switch master tunnel to the specified instance
1970  *	Must be called while holding iosram_mutex
1971  */
1972 /*ARGSUSED*/
1973 static int
1974 iosram_switch_tunnel(iosramsoft_t *softp)
1975 {
1976 #ifdef DEBUG
1977 	int		instance = softp->instance;
1978 #endif
1979 	int		error = 0;
1980 	iosramsoft_t	*prev_master;
1981 
1982 	ASSERT(mutex_owned(&iosram_mutex));
1983 
1984 	DPRINTF(1, ("tunnel switch new master:%p (%d) current master:%p (%d)\n",
1985 	    softp, instance, iosram_master,
1986 	    ((iosram_master) ? iosram_master->instance : -1)));
1987 	IOSRAMLOG(1, "TSWTCH: new_master:%p (%d) iosram_master:%p (%d)\n",
1988 	    softp, instance, iosram_master,
1989 	    ((iosram_master) ? iosram_master->instance : -1));
1990 
1991 	if (softp == NULL || (softp->state & IOSRAM_STATE_DETACH)) {
1992 		return (ENXIO);
1993 	}
1994 	if (iosram_master == softp) {
1995 		return (0);
1996 	}
1997 
1998 
1999 	/*
2000 	 * We protect against the softp structure being deallocated by setting
2001 	 * the IOSRAM_STATE_TSWITCH state flag. The detach routine will check
2002 	 * for this flag and if set, it will wait for this flag to be reset or
2003 	 * refuse the detach operation.
2004 	 */
2005 	iosram_new_master = softp;
2006 	softp->state |= IOSRAM_STATE_TSWITCH;
2007 	prev_master = iosram_master;
2008 	if (prev_master) {
2009 		prev_master->state |= IOSRAM_STATE_TSWITCH;
2010 	}
2011 	mutex_exit(&iosram_mutex);
2012 
2013 	/*
2014 	 * Map the target IOSRAM, read the TOC, and register interrupts if not
2015 	 * already done.
2016 	 */
2017 	DPRINTF(1, ("iosram(%d): mapping IOSRAM and SBBC\n",
2018 	    softp->instance));
2019 	IOSRAMLOG(1, "TSWTCH: mapping instance:%d  softp:%p\n",
2020 	    instance, softp, NULL, NULL);
2021 
2022 	if (iosram_setup_map(softp) != DDI_SUCCESS) {
2023 		error = ENXIO;
2024 	} else if ((chunks == NULL) && (iosram_read_toc(softp) != 0)) {
2025 		iosram_remove_map(softp);
2026 		error = EINVAL;
2027 	} else if (iosram_add_intr(softp) != DDI_SUCCESS) {
2028 		/*
2029 		 * If there was no previous master, purge the TOC data that
2030 		 * iosram_read_toc() created.
2031 		 */
2032 		if ((prev_master == NULL) && (chunks != NULL)) {
2033 			kmem_free(chunks, nchunks * sizeof (iosram_chunk_t));
2034 			chunks = NULL;
2035 			nchunks = 0;
2036 			iosram_init_hashtab();
2037 		}
2038 		iosram_remove_map(softp);
2039 		error = ENXIO;
2040 	}
2041 
2042 	/*
2043 	 * If we are asked to abort tunnel switch, do so now, before invoking
2044 	 * the OBP callback.
2045 	 */
2046 	if (iosram_tswitch_aborted) {
2047 
2048 		/*
2049 		 * Once the tunnel switch is aborted, this thread should not
2050 		 * resume.  If it does, we simply log a message.  We can't unmap
2051 		 * the new master IOSRAM as it may be accessed in
2052 		 * iosram_abort_tswitch(). It will be unmapped when it is
2053 		 * detached.
2054 		 */
2055 		IOSRAMLOG(1,
2056 		    "TSWTCH: aborted (pre OBP cback). Thread resumed.\n",
2057 		    NULL, NULL, NULL, NULL);
2058 		error = EIO;
2059 	}
2060 
2061 	if (error) {
2062 		IOSRAMLOG(1,
2063 		    "TSWTCH: map failed instance:%d  softp:%p error:%x\n",
2064 		    instance, softp, error, NULL);
2065 		goto done;
2066 	}
2067 
2068 	if (prev_master != NULL) {
2069 		int	result;
2070 
2071 		/*
2072 		 * Now invoke the OBP interface to do the tunnel switch.
2073 		 */
2074 		result = prom_starcat_switch_tunnel(softp->portid,
2075 		    OBP_TSWITCH_REQREPLY);
2076 		if (result != 0) {
2077 			error = EIO;
2078 		}
2079 		IOSRAMLOG(1,
2080 		    "TSWTCH: OBP tswitch portid:%x result:%x error:%x\n",
2081 		    softp->portid, result, error, NULL);
2082 		IOSRAM_STAT(tswitch);
2083 		iosram_tswitch_tstamp = ddi_get_lbolt();
2084 	}
2085 
2086 	mutex_enter(&iosram_mutex);
2087 	if (iosram_tswitch_aborted) {
2088 		/*
2089 		 * Tunnel switch aborted.  This thread should not resume.
2090 		 * For now, we simply log a message, but don't unmap any
2091 		 * IOSRAM at this stage as it may be accessed within the
2092 		 * iosram_abort_tswitch(). The IOSRAM will be unmapped
2093 		 * when that instance is detached.
2094 		 */
2095 		if (iosram_tswitch_aborted) {
2096 			IOSRAMLOG(1,
2097 			    "TSWTCH: aborted (post OBP cback). Thread"
2098 			    " resumed.\n", NULL, NULL, NULL, NULL);
2099 			error = EIO;
2100 			mutex_exit(&iosram_mutex);
2101 		}
2102 	} else if (error) {
2103 		/*
2104 		 * Tunnel switch failed.  Continue using previous tunnel.
2105 		 * However, unmap new (target) IOSRAM.
2106 		 */
2107 		iosram_new_master = NULL;
2108 		mutex_exit(&iosram_mutex);
2109 		iosram_remove_intr(softp);
2110 		iosram_remove_map(softp);
2111 	} else {
2112 		/*
2113 		 * Tunnel switch was successful.  Set the new master.
2114 		 * Also unmap old master IOSRAM and remove any interrupts
2115 		 * associated with that.
2116 		 *
2117 		 * Note that a call to iosram_force_write() allows access
2118 		 * to the IOSRAM while tunnel switch is in progress.  That
2119 		 * means we need to set the new master before unmapping
2120 		 * the old master.
2121 		 */
2122 		iosram_set_master(softp);
2123 		iosram_new_master = NULL;
2124 		mutex_exit(&iosram_mutex);
2125 
2126 		if (prev_master) {
2127 			IOSRAMLOG(1, "TSWTCH: unmapping prev_master:%p (%d)\n",
2128 			    prev_master, prev_master->instance, NULL, NULL);
2129 			iosram_remove_intr(prev_master);
2130 			iosram_remove_map(prev_master);
2131 		}
2132 	}
2133 
2134 done:
2135 	mutex_enter(&iosram_mutex);
2136 
2137 	/*
2138 	 * Clear the tunnel switch flag on the source and destination
2139 	 * instances.
2140 	 */
2141 	if (prev_master) {
2142 		prev_master->state &= ~IOSRAM_STATE_TSWITCH;
2143 	}
2144 	softp->state &= ~IOSRAM_STATE_TSWITCH;
2145 
2146 	/*
2147 	 * Since incoming interrupts could get lost during a tunnel switch,
2148 	 * trigger a soft interrupt just in case.  No harm other than a bit
2149 	 * of wasted effort will be caused if no interrupts were dropped.
2150 	 */
2151 	mutex_enter(&softp->intr_mutex);
2152 	iosram_master->intr_pending = 1;
2153 	if ((iosram_master->softintr_id != NULL) &&
2154 	    (iosram_master->intr_busy == 0)) {
2155 		ddi_trigger_softintr(iosram_master->softintr_id);
2156 	}
2157 	mutex_exit(&softp->intr_mutex);
2158 
2159 	IOSRAMLOG(1, "TSWTCH: done error:%d iosram_master:%p instance:%d\n",
2160 	    error, iosram_master,
2161 	    (iosram_master) ? iosram_master->instance : -1, NULL);
2162 
2163 	return (error);
2164 }
2165 
2166 
2167 /*
2168  * iosram_abort_tswitch()
2169  * Must be called while holding iosram_mutex.
2170  */
2171 static void
2172 iosram_abort_tswitch()
2173 {
2174 	uint32_t  master_valid, new_master_valid;
2175 
2176 	ASSERT(mutex_owned(&iosram_mutex));
2177 
2178 	if ((!iosram_tswitch_active) || iosram_tswitch_aborted) {
2179 		return;
2180 	}
2181 
2182 	ASSERT(iosram_master != NULL);
2183 
2184 	IOSRAMLOG(1, "ABORT: iosram_master:%p (%d) iosram_new_master:%p (%d)\n",
2185 	    iosram_master, iosram_master->instance, iosram_new_master,
2186 	    (iosram_new_master == NULL) ? -1 : iosram_new_master->instance);
2187 
2188 	/*
2189 	 * The first call to iosram_force_write() in the middle of a tunnel
2190 	 * switch will get here.  We look up the IOSRAM VALID status and set up
2191 	 * an appropriate master, if one is still valid.  We also set
2192 	 * iosram_tswitch_aborted to prevent reentering this code and to catch
2193 	 * the case where the OBP callback thread somehow resumes.
2194 	 */
2195 	iosram_tswitch_aborted = 1;
2196 
2197 	if ((iosram_new_master == NULL) ||
2198 	    (iosram_new_master == iosram_master)) {
2199 		/*
2200 		 * New master hasn't been selected yet, or OBP callback
2201 		 * succeeded and we already selected new IOSRAM as master, but
2202 		 * system crashed in the middle of unmapping previous master or
2203 		 * cleaning up state.  Use the existing master.
2204 		 */
2205 		ASSERT(iosram_master->iosramp != NULL);
2206 		ASSERT(IOSRAM_GET_HDRFIELD32(iosram_master, status) ==
2207 		    IOSRAM_VALID);
2208 		IOSRAMLOG(1, "ABORT: master (%d) already determined.\n",
2209 		    iosram_master->instance, NULL, NULL, NULL);
2210 
2211 		return;
2212 	}
2213 
2214 	/*
2215 	 * System crashed in the middle of tunnel switch and we know that the
2216 	 * new target has not been marked master yet.  That means, the old
2217 	 * master should still be mapped.  We need to abort the tunnel switch
2218 	 * and setup a valid master, if possible, so that we can write to the
2219 	 * IOSRAM.
2220 	 *
2221 	 * We select a new master based upon the IOSRAM header status fields in
2222 	 * the previous master IOSRAM and the target IOSRAM as follows:
2223 	 *
2224 	 *	iosram_master	iosram-tswitch
2225 	 * 	(Prev Master)	(New Target)	Decision
2226 	 *	---------------	---------------	-----------
2227 	 *	  VALID		  don't care	prev master
2228 	 *	  INTRANSIT	  INVALID	prev master
2229 	 *	  INTRANSIT	  INTRANSIT	prev master
2230 	 *	  INTRANSIT	  VALID		new target
2231 	 *	  INVALID	  INVALID	shouldn't ever happen
2232 	 *	  INVALID	  INTRANSIT	shouldn't ever happen
2233 	 *	  INVALID	  VALID		new target
2234 	 */
2235 
2236 	master_valid = (iosram_master->iosramp != NULL) ?
2237 	    IOSRAM_GET_HDRFIELD32(iosram_master, status) : IOSRAM_INVALID;
2238 	new_master_valid = (iosram_new_master->iosramp != NULL) ?
2239 	    IOSRAM_GET_HDRFIELD32(iosram_new_master, status) : IOSRAM_INVALID;
2240 
2241 	if (master_valid == IOSRAM_VALID) {
2242 		/* EMPTY */
2243 		/*
2244 		 * OBP hasn't been called yet or, if it has, it hasn't started
2245 		 * copying yet.  Use the existing master.  Note that the new
2246 		 * master may not be mapped yet.
2247 		 */
2248 		IOSRAMLOG(1, "ABORT: prev master(%d) is VALID\n",
2249 		    iosram_master->instance, NULL, NULL, NULL);
2250 	} else if (master_valid == IOSRAM_INTRANSIT) {
2251 		/*
2252 		 * The system crashed after OBP started processing the tunnel
2253 		 * switch but before the iosram driver determined that it was
2254 		 * complete.  Use the new master if it has been marked valid,
2255 		 * meaning that OBP finished copying data to it, or the old
2256 		 * master otherwise.
2257 		 */
2258 		IOSRAMLOG(1, "ABORT: prev master(%d) is INTRANSIT\n",
2259 		    iosram_master->instance, NULL, NULL, NULL);
2260 
2261 		if (new_master_valid == IOSRAM_VALID) {
2262 			iosram_set_master(iosram_new_master);
2263 			IOSRAMLOG(1, "ABORT: new master(%d) is VALID\n",
2264 			    iosram_new_master->instance, NULL, NULL,
2265 			    NULL);
2266 		} else {
2267 			prom_starcat_switch_tunnel(iosram_master->portid,
2268 			    OBP_TSWITCH_NOREPLY);
2269 
2270 			IOSRAMLOG(1, "ABORT: new master(%d) is INVALID\n",
2271 			    iosram_new_master->instance, NULL, NULL,
2272 			    NULL);
2273 		}
2274 	} else {
2275 		/*
2276 		 * The system crashed after OBP marked the old master INVALID,
2277 		 * which means the new master is the way to go.
2278 		 */
2279 		IOSRAMLOG(1, "ABORT: prev master(%d) is INVALID\n",
2280 		    iosram_master->instance, NULL, NULL, NULL);
2281 
2282 		ASSERT(new_master_valid == IOSRAM_VALID);
2283 
2284 		iosram_set_master(iosram_new_master);
2285 	}
2286 
2287 	IOSRAMLOG(1, "ABORT: Instance %d selected as master\n",
2288 	    iosram_master->instance, NULL, NULL, NULL);
2289 }
2290 
2291 
2292 /*
2293  * iosram_switchfrom(instance)
2294  *	Switch master tunnel away from the specified instance
2295  */
2296 /*ARGSUSED*/
2297 int
2298 iosram_switchfrom(int instance)
2299 {
2300 	struct iosramsoft	*softp;
2301 	int			error = 0;
2302 	int			count;
2303 	clock_t			current_tstamp;
2304 	clock_t			tstamp_interval;
2305 	struct iosramsoft	*last_master = NULL;
2306 	static int		last_master_instance = -1;
2307 
2308 	IOSRAMLOG(1, "SwtchFrom: instance:%d  iosram_master:%p (%d)\n",
2309 	    instance, iosram_master,
2310 	    ((iosram_master) ? iosram_master->instance : -1), NULL);
2311 
2312 	mutex_enter(&iosram_mutex);
2313 
2314 	/*
2315 	 * Wait if another tunnel switch is in progress
2316 	 */
2317 	for (count = 0; iosram_tswitch_active && count < IOSRAM_TSWITCH_RETRY;
2318 	    count++) {
2319 		iosram_tswitch_wakeup = 1;
2320 		cv_wait(&iosram_tswitch_wait, &iosram_mutex);
2321 	}
2322 
2323 	if (iosram_tswitch_active) {
2324 		mutex_exit(&iosram_mutex);
2325 		return (EAGAIN);
2326 	}
2327 
2328 	/*
2329 	 * Check if the specified instance holds the tunnel. If not,
2330 	 * then we are done.
2331 	 */
2332 	if ((iosram_master == NULL) || (iosram_master->instance != instance)) {
2333 		mutex_exit(&iosram_mutex);
2334 		return (0);
2335 	}
2336 
2337 	/*
2338 	 * Before beginning the tunnel switch process, wait for any outstanding
2339 	 * read/write activity to complete.
2340 	 */
2341 	iosram_tswitch_active = 1;
2342 	while (iosram_rw_active) {
2343 		iosram_rw_wakeup = 1;
2344 		cv_wait(&iosram_rw_wait, &iosram_mutex);
2345 	}
2346 
2347 	/*
2348 	 * If a previous tunnel switch just completed, we have to make sure
2349 	 * HWAD has enough time to find the new tunnel before we switch
2350 	 * away from it.  Otherwise, OBP's mailbox message to OSD will never
2351 	 * get through.  Just to be paranoid about synchronization of lbolt
2352 	 * across different CPUs, make sure the current attempt isn't noted
2353 	 * as starting _before_ the last tunnel switch completed.
2354 	 */
2355 	current_tstamp = ddi_get_lbolt();
2356 	if (current_tstamp > iosram_tswitch_tstamp) {
2357 		tstamp_interval = current_tstamp - iosram_tswitch_tstamp;
2358 	} else {
2359 		tstamp_interval = 0;
2360 	}
2361 	if (drv_hztousec(tstamp_interval) < IOSRAM_TSWITCH_DELAY_US) {
2362 		mutex_exit(&iosram_mutex);
2363 		delay(drv_usectohz(IOSRAM_TSWITCH_DELAY_US) - tstamp_interval);
2364 		mutex_enter(&iosram_mutex);
2365 	}
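
	/*
	 * Worked example of the interval check above, with values assumed
	 * purely for illustration (IOSRAM_TSWITCH_DELAY_US is defined in
	 * iosramvar.h): with hz == 100 (10ms ticks) and an assumed delay
	 * requirement of 500000us, a tstamp_interval of 20 ticks converts to
	 * drv_hztousec(20) == 200000us.  Since 200000 < 500000, we sleep for
	 * drv_usectohz(500000) - 20 == 50 - 20 == 30 more ticks (about 300ms)
	 * before going ahead with the switch.
	 */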
2366 
2367 	/*
2368 	 * The specified instance holds the tunnel.  We need to move it to some
2369 	 * other IOSRAM.  Try out all possible IOSRAMs listed in
2370 	 * iosram_instances.  For now, we always search from the first entry.
2371 	 * In future, it may be desirable to start where we left off.
2372 	 */
2373 	for (softp = iosram_instances; softp != NULL; softp = softp->next) {
2374 		if (iosram_tswitch_aborted) {
2375 			break;
2376 		}
2377 
2378 		/* we can't switch _to_ the instance we're switching _from_ */
2379 		if (softp->instance == instance) {
2380 			continue;
2381 		}
2382 
2383 		/* skip over instances being detached */
2384 		if (softp->state & IOSRAM_STATE_DETACH) {
2385 			continue;
2386 		}
2387 
2388 		/*
2389 		 * Try to avoid reverting to the last instance we switched away
2390 		 * from, as we expect that one to be detached eventually.  Keep
2391 		 * track of it, though, so we can go ahead and try switching to
2392 		 * it if no other viable candidates are found.
2393 		 */
2394 		if (softp->instance == last_master_instance) {
2395 			last_master = softp;
2396 			continue;
2397 		}
2398 
2399 		/*
2400 		 * Do the tunnel switch.  If successful, record the instance of
2401 		 * the master we just left behind so we can try to avoid
2402 		 * reverting to it next time.
2403 		 */
2404 		if (iosram_switch_tunnel(softp) == 0) {
2405 			last_master_instance = instance;
2406 			break;
2407 		}
2408 	}
2409 
2410 	/*
2411 	 * If we failed to switch the tunnel, but we skipped over an instance
2412 	 * that had previously been switched out of because we expected it to be
2413 	 * detached, go ahead and try it anyway (unless the tswitch was aborted
2414 	 * or the instance we skipped is finally being detached).
2415 	 */
2416 	if ((softp == NULL) && (last_master != NULL) &&
2417 	    !iosram_tswitch_aborted &&
2418 	    !(last_master->state & IOSRAM_STATE_DETACH)) {
2419 		if (iosram_switch_tunnel(last_master) == 0) {
2420 			softp = last_master;
2421 			last_master_instance = instance;
2422 		}
2423 	}
2424 
2425 	if ((softp == NULL) || (iosram_tswitch_aborted)) {
2426 		error = EIO;
2427 	}
2428 
2429 	/*
2430 	 * If there are additional tunnel switches queued up waiting for this
2431 	 * one to complete, wake them up.
2432 	 */
2433 	if (iosram_tswitch_wakeup) {
2434 		iosram_tswitch_wakeup = 0;
2435 		cv_broadcast(&iosram_tswitch_wait);
2436 	}
2437 	iosram_tswitch_active = 0;
2438 	mutex_exit(&iosram_mutex);
2439 	return (error);
2440 }
2441 
2442 
2443 /*
2444  * iosram_tunnel_capable(softp)
2445  *	Check if this IOSRAM instance is tunnel-capable by looking at the
2446  *	"tunnel-capable" property.
2447  */
2448 static int
2449 iosram_tunnel_capable(struct iosramsoft *softp)
2450 {
2451 	int	proplen;
2452 	int	tunnel_capable;
2453 
2454 	/*
2455 	 * Look up IOSRAM_TUNNELOK_PROP property, if any.
2456 	 */
2457 	proplen = sizeof (tunnel_capable);
2458 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, softp->dip,
2459 	    DDI_PROP_DONTPASS, IOSRAM_TUNNELOK_PROP, (caddr_t)&tunnel_capable,
2460 	    &proplen) != DDI_PROP_SUCCESS) {
2461 		tunnel_capable = 0;
2462 	}
2463 	return (tunnel_capable);
2464 }
2465 
2466 
2467 static int
2468 iosram_sbbc_setup_map(struct iosramsoft *softp)
2469 {
2470 	int				rv;
2471 	struct ddi_device_acc_attr	attr;
2472 	dev_info_t			*dip = softp->dip;
2473 	uint32_t			sema_val;
2474 
2475 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2476 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2477 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2478 
2479 	mutex_enter(&iosram_mutex);
2480 	mutex_enter(&softp->intr_mutex);
2481 
2482 	/*
2483 	 * Map SBBC region in
2484 	 */
2485 	if ((rv = ddi_regs_map_setup(dip, IOSRAM_SBBC_MAP_INDEX,
2486 	    (caddr_t *)&softp->sbbc_region,
2487 	    IOSRAM_SBBC_MAP_OFFSET, sizeof (iosram_sbbc_region_t),
2488 	    &attr, &softp->sbbc_handle)) != DDI_SUCCESS) {
2489 		DPRINTF(1, ("Failed to map SBBC region.\n"));
2490 		mutex_exit(&softp->intr_mutex);
2491 		mutex_exit(&iosram_mutex);
2492 		return (rv);
2493 	}
2494 
2495 	/*
2496 	 * Disable SBBC interrupts. SBBC interrupts are enabled
2497 	 * once the interrupt handler is registered.
2498 	 */
2499 	ddi_put32(softp->sbbc_handle,
2500 	    &(softp->sbbc_region->int_enable.reg), 0x0);
2501 
2502 	/*
2503 	 * Clear hardware semaphore value if appropriate.
2504 	 * When the first SBBC is mapped in by the IOSRAM driver,
2505 	 * the value of the semaphore should be initialized only
2506 	 * if it is not held by SMS. For subsequent SBBCs, the
2507 	 * semaphore is always initialized.
2508 	 */
2509 	sema_val = IOSRAM_SEMA_RD(softp);
2510 
2511 	if (!iosram_master) {
2512 		/* the first SBBC is being mapped in */
2513 		if (!(IOSRAM_SEMA_IS_HELD(sema_val) &&
2514 		    IOSRAM_SEMA_GET_IDX(sema_val) == IOSRAM_SEMA_SMS_IDX)) {
2515 			/* not held by SMS, we clear the semaphore */
2516 			IOSRAM_SEMA_WR(softp, 0);
2517 		}
2518 	} else {
2519 		/* not the first SBBC, we clear the semaphore */
2520 		IOSRAM_SEMA_WR(softp, 0);
2521 	}
2522 
2523 	mutex_exit(&softp->intr_mutex);
2524 	mutex_exit(&iosram_mutex);
2525 	return (0);
2526 }
2527 
2528 
2529 static int
2530 iosram_setup_map(struct iosramsoft *softp)
2531 {
2532 	int				instance = softp->instance;
2533 	dev_info_t			*dip = softp->dip;
2534 	int				portid;
2535 	int				proplen;
2536 	caddr_t				propvalue;
2537 	struct ddi_device_acc_attr	attr;
2538 
2539 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2540 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2541 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2542 
2543 	/*
2544 	 * Lookup IOSRAM_REG_PROP property to find out our IOSRAM length
2545 	 */
2546 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
2547 	    DDI_PROP_DONTPASS, IOSRAM_REG_PROP, (caddr_t)&propvalue,
2548 	    &proplen) != DDI_PROP_SUCCESS) {
2549 		cmn_err(CE_WARN, "iosram(%d): can't find register property.\n",
2550 		    instance);
2551 		return (DDI_FAILURE);
2552 	} else {
2553 		iosram_reg_t	*regprop = (iosram_reg_t *)propvalue;
2554 
2555 		DPRINTF(1, ("SetupMap(%d): Got reg prop: %x %x %x\n",
2556 		    instance, regprop->addr_hi,
2557 		    regprop->addr_lo, regprop->size));
2558 
2559 		softp->iosramlen = regprop->size;
2560 
2561 		kmem_free(propvalue, proplen);
2562 	}
2563 	DPRINTF(1, ("SetupMap(%d): IOSRAM length: 0x%x\n", instance,
2564 	    softp->iosramlen));
2565 	softp->handle = NULL;
2566 
2567 	/*
2568 	 * To minimize boot time, we map the entire IOSRAM as opposed to
2569 	 * mapping individual chunks via separate ddi_regs_map_setup() calls.
2570 	 */
2571 	if (ddi_regs_map_setup(dip, 0, (caddr_t *)&softp->iosramp,
2572 	    0x0, softp->iosramlen, &attr, &softp->handle) != DDI_SUCCESS) {
2573 		cmn_err(CE_WARN, "iosram(%d): failed to map IOSRAM len:%x\n",
2574 		    instance, softp->iosramlen);
2575 		iosram_remove_map(softp);
2576 		return (DDI_FAILURE);
2577 	}
2578 
2579 	/*
2580 	 * Lookup PORTID property on my parent hierarchy
2581 	 */
2582 	proplen = sizeof (portid);
2583 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
2584 	    0, IOSRAM_PORTID_PROP, (caddr_t)&portid,
2585 	    &proplen) != DDI_PROP_SUCCESS) {
2586 		cmn_err(CE_WARN, "iosram(%d): can't find portid property.\n",
2587 		    instance);
2588 		iosram_remove_map(softp);
2589 		return (DDI_FAILURE);
2590 	}
2591 	softp->portid = portid;
2592 
2593 	if (iosram_sbbc_setup_map(softp) != DDI_SUCCESS) {
2594 		cmn_err(CE_WARN, "iosram(%d): can't map SBBC region.\n",
2595 		    instance);
2596 		iosram_remove_map(softp);
2597 		return (DDI_FAILURE);
2598 	}
2599 
2600 	mutex_enter(&iosram_mutex);
2601 	softp->state |= IOSRAM_STATE_MAPPED;
2602 	mutex_exit(&iosram_mutex);
2603 
2604 	return (DDI_SUCCESS);
2605 }
2606 
2607 
2608 static void
2609 iosram_remove_map(struct iosramsoft *softp)
2610 {
2611 	mutex_enter(&iosram_mutex);
2612 
2613 	ASSERT((softp->state & IOSRAM_STATE_MASTER) == 0);
2614 
2615 	if (softp->handle) {
2616 		ddi_regs_map_free(&softp->handle);
2617 		softp->handle = NULL;
2618 	}
2619 	softp->iosramp = NULL;
2620 
2621 	/*
2622 	 * Unmap the SBBC register region.  Since it is shared with the SBBC
2623 	 * interrupt handler, take intr_mutex.
2624 	 */
2625 	mutex_enter(&softp->intr_mutex);
2626 	if (softp->sbbc_region) {
2627 		ddi_regs_map_free(&softp->sbbc_handle);
2628 		softp->sbbc_region = NULL;
2629 	}
2630 	mutex_exit(&softp->intr_mutex);
2631 
2632 	softp->state &= ~IOSRAM_STATE_MAPPED;
2633 
2634 	mutex_exit(&iosram_mutex);
2635 }
2636 
2637 
2638 /*
2639  * iosram_is_chosen(struct iosramsoft *softp)
2640  *
2641  *	Looks up "chosen" node property to
2642  *	determine if it is the chosen IOSRAM.
2643  */
2644 static int
2645 iosram_is_chosen(struct iosramsoft *softp)
2646 {
2647 	char		chosen_iosram[MAXNAMELEN];
2648 	char		pn[MAXNAMELEN];
2649 	int		nodeid;
2650 	int		chosen;
2651 	pnode_t		dnode;
2652 
2653 	/*
2654 	 * Get /chosen node info. prom interface will handle errors.
2655 	 */
2656 	dnode = prom_chosennode();
2657 
2658 	/*
2659 	 * Look for the "iosram" property on the chosen node using the prom
2660 	 * interface, as ddi_find_devinfo() can't be used here (it calls
2661 	 * ddi_walk_devs(), which takes an extra lock on the device tree).
2662 	 */
2663 	if (prom_getprop(dnode, IOSRAM_CHOSEN_PROP, (caddr_t)&nodeid) <= 0) {
2664 		/*
2665 		 * Can't find IOSRAM_CHOSEN_PROP property under chosen node
2666 		 */
2667 		cmn_err(CE_WARN,
2668 		    "iosram(%d): can't find chosen iosram property\n",
2669 		    softp->instance);
2670 		return (0);
2671 	}
2672 
2673 	DPRINTF(1, ("iosram(%d): Got '%x' for chosen '%s' property\n",
2674 	    softp->instance, nodeid, IOSRAM_CHOSEN_PROP));
2675 
2676 	/*
2677 	 * get the full OBP pathname of this node
2678 	 */
2679 	if (prom_phandle_to_path((phandle_t)nodeid, chosen_iosram,
2680 	    sizeof (chosen_iosram)) < 0) {
2681 		cmn_err(CE_NOTE, "prom_phandle_to_path(%x) failed\n", nodeid);
2682 		return (0);
2683 	}
2684 	DPRINTF(1, ("iosram(%d): prom_phandle_to_path(%x) is '%s'\n",
2685 	    softp->instance, nodeid, chosen_iosram));
2686 
2687 	(void) ddi_pathname(softp->dip, pn);
2688 	DPRINTF(1, ("iosram(%d): ddi_pathname(%p) is '%s'\n",
2689 	    softp->instance, softp->dip, pn));
2690 
2691 	chosen = (strcmp(chosen_iosram, pn) == 0) ? 1 : 0;
2692 	DPRINTF(1, ("iosram(%d): ... %s\n", softp->instance,
2693 	    chosen ? "MASTER" : "SLAVE"));
2694 	IOSRAMLOG(1, "iosram(%d): ... %s\n", softp->instance,
2695 	    (chosen ? "MASTER" : "SLAVE"), NULL, NULL);
2696 
2697 	return (chosen);
2698 }
2699 
2700 
2701 /*
2702  * iosram_set_master(struct iosramsoft *softp)
2703  *
2704  *	Set master tunnel to the specified IOSRAM
2705  *	Must be called while holding iosram_mutex.
2706  */
2707 static void
2708 iosram_set_master(struct iosramsoft *softp)
2709 {
2710 	ASSERT(mutex_owned(&iosram_mutex));
2711 	ASSERT(softp != NULL);
2712 	ASSERT(softp->state & IOSRAM_STATE_MAPPED);
2713 	ASSERT(IOSRAM_GET_HDRFIELD32(softp, status) == IOSRAM_VALID);
2714 
2715 	/*
2716 	 * Clear MASTER flag on any previous IOSRAM master, if any
2717 	 */
2718 	if (iosram_master && (iosram_master != softp)) {
2719 		iosram_master->state &= ~IOSRAM_STATE_MASTER;
2720 	}
2721 
2722 	/*
2723 	 * Setup new IOSRAM master
2724 	 */
2725 	iosram_update_addrs(softp);
2726 	iosram_handle = softp->handle;
2727 	softp->state |= IOSRAM_STATE_MASTER;
2728 	softp->tswitch_ok++;
2729 	iosram_master = softp;
2730 
2731 	IOSRAMLOG(1, "SETMASTER: softp:%p instance:%d\n", softp,
2732 	    softp->instance, NULL, NULL);
2733 }
2734 
2735 
2736 /*
2737  * iosram_read_toc()
2738  *
2739  *	Read the TOC from an IOSRAM instance that has been mapped in.
2740  *	If the TOC is flawed or the IOSRAM isn't valid, return an error.
2741  */
2742 static int
2743 iosram_read_toc(struct iosramsoft *softp)
2744 {
2745 	int			i;
2746 	int			instance = softp->instance;
2747 	uint8_t			*toc_entryp;
2748 	iosram_flags_t		*flagsp = NULL;
2749 	int			new_nchunks;
2750 	iosram_chunk_t		*new_chunks;
2751 	iosram_chunk_t		*chunkp;
2752 	iosram_chunk_t		*old_chunkp;
2753 	iosram_toc_entry_t	index;
2754 
2755 	/*
2756 	 * Never try to read the TOC out of an unmapped IOSRAM.
2757 	 */
2758 	ASSERT(softp->state & IOSRAM_STATE_MAPPED);
2759 
2760 	mutex_enter(&iosram_mutex);
2761 
2762 	/*
2763 	 * Check to make sure this IOSRAM is marked valid.  Return
2764 	 * an error if it isn't.
2765 	 */
2766 	if (IOSRAM_GET_HDRFIELD32(softp, status) != IOSRAM_VALID) {
2767 		DPRINTF(1, ("iosram_read_toc(%d): IOSRAM not flagged valid\n",
2768 		    instance));
2769 		mutex_exit(&iosram_mutex);
2770 		return (EINVAL);
2771 	}
2772 
2773 	/*
2774 	 * Get the location of the TOC.
2775 	 */
2776 	toc_entryp = softp->iosramp + IOSRAM_GET_HDRFIELD32(softp, toc_offset);
2777 
2778 	/*
2779 	 * Read the index entry from the TOC and make sure it looks correct.
2780 	 */
2781 	ddi_rep_get8(softp->handle, (uint8_t *)&index, toc_entryp,
2782 	    sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR);
2783 	if ((index.key != IOSRAM_INDEX_KEY) ||
2784 	    (index.off != IOSRAM_INDEX_OFF)) {
2785 		cmn_err(CE_WARN, "iosram(%d): invalid TOC index.\n", instance);
2786 		mutex_exit(&iosram_mutex);
2787 		return (EINVAL);
2788 	}
2789 
2790 	/*
2791 	 * Allocate storage for the new chunks array and initialize it with data
2792 	 * from the TOC and callback data from the corresponding old chunk, if
2793 	 * it exists.
2794 	 */
2795 	new_nchunks = index.len - 1;
2796 	new_chunks = (iosram_chunk_t *)kmem_zalloc(new_nchunks *
2797 	    sizeof (iosram_chunk_t), KM_SLEEP);
2798 	for (i = 0, chunkp = new_chunks; i < new_nchunks; i++, chunkp++) {
2799 		toc_entryp += sizeof (iosram_toc_entry_t);
2800 		ddi_rep_get8(softp->handle, (uint8_t *)&(chunkp->toc_data),
2801 		    toc_entryp, sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR);
2802 		chunkp->hash = NULL;
2803 		if ((chunkp->toc_data.off < softp->iosramlen) &&
2804 		    (chunkp->toc_data.len <= softp->iosramlen) &&
2805 		    ((chunkp->toc_data.off + chunkp->toc_data.len) <=
2806 		    softp->iosramlen)) {
2807 			chunkp->basep = softp->iosramp + chunkp->toc_data.off;
2808 			DPRINTF(1,
2809 			    ("iosram_read_toc(%d): k:%x o:%x l:%x p:%x\n",
2810 			    instance, chunkp->toc_data.key,
2811 			    chunkp->toc_data.off, chunkp->toc_data.len,
2812 			    chunkp->basep));
2813 		} else {
2814 			cmn_err(CE_WARN, "iosram(%d): TOC entry %d"
2815 			    " out of range... off:%x  len:%x\n",
2816 			    instance, i + 1, chunkp->toc_data.off,
2817 			    chunkp->toc_data.len);
2818 			kmem_free(new_chunks, new_nchunks *
2819 			    sizeof (iosram_chunk_t));
2820 			mutex_exit(&iosram_mutex);
2821 			return (EINVAL);
2822 		}
2823 
2824 		/*
2825 		 * Note the existence of the flags chunk, which is required in
2826 		 * a correct TOC.
2827 		 */
2828 		if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2829 			flagsp = (iosram_flags_t *)chunkp->basep;
2830 		}
2831 
2832 		/*
2833 		 * If there was an entry for this chunk in the old list, copy
2834 		 * the callback data from old to new storage.
2835 		 */
2836 		if ((nchunks > 0) &&
2837 		    ((old_chunkp = iosram_find_chunk(chunkp->toc_data.key)) !=
2838 		    NULL)) {
2839 			bcopy(&(old_chunkp->cback), &(chunkp->cback),
2840 			    sizeof (iosram_cback_t));
2841 		}
2842 	}
2843 	/*
2844 	 * The TOC is malformed if there is no entry for the flags chunk.
2845 	 */
2846 	if (flagsp == NULL) {
2847 		kmem_free(new_chunks, new_nchunks * sizeof (iosram_chunk_t));
2848 		mutex_exit(&iosram_mutex);
2849 		return (EINVAL);
2850 	}
2851 
2852 	/*
2853 	 * Free any memory that is no longer needed and install the new data
2854 	 * as current data.
2855 	 */
2856 	if (chunks != NULL) {
2857 		kmem_free(chunks, nchunks * sizeof (iosram_chunk_t));
2858 	}
2859 	chunks = new_chunks;
2860 	nchunks = new_nchunks;
2861 	iosram_init_hashtab();
2862 
2863 	mutex_exit(&iosram_mutex);
2864 	return (0);
2865 }
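
/*
 * For reference, the IOSRAM layout that iosram_read_toc() expects, sketched
 * out below.  The box diagram is illustrative; only the relationships shown
 * (index entry first, N - 1 chunk entries after it, flags chunk required,
 * every off/len pair bounded by iosramlen) are relied upon by the code above.
 *
 *	iosramp + toc_offset -> +--------------------------------------+
 *	                        | index entry:                         |
 *	                        |   key == IOSRAM_INDEX_KEY            |
 *	                        |   off == IOSRAM_INDEX_OFF            |
 *	                        |   len == N  (total TOC entries,      |
 *	                        |              index entry included)   |
 *	                        +--------------------------------------+
 *	                        | chunk entry 1:  key / off / len      |
 *	                        +--------------------------------------+
 *	                        | ...                                  |
 *	                        +--------------------------------------+
 *	                        | chunk entry N - 1                    |
 *	                        +--------------------------------------+
 *
 * One of the N - 1 chunk entries must carry IOSRAM_FLAGS_KEY, or the TOC is
 * rejected with EINVAL.
 */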
2866 
2867 
2868 /*
2869  * iosram_init_hashtab()
2870  *
2871  *	Initialize the hash table and populate it with the IOSRAM
2872  *	chunks previously read from the TOC.  The caller must hold the
2873  *	ioram_mutex lock.
2874  *	iosram_mutex lock.
2875 static void
2876 iosram_init_hashtab(void)
2877 {
2878 	int		i, bucket;
2879 	iosram_chunk_t	*chunkp;
2880 
2881 	ASSERT(mutex_owned(&iosram_mutex));
2882 
2883 	for (i = 0; i < IOSRAM_HASHSZ; i++) {
2884 		iosram_hashtab[i] = NULL;
2885 	}
2886 
2887 	if (chunks) {
2888 		for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
2889 			/*
2890 			 * Hide the flags chunk by leaving it out of the hash
2891 			 * table.
2892 			 */
2893 			if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2894 				continue;
2895 			}
2896 
2897 			/*
2898 			 * Add the current chunk to the hash table.
2899 			 */
2900 			bucket = IOSRAM_HASH(chunkp->toc_data.key);
2901 			chunkp->hash = iosram_hashtab[bucket];
2902 			iosram_hashtab[bucket] = chunkp;
2903 		}
2904 	}
2905 }
2906 
2907 
2908 /*
2909  * iosram_update_addrs()
2910  *
2911  *	Process the chunk list, updating each chunk's basep, which is a pointer
2912  *	to the beginning of the chunk's memory in kvaddr space.  Record the
2913  *	basep value of the flags chunk to speed up flag access.  The caller
2914  *	must hold the iosram_mutex lock.
2915  */
2916 static void
2917 iosram_update_addrs(struct iosramsoft *softp)
2918 {
2919 	int		i;
2920 	iosram_flags_t	*flagsp;
2921 	iosram_chunk_t	*chunkp;
2922 
2923 	ASSERT(mutex_owned(&iosram_mutex));
2924 
2925 	/*
2926 	 * First go through all of the chunks updating their base pointers and
2927 	 * looking for the flags chunk.
2928 	 */
2929 	for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
2930 		chunkp->basep = softp->iosramp + chunkp->toc_data.off;
2931 		if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2932 			flagsp = (iosram_flags_t *)(chunkp->basep);
2933 			DPRINTF(1,
2934 			    ("iosram_update_addrs flags: o:0x%08x p:%p\n",
2935 			    chunkp->toc_data.off, flagsp));
2936 		}
2937 	}
2938 
2939 	/*
2940 	 * Now, go through and update each chunk's flags pointer.  This can't be
2941 	 * done in the first loop because we don't have the address of the flags
2942 	 * chunk yet.
2943 	 */
2944 	for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
2945 		chunkp->flagsp = flagsp++;
2946 		DPRINTF(1, ("iosram_update_addrs: k:0x%x f:%p\n",
2947 		    chunkp->toc_data.key, chunkp->flagsp));
2948 	}
2949 }
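
/*
 * To make the pointer arithmetic above explicit: the flags chunk is treated
 * as a flat array of iosram_flags_t laid out in TOC order, so the flags for
 * chunks[i] live at
 *
 *	(iosram_flags_t *)(softp->iosramp + flags_chunk.toc_data.off) + i
 *
 * where flags_chunk is the chunk whose key is IOSRAM_FLAGS_KEY (the name
 * flags_chunk is used here only for illustration).  Caching each chunk's
 * flagsp pointer simply avoids redoing that arithmetic on every flag access.
 */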
2950 
2951 /*
2952  * iosram_find_chunk(key)
2953  *
2954  *	Return a pointer to iosram_chunk structure corresponding to the
2955  *	"key" IOSRAM chunk.  The caller must hold the iosram_mutex lock.
2956  */
2957 static iosram_chunk_t *
2958 iosram_find_chunk(uint32_t key)
2959 {
2960 	iosram_chunk_t	*chunkp;
2961 	int		index = IOSRAM_HASH(key);
2962 
2963 	ASSERT(mutex_owned(&iosram_mutex));
2964 
2965 	for (chunkp = iosram_hashtab[index]; chunkp; chunkp = chunkp->hash) {
2966 		if (chunkp->toc_data.key == key) {
2967 			break;
2968 		}
2969 	}
2970 
2971 	return (chunkp);
2972 }
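
/*
 * A minimal sketch of the expected caller pattern for the lookup above; the
 * EINVAL return for an unknown key is shown only for illustration, callers
 * choose their own error handling:
 *
 *	mutex_enter(&iosram_mutex);
 *	if ((chunkp = iosram_find_chunk(key)) == NULL) {
 *		mutex_exit(&iosram_mutex);
 *		return (EINVAL);
 *	}
 *	... use chunkp->basep and chunkp->flagsp while still holding
 *	    iosram_mutex ...
 *	mutex_exit(&iosram_mutex);
 */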
2973 
2974 
2975 /*
2976  * iosram_add_intr(iosramsoft_t *)
2977  */
2978 static int
2979 iosram_add_intr(iosramsoft_t *softp)
2980 {
2981 	IOSRAMLOG(2, "ADDINTR: softp:%p  instance:%d\n",
2982 	    softp, softp->instance, NULL, NULL);
2983 
2984 	if (ddi_add_softintr(softp->dip, DDI_SOFTINT_MED,
2985 	    &softp->softintr_id, &softp->soft_iblk, NULL,
2986 	    iosram_softintr, (caddr_t)softp) != DDI_SUCCESS) {
2987 		cmn_err(CE_WARN,
2988 		    "iosram(%d): Can't register softintr.\n",
2989 		    softp->instance);
2990 		return (DDI_FAILURE);
2991 	}
2992 
2993 	if (ddi_add_intr(softp->dip, 0, &softp->real_iblk, NULL,
2994 	    iosram_intr, (caddr_t)softp) != DDI_SUCCESS) {
2995 		cmn_err(CE_WARN,
2996 		    "iosram(%d): Can't register intr"
2997 		    " handler.\n", softp->instance);
2998 		ddi_remove_softintr(softp->softintr_id);
2999 		return (DDI_FAILURE);
3000 	}
3001 
3002 	/*
3003 	 * Enable SBBC interrupts
3004 	 */
3005 	ddi_put32(softp->sbbc_handle, &(softp->sbbc_region->int_enable.reg),
3006 	    IOSRAM_SBBC_INT0|IOSRAM_SBBC_INT1);
3007 
3008 	return (DDI_SUCCESS);
3009 }
3010 
3011 
3012 /*
3013  * iosram_remove_intr(iosramsoft_t *)
3014  */
3015 static int
3016 iosram_remove_intr(iosramsoft_t *softp)
3017 {
3018 	IOSRAMLOG(2, "REMINTR: softp:%p  instance:%d\n",
3019 	    softp, softp->instance, NULL, NULL);
3020 
3021 	/*
3022 	 * Disable SBBC interrupts if SBBC is mapped in
3023 	 */
3024 	if (softp->sbbc_region) {
3025 		ddi_put32(softp->sbbc_handle,
3026 		    &(softp->sbbc_region->int_enable.reg), 0);
3027 	}
3028 
3029 	/*
3030 	 * Remove SBBC interrupt handler
3031 	 */
3032 	ddi_remove_intr(softp->dip, 0, softp->real_iblk);
3033 
3034 	/*
3035 	 * Remove soft interrupt handler
3036 	 */
3037 	mutex_enter(&iosram_mutex);
3038 	if (softp->softintr_id != NULL) {
3039 		ddi_remove_softintr(softp->softintr_id);
3040 		softp->softintr_id = NULL;
3041 	}
3042 	mutex_exit(&iosram_mutex);
3043 
3044 	return (0);
3045 }
3046 
3047 
3048 /*
3049  * iosram_add_instance(iosramsoft_t *)
3050  * Must be called while holding iosram_mutex
3051  */
3052 static void
3053 iosram_add_instance(iosramsoft_t *new_softp)
3054 {
3055 #ifdef DEBUG
3056 	int		instance = new_softp->instance;
3057 	iosramsoft_t	*softp;
3058 #endif
3059 
3060 	ASSERT(mutex_owned(&iosram_mutex));
3061 
3062 #if defined(DEBUG)
3063 	/* Verify that this instance is not in the list */
3064 	for (softp = iosram_instances; softp != NULL; softp = softp->next) {
3065 		ASSERT(softp->instance != instance);
3066 	}
3067 #endif
3068 
3069 	/*
3070 	 * Add this instance to the list
3071 	 */
3072 	if (iosram_instances != NULL) {
3073 		iosram_instances->prev = new_softp;
3074 	}
3075 	new_softp->next = iosram_instances;
3076 	new_softp->prev = NULL;
3077 	iosram_instances = new_softp;
3078 }
3079 
3080 
3081 /*
3082  * iosram_remove_instance(int instance)
3083  * Must be called while holding iosram_mutex
3084  */
3085 static void
3086 iosram_remove_instance(int instance)
3087 {
3088 	iosramsoft_t *softp;
3089 
3090 	/*
3091 	 * Remove specified instance from the iosram_instances list so that
3092 	 * it can't be chosen as the tunnel in the future.
3093 	 */
3094 	ASSERT(mutex_owned(&iosram_mutex));
3095 
3096 	for (softp = iosram_instances; softp != NULL; softp = softp->next) {
3097 		if (softp->instance == instance) {
3098 			if (softp->next != NULL) {
3099 				softp->next->prev = softp->prev;
3100 			}
3101 			if (softp->prev != NULL) {
3102 				softp->prev->next = softp->next;
3103 			}
3104 			if (iosram_instances == softp) {
3105 				iosram_instances = softp->next;
3106 			}
3107 
3108 			return;
3109 		}
3110 	}
3111 }
3112 
3113 
3114 /*
3115  * iosram_sema_acquire: Acquire hardware semaphore.
3116  * Return 0 if the semaphore could be acquired, or one of the following
3117  * possible values:
3118  * EAGAIN: there is a tunnel switch in progress
3119  * EBUSY: the semaphore was already "held"
3120  * ENXIO:  an I/O error occurred (e.g. SBBC not mapped)
3121  * If old_value is not NULL, the location it points to will be updated
3122  * with the semaphore value read when attempting to acquire it.
3123  */
3124 int
3125 iosram_sema_acquire(uint32_t *old_value)
3126 {
3127 	struct iosramsoft	*softp;
3128 	int			rv;
3129 	uint32_t		sema_val;
3130 
3131 	DPRINTF(2, ("IOSRAM: in iosram_sema_acquire\n"));
3132 
3133 	mutex_enter(&iosram_mutex);
3134 
3135 	/*
3136 	 * Disallow access if there is a tunnel switch in progress.
3137 	 */
3138 	if (iosram_tswitch_active) {
3139 		mutex_exit(&iosram_mutex);
3140 		return (EAGAIN);
3141 	}
3142 
3143 	/*
3144 	 * Use current master IOSRAM for operation, fail if none is
3145 	 * currently active.
3146 	 */
3147 	if ((softp = iosram_master) == NULL) {
3148 		mutex_exit(&iosram_mutex);
3149 		DPRINTF(1, ("IOSRAM: iosram_sema_acquire: no master\n"));
3150 		return (ENXIO);
3151 	}
3152 
3153 	mutex_enter(&softp->intr_mutex);
3154 
3155 	/*
3156 	 * Fail if SBBC region has not been mapped. This shouldn't
3157 	 * happen if we have a master IOSRAM, but we double-check.
3158 	 */
3159 	if (softp->sbbc_region == NULL) {
3160 		mutex_exit(&softp->intr_mutex);
3161 		mutex_exit(&iosram_mutex);
3162 		DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
3163 		    "SBBC not mapped\n", softp->instance));
3164 		return (ENXIO);
3165 	}
3166 
3167 	/* read semaphore value */
3168 	sema_val = IOSRAM_SEMA_RD(softp);
3169 	if (old_value != NULL)
3170 		*old_value = sema_val;
3171 
3172 	if (IOSRAM_SEMA_IS_HELD(sema_val)) {
3173 		/* semaphore was held by someone else */
3174 		rv = EBUSY;
3175 	} else {
3176 		/* semaphore was not held, we just acquired it */
3177 		rv = 0;
3178 	}
3179 
3180 	mutex_exit(&softp->intr_mutex);
3181 	mutex_exit(&iosram_mutex);
3182 
3183 	DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
3184 	    "old value=0x%x rv=%d\n", softp->instance, sema_val, rv));
3185 
3186 	return (rv);
3187 }
3188 
3189 
3190 /*
3191  * iosram_sema_release: Release hardware semaphore.
3192  * This function will "release" the hardware semaphore, and return 0 on
3193  * success. If an error occurred, one of the following values will be
3194  * returned:
3195  * EAGAIN: there is a tunnel switch in progress
3196  * ENXIO:  an I/O error occurred (e.g. SBBC not mapped)
3197  */
3198 int
3199 iosram_sema_release(void)
3200 {
3201 	struct iosramsoft	*softp;
3202 
3203 	DPRINTF(2, ("IOSRAM: in iosram_sema_release\n"));
3204 
3205 	mutex_enter(&iosram_mutex);
3206 
3207 	/*
3208 	 * Disallow access if there is a tunnel switch in progress.
3209 	 */
3210 	if (iosram_tswitch_active) {
3211 		mutex_exit(&iosram_mutex);
3212 		return (EAGAIN);
3213 	}
3214 
3215 	/*
3216 	 * Use current master IOSRAM for operation, fail if none is
3217 	 * currently active.
3218 	 */
3219 	if ((softp = iosram_master) == NULL) {
3220 		mutex_exit(&iosram_mutex);
3221 		DPRINTF(1, ("IOSRAM: iosram_sema_release: no master\n"));
3222 		return (ENXIO);
3223 	}
3224 
3225 	mutex_enter(&softp->intr_mutex);
3226 
3227 	/*
3228 	 * Fail if SBBC region has not been mapped in. This shouldn't
3229 	 * happen if we have a master IOSRAM, but we double-check.
3230 	 */
3231 	if (softp->sbbc_region == NULL) {
3232 		mutex_exit(&softp->intr_mutex);
3233 		mutex_exit(&iosram_mutex);
3234 		DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: "
3235 		    "SBBC not mapped\n", softp->instance));
3236 		return (ENXIO);
3237 	}
3238 
3239 	/* Release semaphore by clearing our semaphore register */
3240 	IOSRAM_SEMA_WR(softp, 0);
3241 
3242 	mutex_exit(&softp->intr_mutex);
3243 	mutex_exit(&iosram_mutex);
3244 
3245 	DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: success\n",
3246 	    softp->instance));
3247 
3248 	return (0);
3249 }
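
/*
 * A minimal sketch of how a kernel consumer might use the two semaphore
 * routines above.  The retry policy is an assumption for illustration, not
 * something this driver mandates:
 *
 *	uint32_t	old_val;
 *	int		rv;
 *
 *	rv = iosram_sema_acquire(&old_val);
 *	if (rv == 0) {
 *		do the work the hardware semaphore protects, then drop it:
 *		(void) iosram_sema_release();
 *	} else if (rv == EBUSY) {
 *		already held; old_val can be decoded with the IOSRAM_SEMA_*
 *		macros to see who holds it; back off and retry later
 *	} else {
 *		EAGAIN (tunnel switch in progress) or ENXIO (no usable
 *		master/SBBC mapping); treat as transient or fatal respectively
 *	}
 */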
3250 
3251 
3252 #if defined(IOSRAM_LOG)
3253 void
3254 iosram_log(caddr_t fmt, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4)
3255 {
3256 	uint32_t	seq;
3257 	iosram_log_t	*logp;
3258 
3259 	mutex_enter(&iosram_log_mutex);
3260 
3261 	seq = iosram_logseq++;
3262 	logp = &iosram_logbuf[seq % IOSRAM_MAXLOG];
3263 	logp->seq = seq;
3264 	logp->tstamp = lbolt;
3265 	logp->fmt = fmt;
3266 	logp->arg1 = a1;
3267 	logp->arg2 = a2;
3268 	logp->arg3 = a3;
3269 	logp->arg4 = a4;
3270 
3271 	mutex_exit(&iosram_log_mutex);
3272 
3273 	if (iosram_log_print) {
3274 		cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp);
3275 		if (logp->fmt) {
3276 			cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2,
3277 			    logp->arg3, logp->arg4);
3278 			if (logp->fmt[strlen(logp->fmt)-1] != '\n') {
3279 				cmn_err(CE_CONT, "\n");
3280 			}
3281 		} else {
3282 			cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n",
3283 			    logp->fmt, logp->arg1, logp->arg2, logp->arg3,
3284 			    logp->arg4);
3285 		}
3286 	}
3287 }
3288 #endif /* IOSRAM_LOG */
3289 
3290 
3291 #if defined(DEBUG)
3292 /*
3293  * iosram_get_keys(buf, len)
3294  *	Return IOSRAM TOC in the specified buffer
3295  */
3296 static int
3297 iosram_get_keys(iosram_toc_entry_t *bufp, uint32_t *len)
3298 {
3299 	struct iosram_chunk	*chunkp;
3300 	int			error = 0;
3301 	int			i;
3302 	int			cnt = (*len) / sizeof (iosram_toc_entry_t);
3303 
3304 	IOSRAMLOG(2, "iosram_get_keys(bufp:%p *len:%x)\n", bufp, *len, NULL,
3305 	    NULL);
3306 
3307 	/*
3308 	 * Copy data while holding the lock to prevent any data
3309 	 * corruption or invalid pointer dereferencing.
3310 	 */
3311 	mutex_enter(&iosram_mutex);
3312 
3313 	if (iosram_master == NULL) {
3314 		error = EIO;
3315 	} else {
3316 		for (i = 0, chunkp = chunks; i < nchunks && i < cnt;
3317 		    i++, chunkp++) {
3318 			bufp[i].key = chunkp->toc_data.key;
3319 			bufp[i].off = chunkp->toc_data.off;
3320 			bufp[i].len = chunkp->toc_data.len;
3321 			bufp[i].unused = chunkp->toc_data.unused;
3322 		}
3323 		*len = i * sizeof (iosram_toc_entry_t);
3324 	}
3325 
3326 	mutex_exit(&iosram_mutex);
3327 	return (error);
3328 }
3329 
3330 
3331 /*
3332  * iosram_print_state(instance)
3333  */
3334 static void
3335 iosram_print_state(int instance)
3336 {
3337 	struct iosramsoft	*softp;
3338 	char			pn[MAXNAMELEN];
3339 
3340 	if (instance < 0) {
3341 		softp = iosram_master;
3342 	} else {
3343 		softp = ddi_get_soft_state(iosramsoft_statep, instance);
3344 	}
3345 
3346 	if (softp == NULL) {
3347 		cmn_err(CE_CONT, "iosram_print_state: Can't find instance %d\n",
3348 		    instance);
3349 		return;
3350 	}
3351 	instance = softp->instance;
3352 
3353 	mutex_enter(&iosram_mutex);
3354 	mutex_enter(&softp->intr_mutex);
3355 
3356 	cmn_err(CE_CONT, "iosram_print_state(%d): ... %s\n", instance,
3357 	    ((softp == iosram_master) ? "MASTER" : "SLAVE"));
3358 
3359 	(void) ddi_pathname(softp->dip, pn);
3360 	cmn_err(CE_CONT, "  pathname:%s\n", pn);
3361 	cmn_err(CE_CONT, "  instance:%d  portid:%d iosramlen:0x%x\n",
3362 	    softp->instance, softp->portid, softp->iosramlen);
3363 	cmn_err(CE_CONT, "  softp:%p  handle:%p  iosramp:%p\n", softp,
3364 	    softp->handle, softp->iosramp);
3365 	cmn_err(CE_CONT, "  state:0x%x  tswitch_ok:%x  tswitch_fail:%x\n",
3366 	    softp->state, softp->tswitch_ok, softp->tswitch_fail);
3367 	cmn_err(CE_CONT, "  softintr_id:%p  intr_busy:%x  intr_pending:%x\n",
3368 	    softp->softintr_id, softp->intr_busy, softp->intr_pending);
3369 
3370 	mutex_exit(&softp->intr_mutex);
3371 	mutex_exit(&iosram_mutex);
3372 }
3373 
3374 
3375 /*
3376  * iosram_print_stats()
3377  */
3378 static void
3379 iosram_print_stats()
3380 {
3381 	uint32_t	calls;
3382 
3383 	cmn_err(CE_CONT, "iosram_stats:\n");
3384 	calls = iosram_stats.read;
3385 	cmn_err(CE_CONT, " read  ... calls:%x  bytes:%lx  avg_sz:%x\n",
3386 	    calls, iosram_stats.bread,
3387 	    (uint32_t)((calls != 0) ? (iosram_stats.bread/calls) : 0));
3388 
3389 	calls = iosram_stats.write;
3390 	cmn_err(CE_CONT, " write ... calls:%x  bytes:%lx  avg_sz:%x\n",
3391 	    calls, iosram_stats.bwrite,
3392 	    (uint32_t)((calls != 0) ? (iosram_stats.bwrite/calls) : 0));
3393 
3394 	cmn_err(CE_CONT, " intr recv (real:%x  soft:%x)  sent:%x  cback:%x\n",
3395 	    iosram_stats.intr_recv, iosram_stats.sintr_recv,
3396 	    iosram_stats.intr_send, iosram_stats.callbacks);
3397 
3398 	cmn_err(CE_CONT, " tswitch: %x  getflag:%x  setflag:%x\n",
3399 	    iosram_stats.tswitch, iosram_stats.getflag,
3400 	    iosram_stats.setflag);
3401 
3402 	cmn_err(CE_CONT, " iosram_rw_active_max: %x\n", iosram_rw_active_max);
3403 }
3404 
3405 
3406 static void
3407 iosram_print_cback()
3408 {
3409 	iosram_chunk_t	*chunkp;
3410 	int		i;
3411 
3412 	/*
3413 	 * Print callback handlers
3414 	 */
3415 	mutex_enter(&iosram_mutex);
3416 
3417 	cmn_err(CE_CONT, "IOSRAM callbacks:\n");
3418 	for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
3419 		if (chunkp->cback.handler) {
3420 			cmn_err(CE_CONT, "  %2d: key:0x%x  hdlr:%p  arg:%p "
3421 			    "busy:%d unreg:%d\n", i, chunkp->toc_data.key,
3422 			    chunkp->cback.handler, chunkp->cback.arg,
3423 			    chunkp->cback.busy, chunkp->cback.unregister);
3424 		}
3425 	}
3426 	mutex_exit(&iosram_mutex);
3427 }
3428 
3429 
3430 static void
3431 iosram_print_flags()
3432 {
3433 	int		i;
3434 	uint32_t	*keys;
3435 	iosram_flags_t	*flags;
3436 
3437 	mutex_enter(&iosram_mutex);
3438 
3439 	if (iosram_master == NULL) {
3440 		mutex_exit(&iosram_mutex);
3441 		cmn_err(CE_CONT, "IOSRAM Flags: not accessible\n");
3442 		return;
3443 	}
3444 
3445 	keys = kmem_alloc(nchunks * sizeof (uint32_t), KM_SLEEP);
3446 	flags = kmem_alloc(nchunks * sizeof (iosram_flags_t), KM_SLEEP);
3447 
3448 	for (i = 0; i < nchunks; i++) {
3449 		keys[i] = chunks[i].toc_data.key;
3450 		ddi_rep_get8(iosram_handle, (uint8_t *)&(flags[i]),
3451 		    (uint8_t *)(chunks[i].flagsp), sizeof (iosram_flags_t),
3452 		    DDI_DEV_AUTOINCR);
3453 	}
3454 
3455 	mutex_exit(&iosram_mutex);
3456 
3457 	cmn_err(CE_CONT, "IOSRAM Flags:\n");
3458 	for (i = 0; i < nchunks; i++) {
3459 		cmn_err(CE_CONT,
3460 		    "  %2d: key: 0x%x  data_valid:%x  int_pending:%x\n",
3461 		    i, keys[i], flags[i].data_valid, flags[i].int_pending);
3462 	}
3463 
3464 	kmem_free(keys, nchunks * sizeof (uint32_t));
3465 	kmem_free(flags, nchunks * sizeof (iosram_flags_t));
3466 }
3467 
3468 
3469 /*PRINTFLIKE1*/
3470 static void
3471 iosram_dprintf(const char *fmt, ...)
3472 {
3473 	char	msg_buf[256];
3474 	va_list	adx;
3475 
3476 	va_start(adx, fmt);
3477 	(void) vsnprintf(msg_buf, sizeof (msg_buf), fmt, adx);
3478 	va_end(adx);
3479 
3480 	cmn_err(CE_CONT, "%s", msg_buf);
3481 }
3482 #endif /* DEBUG */
3483 
3484 
3485 #if IOSRAM_LOG
3486 /*
3487  * iosram_print_log(int cnt)
3488  *	Print last few entries of the IOSRAM log in reverse order
3489  */
3490 static void
3491 iosram_print_log(int cnt)
3492 {
3493 	int	i;
3494 
3495 	if (cnt <= 0) {
3496 		cnt = 20;
3497 	} else if (cnt > IOSRAM_MAXLOG) {
3498 		cnt = IOSRAM_MAXLOG;
3499 	}
3500 
3501 
3502 	cmn_err(CE_CONT,
3503 	    "\niosram_logseq: 0x%x  lbolt: %lx  iosram_log_level:%x\n",
3504 	    iosram_logseq, lbolt, iosram_log_level);
3505 	cmn_err(CE_CONT, "iosram_logbuf: %p  max entries:0x%x\n",
3506 	    iosram_logbuf, IOSRAM_MAXLOG);
3507 	for (i = iosram_logseq;  --i >= 0 && --cnt >= 0; ) {
3508 		iosram_log_t	*logp;
3509 
3510 		mutex_enter(&iosram_log_mutex);
3511 
3512 		logp = &iosram_logbuf[i % IOSRAM_MAXLOG];
3513 		cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp);
3514 
3515 		if (logp->fmt) {
3516 			cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2,
3517 			    logp->arg3, logp->arg4);
3518 			if (logp->fmt[strlen(logp->fmt)-1] != '\n') {
3519 				cmn_err(CE_CONT, "\n");
3520 			}
3521 		} else {
3522 			cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n",
3523 			    logp->fmt, logp->arg1, logp->arg2,
3524 			    logp->arg3, logp->arg4);
3525 		}
3526 
3527 		mutex_exit(&iosram_log_mutex);
3528 	}
3529 }
3530 #endif	/* IOSRAM_LOG */
3531