1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * IOSRAM leaf driver to SBBC nexus driver. This driver is used
29 * by Starcat Domain SW to read/write from/to the IO sram.
30 */
31
32 #include <sys/types.h>
33 #include <sys/conf.h>
34 #include <sys/ddi.h>
35 #include <sys/sunddi.h>
36 #include <sys/ddi_impldefs.h>
37 #include <sys/obpdefs.h>
38 #include <sys/promif.h>
39 #include <sys/prom_plat.h>
40 #include <sys/cmn_err.h>
41 #include <sys/conf.h> /* req. by dev_ops flags MTSAFE etc. */
42 #include <sys/modctl.h> /* for modldrv */
43 #include <sys/stat.h> /* ddi_create_minor_node S_IFCHR */
44 #include <sys/errno.h>
45 #include <sys/kmem.h>
46 #include <sys/kstat.h>
47 #include <sys/debug.h>
48
49 #include <sys/axq.h>
50 #include <sys/iosramreg.h>
51 #include <sys/iosramio.h>
52 #include <sys/iosramvar.h>
53
54
55 #if defined(DEBUG)
56 int iosram_debug = 0;
57 static void iosram_dprintf(const char *fmt, ...);
58 #define DPRINTF(level, arg) \
59 { if (iosram_debug >= level) iosram_dprintf arg; }
60 #else /* !DEBUG */
61 #define DPRINTF(level, arg)
62 #endif /* !DEBUG */
63
64
65 /*
66 * IOSRAM module global state
67 */
68 static void *iosramsoft_statep; /* IOSRAM state pointer */
69 static kmutex_t iosram_mutex; /* mutex lock */
70
71 static iosram_chunk_t *chunks = NULL; /* array of TOC entries */
72 static int nchunks = 0; /* # of TOC entries */
73 static iosram_chunk_t *iosram_hashtab[IOSRAM_HASHSZ]; /* key hash table */
74
75 static kcondvar_t iosram_tswitch_wait; /* tunnel switch wait cv */
76 static int iosram_tswitch_wakeup = 0;	/* flag indicating one or */
77 /* more threads waiting on */
78 /* iosram_tswitch_wait cv */
79 static int iosram_tswitch_active = 0; /* tunnel switch active flag */
80 static int iosram_tswitch_aborted = 0; /* tunnel switch abort flag */
81 static clock_t iosram_tswitch_tstamp = 0; /* lbolt of last tswitch end */
82 static kcondvar_t iosram_rw_wait; /* read/write wait cv */
83 static int iosram_rw_wakeup = 0;	/* flag indicating one or */
84 /* more threads waiting on */
85 /* iosram_rw_wait cv */
86 static int iosram_rw_active = 0; /* # threads accessing IOSRAM */
87 #if defined(DEBUG)
88 static int iosram_rw_active_max = 0;
89 #endif
90
91 static struct iosramsoft *iosram_new_master = NULL; /* new tunnel target */
92 static struct iosramsoft *iosram_master = NULL; /* master tunnel */
93 static struct iosramsoft *iosram_instances = NULL; /* list of softstates */
94
95 static ddi_acc_handle_t iosram_handle = NULL; /* master IOSRAM map handle */
96
97 static void (*iosram_hdrchange_handler)() = NULL;
98
99 #if IOSRAM_STATS
100 static struct iosram_stat iosram_stats; /* IOSRAM statistics */
101 static void iosram_print_stats(); /* forward declaration */
102 #endif /* IOSRAM_STATS */
103
104
105 #if IOSRAM_LOG
106 kmutex_t iosram_log_mutex;
107 int iosram_log_level = 1;
108 int iosram_log_print = 0; /* print log when recorded */
109 uint32_t iosram_logseq;
110 iosram_log_t iosram_logbuf[IOSRAM_MAXLOG];
111 static void iosram_print_log(int cnt); /* forward declaration */
112 #endif /* IOSRAM_LOG */
113
114
115 /* driver entry point fn definitions */
116 static int iosram_open(dev_t *, int, int, cred_t *);
117 static int iosram_close(dev_t, int, int, cred_t *);
118 static int iosram_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
119
120 /* configuration entry point fn definitions */
121 static int iosram_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
122 static int iosram_attach(dev_info_t *, ddi_attach_cmd_t);
123 static int iosram_detach(dev_info_t *, ddi_detach_cmd_t);
124
125
126 /* forward declarations */
127 static iosram_chunk_t *iosram_find_chunk(uint32_t key);
128 static void iosram_set_master(struct iosramsoft *softp);
129 static int iosram_is_chosen(struct iosramsoft *softp);
130 static int iosram_tunnel_capable(struct iosramsoft *softp);
131 static int iosram_read_toc(struct iosramsoft *softp);
132 static void iosram_init_hashtab(void);
133 static void iosram_update_addrs(struct iosramsoft *softp);
134
135 static int iosram_setup_map(struct iosramsoft *softp);
136 static void iosram_remove_map(struct iosramsoft *softp);
137 static int iosram_add_intr(iosramsoft_t *);
138 static int iosram_remove_intr(iosramsoft_t *);
139
140 static void iosram_add_instance(struct iosramsoft *softp);
141 static void iosram_remove_instance(int instance);
142 static int iosram_switch_tunnel(iosramsoft_t *softp);
143 static void iosram_abort_tswitch();
144
145 #if defined(DEBUG)
146 /* forward declarations for debugging */
147 static int iosram_get_keys(iosram_toc_entry_t *buf, uint32_t *len);
148 static void iosram_print_cback();
149 static void iosram_print_state(int);
150 static void iosram_print_flags();
151 #endif
152
153
154
155 /*
156 * cb_ops
157 */
158 static struct cb_ops iosram_cb_ops = {
159 iosram_open, /* cb_open */
160 iosram_close, /* cb_close */
161 nodev, /* cb_strategy */
162 nodev, /* cb_print */
163 nodev, /* cb_dump */
164 nodev, /* cb_read */
165 nodev, /* cb_write */
166 iosram_ioctl, /* cb_ioctl */
167 nodev, /* cb_devmap */
168 nodev, /* cb_mmap */
169 nodev, /* cb_segmap */
170 nochpoll, /* cb_chpoll */
171 ddi_prop_op, /* cb_prop_op */
172 NULL, /* cb_stream */
173 (int)(D_NEW | D_MP | D_HOTPLUG) /* cb_flag */
174 };
175
176 /*
177 * Declare ops vectors for auto configuration.
178 */
179 struct dev_ops iosram_ops = {
180 DEVO_REV, /* devo_rev */
181 0, /* devo_refcnt */
182 iosram_getinfo, /* devo_getinfo */
183 nulldev, /* devo_identify */
184 nulldev, /* devo_probe */
185 iosram_attach, /* devo_attach */
186 iosram_detach, /* devo_detach */
187 nodev, /* devo_reset */
188 &iosram_cb_ops, /* devo_cb_ops */
189 (struct bus_ops *)NULL, /* devo_bus_ops */
190 nulldev, /* devo_power */
191 ddi_quiesce_not_supported, /* devo_quiesce */
192 };
193
194 /*
195 * Loadable module support.
196 */
197 extern struct mod_ops mod_driverops;
198
199 static struct modldrv iosrammodldrv = {
200 &mod_driverops, /* type of module - driver */
201 "IOSRAM Leaf driver",
202 &iosram_ops,
203 };
204
205 static struct modlinkage iosrammodlinkage = {
206 MODREV_1,
207 &iosrammodldrv,
208 NULL
209 };
210
211
212 int
213 _init(void)
214 {
215 int error;
216 int i;
217
218 mutex_init(&iosram_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
219 cv_init(&iosram_tswitch_wait, NULL, CV_DRIVER, NULL);
220 cv_init(&iosram_rw_wait, NULL, CV_DRIVER, NULL);
221 #if defined(IOSRAM_LOG)
222 mutex_init(&iosram_log_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
223 #endif
224
225 DPRINTF(1, ("_init:IOSRAM\n"));
226
227 for (i = 0; i < IOSRAM_HASHSZ; i++) {
228 iosram_hashtab[i] = NULL;
229 }
230
231 if ((error = ddi_soft_state_init(&iosramsoft_statep,
232 sizeof (struct iosramsoft), 1)) != 0) {
233 goto failed;
234 }
235 if ((error = mod_install(&iosrammodlinkage)) != 0) {
236 ddi_soft_state_fini(&iosramsoft_statep);
237 goto failed;
238 }
239
240 IOSRAMLOG(0, "_init:IOSRAM ... error:%d statep:%p\n",
241 error, iosramsoft_statep, NULL, NULL);
242
243 return (error);
244
245 failed:
246 cv_destroy(&iosram_tswitch_wait);
247 cv_destroy(&iosram_rw_wait);
248 mutex_destroy(&iosram_mutex);
249 #if defined(IOSRAM_LOG)
250 mutex_destroy(&iosram_log_mutex);
251 #endif
252 IOSRAMLOG(0, "_init:IOSRAM ... error:%d statep:%p\n",
253 error, iosramsoft_statep, NULL, NULL);
254
255 return (error);
256 }
257
258
259 int
260 _fini(void)
261 {
262 #ifndef DEBUG
263 return (EBUSY);
264 #else /* !DEBUG */
265 int error;
266
267 if ((error = mod_remove(&iosrammodlinkage)) == 0) {
268 ddi_soft_state_fini(&iosramsoft_statep);
269
270 cv_destroy(&iosram_tswitch_wait);
271 cv_destroy(&iosram_rw_wait);
272 mutex_destroy(&iosram_mutex);
273 #if defined(IOSRAM_LOG)
274 mutex_destroy(&iosram_log_mutex);
275 #endif
276 }
277 DPRINTF(1, ("_fini:IOSRAM error:%d\n", error));
278
279 return (error);
280 #endif /* !DEBUG */
281 }
282
283
284 int
285 _info(struct modinfo *modinfop)
286 {
287 return (mod_info(&iosrammodlinkage, modinfop));
288 }
289
290
291 static int
292 iosram_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
293 {
294 int instance;
295 int propval;
296 int length;
297 char name[32];
298 struct iosramsoft *softp;
299
300 instance = ddi_get_instance(dip);
301
302 DPRINTF(1, ("iosram(%d): attach dip:%p\n", instance, (void *)dip));
303
304 IOSRAMLOG(1, "ATTACH: dip:%p instance %d ... start\n",
305 dip, instance, NULL, NULL);
306 switch (cmd) {
307 case DDI_ATTACH:
308 break;
309 case DDI_RESUME:
310 if (!(softp = ddi_get_soft_state(iosramsoft_statep,
311 instance))) {
312 return (DDI_FAILURE);
313 }
314 mutex_enter(&iosram_mutex);
315 mutex_enter(&softp->intr_mutex);
316 if (!softp->suspended) {
317 mutex_exit(&softp->intr_mutex);
318 mutex_exit(&iosram_mutex);
319 return (DDI_FAILURE);
320 }
321 softp->suspended = 0;
322
323 /*
324 * enable SBBC interrupts if SBBC is mapped in
325 * restore the value saved during detach
326 */
327 if (softp->sbbc_region) {
328 ddi_put32(softp->sbbc_handle,
329 &(softp->sbbc_region->int_enable.reg),
330 softp->int_enable_sav);
331 }
332
333 /*
334 * Trigger soft interrupt handler to process any pending
335 * interrupts.
336 */
337 if (softp->intr_pending && !softp->intr_busy &&
338 (softp->softintr_id != NULL)) {
339 ddi_trigger_softintr(softp->softintr_id);
340 }
341
342 mutex_exit(&softp->intr_mutex);
343 mutex_exit(&iosram_mutex);
344
345 return (DDI_SUCCESS);
346
347 default:
348 return (DDI_FAILURE);
349 }
350
351 if (ddi_soft_state_zalloc(iosramsoft_statep, instance) != 0) {
352 return (DDI_FAILURE);
353 }
354
355 if ((softp = ddi_get_soft_state(iosramsoft_statep, instance)) == NULL) {
356 return (DDI_FAILURE);
357 }
358 softp->dip = dip;
359 softp->instance = instance;
360 softp->sbbc_region = NULL;
361
362 /*
363 * If this instance is not tunnel capable, we don't attach it.
364 */
365 if (iosram_tunnel_capable(softp) == 0) {
366 DPRINTF(1, ("iosram(%d): not tunnel_capable\n", instance));
367 IOSRAMLOG(1, "ATTACH(%d): not tunnel_capable\n", instance, NULL,
368 NULL, NULL);
369 goto attach_fail;
370 }
371
372 /*
373 * Need to create an "interrupt-priorities" property to define the PIL
374 * to be used with the interrupt service routine.
375 */
376 if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
377 "interrupt-priorities", &length) == DDI_PROP_NOT_FOUND) {
378 DPRINTF(1, ("iosram(%d): creating interrupt priority property",
379 instance));
380 propval = IOSRAM_PIL;
381 if (ddi_prop_create(DDI_DEV_T_NONE, dip, 0,
382 "interrupt-priorities", (caddr_t)&propval, sizeof (propval))
383 != DDI_PROP_SUCCESS) {
384 cmn_err(CE_WARN,
385 "iosram_attach: failed to create property");
386 goto attach_fail;
387 }
388 }
389
390 /*
391 * Get interrupts cookies and initialize per-instance mutexes
392 */
393 if (ddi_get_iblock_cookie(softp->dip, 0, &softp->real_iblk)
394 != DDI_SUCCESS) {
395 IOSRAMLOG(1, "ATTACH(%d): cannot get soft intr cookie\n",
396 instance, NULL, NULL, NULL);
397 goto attach_fail;
398 }
399 mutex_init(&softp->intr_mutex, NULL, MUTEX_DRIVER,
400 (void *)softp->real_iblk);
401
402 /*
403 * Add this instance to the iosram_instances list so that it can be used
404 * for the tunnel in the future.
405 */
406 mutex_enter(&iosram_mutex);
407 softp->state = IOSRAM_STATE_INIT;
408 iosram_add_instance(softp);
409
410 /*
411 * If this is the chosen IOSRAM and there is no master IOSRAM yet, then
412 * let's set this instance as the master.
413 */
414 if (iosram_master == NULL && iosram_is_chosen(softp)) {
415 (void) iosram_switch_tunnel(softp);
416
417 /*
418 * XXX Do we need to panic if unable to setup master IOSRAM?
419 */
420 if (iosram_master == NULL) {
421 cmn_err(CE_WARN,
422 "iosram(%d): can't setup master tunnel\n",
423 instance);
424 softp->state = 0;
425 iosram_remove_instance(softp->instance);
426 mutex_exit(&iosram_mutex);
427 mutex_destroy(&softp->intr_mutex);
428 goto attach_fail;
429 }
430 }
431
432 mutex_exit(&iosram_mutex);
433
434 /*
435 * Create minor node
436 */
437 (void) sprintf(name, "iosram%d", instance);
438 if (ddi_create_minor_node(dip, name, S_IFCHR, instance, NULL, NULL) ==
439 DDI_FAILURE) {
440 /*
441 * Minor node seems to be needed only for debugging purposes.
442 * Therefore, there is no need to fail this attach request.
443 * Simply print a message out.
444 */
445 cmn_err(CE_NOTE, "!iosram(%d): can't create minor node\n",
446 instance);
447 }
448 ddi_report_dev(dip);
449
450 DPRINTF(1, ("iosram_attach(%d): success.\n", instance));
451 IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... success softp:%p\n",
452 dip, instance, softp, NULL);
453
454 return (DDI_SUCCESS);
455
456 attach_fail:
457 DPRINTF(1, ("iosram_attach(%d):failed.\n", instance));
458 IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... failed.\n",
459 dip, instance, NULL, NULL);
460
461 ddi_soft_state_free(iosramsoft_statep, instance);
462 return (DDI_FAILURE);
463 }
464
465
466 static int
467 iosram_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
468 {
469 int instance;
470 struct iosramsoft *softp;
471
472 instance = ddi_get_instance(dip);
473 if (!(softp = ddi_get_soft_state(iosramsoft_statep, instance))) {
474 return (DDI_FAILURE);
475 }
476
477 IOSRAMLOG(1, "DETACH: dip:%p instance %d softp:%p\n",
478 dip, instance, softp, NULL);
479
480 switch (cmd) {
481 case DDI_DETACH:
482 break;
483 case DDI_SUSPEND:
484 mutex_enter(&iosram_mutex);
485 mutex_enter(&softp->intr_mutex);
486 if (softp->suspended) {
487 mutex_exit(&softp->intr_mutex);
488 mutex_exit(&iosram_mutex);
489 return (DDI_FAILURE);
490 }
491 softp->suspended = 1;
492 /*
493 * Disable SBBC interrupts if SBBC is mapped in
494 */
495 if (softp->sbbc_region) {
496 /* save current interrupt enable register */
497 softp->int_enable_sav = ddi_get32(softp->sbbc_handle,
498 &(softp->sbbc_region->int_enable.reg));
499 ddi_put32(softp->sbbc_handle,
500 &(softp->sbbc_region->int_enable.reg), 0x0);
501 }
502 mutex_exit(&softp->intr_mutex);
503 mutex_exit(&iosram_mutex);
504 return (DDI_SUCCESS);
505
506 default:
507 return (DDI_FAILURE);
508 }
509
510
511 /*
512 * Indicate that this instance is being detached so that this instance
513 * does not become a target for a tunnel switch in the future.
514 */
515 mutex_enter(&iosram_mutex);
516 softp->state |= IOSRAM_STATE_DETACH;
517
518 /*
519 * If this instance is currently the master or the target of the tunnel
520 * switch, then we need to wait and switch tunnel, if necessary.
521 */
522 if (iosram_master == softp || (softp->state & IOSRAM_STATE_TSWITCH)) {
523 mutex_exit(&iosram_mutex);
524 (void) iosram_switchfrom(instance);
525 mutex_enter(&iosram_mutex);
526 }
527
528 /*
529 * If the tunnel switch is in progress and we are the master or target
530 * of tunnel relocation, then we can't detach this instance right now.
531 */
532 if (softp->state & IOSRAM_STATE_TSWITCH) {
533 softp->state &= ~IOSRAM_STATE_DETACH;
534 mutex_exit(&iosram_mutex);
535 return (DDI_FAILURE);
536 }
537
538 /*
539 * We can't allow master IOSRAM to be detached as we won't be able to
540 * communicate otherwise.
541 */
542 if (iosram_master == softp) {
543 softp->state &= ~IOSRAM_STATE_DETACH;
544 mutex_exit(&iosram_mutex);
545 return (DDI_FAILURE);
546 }
547
548 /*
549 * Now remove our instance from the iosram_instances list.
550 */
551 iosram_remove_instance(instance);
552 mutex_exit(&iosram_mutex);
553
554 /*
555 * Instances should only ever be mapped if they are the master and/or
556 * participating in a tunnel switch. Neither should be the case here.
557 */
558 ASSERT((softp->state & IOSRAM_STATE_MAPPED) == 0);
559
560 /*
561 * Destroy per-instance mutexes
562 */
563 mutex_destroy(&softp->intr_mutex);
564
565 ddi_remove_minor_node(dip, NULL);
566
567 /*
568 * Finally remove our soft state structure
569 */
570 ddi_soft_state_free(iosramsoft_statep, instance);
571
572 return (DDI_SUCCESS);
573 }
574
575
576 /* ARGSUSED0 */
577 static int
578 iosram_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
579 void **result)
580 {
581 dev_t dev = (dev_t)arg;
582 struct iosramsoft *softp;
583 int instance, ret;
584
585 instance = getminor(dev);
586
587 IOSRAMLOG(2, "GETINFO: dip:%x instance %d dev:%x infocmd:%x\n",
588 dip, instance, dev, infocmd);
589
590 switch (infocmd) {
591 case DDI_INFO_DEVT2DEVINFO:
592 softp = ddi_get_soft_state(iosramsoft_statep, instance);
593 if (softp == NULL) {
594 *result = NULL;
595 ret = DDI_FAILURE;
596 } else {
597 *result = softp->dip;
598 ret = DDI_SUCCESS;
599 }
600 break;
601 case DDI_INFO_DEVT2INSTANCE:
602 *result = (void *)(uintptr_t)instance;
603 ret = DDI_SUCCESS;
604 break;
605 default:
606 ret = DDI_FAILURE;
607 break;
608 }
609
610 return (ret);
611 }
612
613
614 /*ARGSUSED1*/
615 static int
616 iosram_open(dev_t *dev, int flag, int otype, cred_t *credp)
617 {
618 struct iosramsoft *softp;
619 int instance;
620
621 instance = getminor(*dev);
622 softp = ddi_get_soft_state(iosramsoft_statep, instance);
623
624 if (softp == NULL) {
625 return (ENXIO);
626 }
627
628 IOSRAMLOG(1, "OPEN: dev:%p otype:%x ... instance:%d softp:%p\n",
629 *dev, otype, softp->instance, softp);
630
631 return (0);
632 }
633
634
635 /*ARGSUSED1*/
636 static int
637 iosram_close(dev_t dev, int flag, int otype, cred_t *credp)
638 {
639 struct iosramsoft *softp;
640 int instance;
641
642 instance = getminor(dev);
643 softp = ddi_get_soft_state(iosramsoft_statep, instance);
644 if (softp == NULL) {
645 return (ENXIO);
646 }
647
648 IOSRAMLOG(1, "CLOSE: dev:%p otype:%x ... instance:%d softp:%p\n",
649 dev, otype, softp->instance, softp);
650
651 return (0);
652 }
653
654
655 int
656 iosram_rd(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
657 {
658 iosram_chunk_t *chunkp;
659 uint32_t chunk_len;
660 uint8_t *iosramp;
661 ddi_acc_handle_t handle;
662 int boff;
663 union {
664 uchar_t cbuf[UINT32SZ];
665 uint32_t data;
666 } word;
667
668 int error = 0;
669 uint8_t *buf = (uint8_t *)dptr;
670
671 /*
672 * We try to read from the IOSRAM using double word or word access
673 * provided both "off" and "buf" are (or can be) double word or word
674 * aligned. Otherwise, we try to align the "off" to a word boundary and
675 * then try to read data from the IOSRAM using word access, but store it
676 * into buf buffer using byte access.
677 *
678 * If the leading/trailing portion of the IOSRAM data is not word
679 * aligned, it will always be copied using byte access.
680 */
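	/*
	 * Worked example (illustrative, assuming an 8-byte aligned chunk
	 * base and an 8-byte aligned caller buffer): a read with off = 0
	 * and len = 23 needs no leading byte copies, moves 16 bytes with
	 * ddi_rep_get64(), 4 more with ddi_get32(), and the last 3 with
	 * ddi_rep_get8().  If the caller's buffer were misaligned, the same
	 * read would instead fall through to the final loop below, which
	 * reads the IOSRAM a word at a time and stores into the buffer a
	 * byte at a time.
	 */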
681 IOSRAMLOG(1, "RD: key: 0x%x off:%x len:%x buf:%p\n",
682 key, off, len, buf);
683
684 /*
685 * Acquire lock and look for the requested chunk. If it exists, make
686 * sure the requested read is within the chunk's bounds and no tunnel
687 * switch is active.
688 */
689 mutex_enter(&iosram_mutex);
690 chunkp = iosram_find_chunk(key);
691 chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;
692
693 if (iosram_master == NULL) {
694 error = EIO;
695 } else if (chunkp == NULL) {
696 error = EINVAL;
697 } else if ((off >= chunk_len) || (len > chunk_len) ||
698 ((off + len) > chunk_len)) {
699 error = EMSGSIZE;
700 } else if (iosram_tswitch_active) {
701 error = EAGAIN;
702 }
703
704 if (error) {
705 mutex_exit(&iosram_mutex);
706 return (error);
707 }
708
709 /*
710 * Bump reference count to indicate #thread accessing IOSRAM and release
711 * the lock.
712 */
713 iosram_rw_active++;
714 #if defined(DEBUG)
715 if (iosram_rw_active > iosram_rw_active_max) {
716 iosram_rw_active_max = iosram_rw_active;
717 }
718 #endif
719 mutex_exit(&iosram_mutex);
720
721 IOSRAM_STAT(read);
722 IOSRAM_STAT_ADD(bread, len);
723
724 /* Get starting address and map handle */
725 iosramp = chunkp->basep + off;
726 handle = iosram_handle;
727
728 /*
729 * Align the off to word boundary and then try reading/writing data
730 * using double word or word access.
731 */
732 if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
733 int cnt = UINT32SZ - boff;
734
735 if (cnt > len) {
736 cnt = len;
737 }
738 IOSRAMLOG(2,
739 "RD: align rep_get8(buf:%p sramp:%p cnt:%x) len:%x\n",
740 buf, iosramp, cnt, len);
741 ddi_rep_get8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
742 buf += cnt;
743 iosramp += cnt;
744 len -= cnt;
745 }
746
747 if ((len >= UINT64SZ) &&
748 ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
749 /*
750 * Both source and destination are double word aligned
751 */
752 int cnt = len/UINT64SZ;
753
754 IOSRAMLOG(2,
755 "RD: rep_get64(buf:%p sramp:%p cnt:%x) len:%x\n",
756 buf, iosramp, cnt, len);
757 ddi_rep_get64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
758 cnt, DDI_DEV_AUTOINCR);
759 iosramp += cnt * UINT64SZ;
760 buf += cnt * UINT64SZ;
761 len -= cnt * UINT64SZ;
762
763 /*
764 * read remaining data using word and byte access
765 */
766 if (len >= UINT32SZ) {
767 IOSRAMLOG(2,
768 "RD: get32(buf:%p sramp:%p) len:%x\n",
769 buf, iosramp, len, NULL);
770 *(uint32_t *)buf = ddi_get32(handle,
771 (uint32_t *)iosramp);
772 iosramp += UINT32SZ;
773 buf += UINT32SZ;
774 len -= UINT32SZ;
775 }
776
777 if (len != 0) {
778 ddi_rep_get8(handle, buf, iosramp, len,
779 DDI_DEV_AUTOINCR);
780 }
781 } else if ((len >= UINT32SZ) &&
782 ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
783 /*
784 * Both source and destination are word aligned
785 */
786 int cnt = len/UINT32SZ;
787
788 IOSRAMLOG(2,
789 "RD: rep_get32(buf:%p sramp:%p cnt:%x) len:%x\n",
790 buf, iosramp, cnt, len);
791 ddi_rep_get32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
792 cnt, DDI_DEV_AUTOINCR);
793 iosramp += cnt * UINT32SZ;
794 buf += cnt * UINT32SZ;
795 len -= cnt * UINT32SZ;
796
797 /*
798 * copy the remainder using byte access
799 */
800 if (len != 0) {
801 ddi_rep_get8(handle, buf, iosramp, len,
802 DDI_DEV_AUTOINCR);
803 }
804 } else if (len != 0) {
805 /*
806 * We know that the "off" (i.e. iosramp) is at least word
807 * aligned. We need to read IOSRAM word at a time and copy it
808 * byte at a time.
809 */
810 ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);
811
812 IOSRAMLOG(2,
813 "RD: unaligned get32(buf:%p sramp:%p) len:%x\n",
814 buf, iosramp, len, NULL);
815 for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
816 word.data = ddi_get32(handle, (uint32_t *)iosramp);
817 *buf++ = word.cbuf[0];
818 *buf++ = word.cbuf[1];
819 *buf++ = word.cbuf[2];
820 *buf++ = word.cbuf[3];
821 }
822
823 /*
824 * copy the remaining data using byte access
825 */
826 if (len != 0) {
827 ddi_rep_get8(handle, buf, iosramp, len,
828 DDI_DEV_AUTOINCR);
829 }
830 }
831
832 /*
833 * Reacquire mutex lock, decrement refcnt and if refcnt is 0 and any
834 * threads are waiting for r/w activity to complete, wake them up.
835 */
836 mutex_enter(&iosram_mutex);
837 ASSERT(iosram_rw_active > 0);
838
839 if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
840 iosram_rw_wakeup = 0;
841 cv_broadcast(&iosram_rw_wait);
842 }
843 mutex_exit(&iosram_mutex);
844
845 return (error);
846 }
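
/*
 * Usage sketch (illustrative only, not part of the driver): a client
 * module reads the first bytes of a chunk.  EXAMPLE_KEY is a hypothetical
 * key; real callers use the chunk keys published in the IOSRAM headers.
 * EAGAIN means a tunnel switch is in progress and the read can simply be
 * retried.
 *
 *	char buf[64];
 *	int err;
 *
 *	while ((err = iosram_rd(EXAMPLE_KEY, 0, sizeof (buf),
 *	    (caddr_t)buf)) == EAGAIN)
 *		delay(drv_usectohz(10000));
 *	if (err != 0)
 *		cmn_err(CE_WARN, "example: iosram_rd failed (%d)", err);
 */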
847
848
849 /*
850 * _iosram_write(key, off, len, dptr, force)
851 * Internal common routine to write to the IOSRAM.
852 */
853 static int
854 _iosram_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr, int force)
855 {
856 iosram_chunk_t *chunkp;
857 uint32_t chunk_len;
858 uint8_t *iosramp;
859 ddi_acc_handle_t handle;
860 int boff;
861 union {
862 uint8_t cbuf[UINT32SZ];
863 uint32_t data;
864 } word;
865
866 int error = 0;
867 uint8_t *buf = (uint8_t *)dptr;
868
869 /*
870 * We try to write to the IOSRAM using double word or word access
871 * provided both "off" and "buf" are (or can be) double word or word
872 * aligned. Otherwise, we try to align the "off" to a word boundary and
873 * then try to write data to the IOSRAM using word access, but read data
874 * from the buf buffer using byte access.
875 *
876 * If the leading/trailing portion of the IOSRAM data is not word
877 * aligned, it will always be written using byte access.
878 */
879 IOSRAMLOG(1, "WR: key: 0x%x off:%x len:%x buf:%p\n",
880 key, off, len, buf);
881
882 /*
883 * Acquire lock and look for the requested chunk. If it exists, make
884 * sure the requested write is within the chunk's bounds and no tunnel
885 * switch is active.
886 */
887 mutex_enter(&iosram_mutex);
888 chunkp = iosram_find_chunk(key);
889 chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;
890
891 if (iosram_master == NULL) {
892 error = EIO;
893 } else if (chunkp == NULL) {
894 error = EINVAL;
895 } else if ((off >= chunk_len) || (len > chunk_len) ||
896 ((off+len) > chunk_len)) {
897 error = EMSGSIZE;
898 } else if (iosram_tswitch_active && !force) {
899 error = EAGAIN;
900 }
901
902 if (error) {
903 mutex_exit(&iosram_mutex);
904 return (error);
905 }
906
907 /*
908 * If this is a forced write and there's a tunnel switch in progress,
909 * abort the switch.
910 */
911 if (iosram_tswitch_active && force) {
912 cmn_err(CE_NOTE, "!iosram: Aborting tswitch on force_write");
913 iosram_abort_tswitch();
914 }
915
916 /*
917 * Bump reference count to indicate #thread accessing IOSRAM
918 * and release the lock.
919 */
920 iosram_rw_active++;
921 #if defined(DEBUG)
922 if (iosram_rw_active > iosram_rw_active_max) {
923 iosram_rw_active_max = iosram_rw_active;
924 }
925 #endif
926 mutex_exit(&iosram_mutex);
927
928
929 IOSRAM_STAT(write);
930 IOSRAM_STAT_ADD(bwrite, len);
931
932 /* Get starting address and map handle */
933 iosramp = chunkp->basep + off;
934 handle = iosram_handle;
935
936 /*
937 * Align the off to word boundary and then try reading/writing
938 * data using double word or word access.
939 */
940 if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
941 int cnt = UINT32SZ - boff;
942
943 if (cnt > len) {
944 cnt = len;
945 }
946 IOSRAMLOG(2,
947 "WR: align rep_put8(buf:%p sramp:%p cnt:%x) len:%x\n",
948 buf, iosramp, cnt, len);
949 ddi_rep_put8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
950 buf += cnt;
951 iosramp += cnt;
952 len -= cnt;
953 }
954
955 if ((len >= UINT64SZ) &&
956 ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
957 /*
958 * Both source and destination are double word aligned
959 */
960 int cnt = len/UINT64SZ;
961
962 IOSRAMLOG(2,
963 "WR: rep_put64(buf:%p sramp:%p cnt:%x) len:%x\n",
964 buf, iosramp, cnt, len);
965 ddi_rep_put64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
966 cnt, DDI_DEV_AUTOINCR);
967 iosramp += cnt * UINT64SZ;
968 buf += cnt * UINT64SZ;
969 len -= cnt * UINT64SZ;
970
971 /*
972 * Copy the remaining data using word & byte access
973 */
974 if (len >= UINT32SZ) {
975 IOSRAMLOG(2,
976 "WR: put32(buf:%p sramp:%p) len:%x\n", buf, iosramp,
977 len, NULL);
978 ddi_put32(handle, (uint32_t *)iosramp,
979 *(uint32_t *)buf);
980 iosramp += UINT32SZ;
981 buf += UINT32SZ;
982 len -= UINT32SZ;
983 }
984
985 if (len != 0) {
986 ddi_rep_put8(handle, buf, iosramp, len,
987 DDI_DEV_AUTOINCR);
988 }
989 } else if ((len >= UINT32SZ) &&
990 ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
991 /*
992 * Both source and destination are word aligned
993 */
994 int cnt = len/UINT32SZ;
995
996 IOSRAMLOG(2,
997 "WR: rep_put32(buf:%p sramp:%p cnt:%x) len:%x\n",
998 buf, iosramp, cnt, len);
999 ddi_rep_put32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
1000 cnt, DDI_DEV_AUTOINCR);
1001 iosramp += cnt * UINT32SZ;
1002 buf += cnt * UINT32SZ;
1003 len -= cnt * UINT32SZ;
1004
1005 /*
1006 * copy the remainder using byte access
1007 */
1008 if (len != 0) {
1009 ddi_rep_put8(handle, buf, iosramp, len,
1010 DDI_DEV_AUTOINCR);
1011 }
1012 } else if (len != 0) {
1013 /*
1014 * We know that the "off" is at least word aligned. We
1015 * need to read data from buf buffer byte at a time, and
1016 * write it to the IOSRAM word at a time.
1017 */
1018
1019 ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);
1020
1021 IOSRAMLOG(2,
1022 "WR: unaligned put32(buf:%p sramp:%p) len:%x\n",
1023 buf, iosramp, len, NULL);
1024 for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
1025 word.cbuf[0] = *buf++;
1026 word.cbuf[1] = *buf++;
1027 word.cbuf[2] = *buf++;
1028 word.cbuf[3] = *buf++;
1029 ddi_put32(handle, (uint32_t *)iosramp, word.data);
1030 }
1031
1032 /*
1033 * copy the remaining data using byte access
1034 */
1035 if (len != 0) {
1036 ddi_rep_put8(handle, buf, iosramp,
1037 len, DDI_DEV_AUTOINCR);
1038 }
1039 }
1040
1041 /*
1042 * Reacquire mutex lock, decrement refcnt and if refcnt is 0 and
1043 * any threads are waiting for r/w activity to complete, wake them up.
1044 */
1045 mutex_enter(&iosram_mutex);
1046 ASSERT(iosram_rw_active > 0);
1047
1048 if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
1049 iosram_rw_wakeup = 0;
1050 cv_broadcast(&iosram_rw_wait);
1051 }
1052 mutex_exit(&iosram_mutex);
1053
1054 return (error);
1055 }
1056
1057
1058 int
1059 iosram_force_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
1060 {
1061 return (_iosram_write(key, off, len, dptr, 1 /* force */));
1062 }
1063
1064
1065 int
1066 iosram_wr(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
1067 {
1068 return (_iosram_write(key, off, len, dptr, 0));
1069 }
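
/*
 * Usage sketch (illustrative only): writing mirrors reading.  EXAMPLE_KEY
 * is hypothetical.  iosram_wr() fails with EAGAIN while a tunnel switch is
 * active; iosram_force_write() instead aborts the switch and proceeds, so
 * it is presumably reserved for data that cannot wait.
 *
 *	int err;
 *
 *	err = iosram_wr(EXAMPLE_KEY, 0, sizeof (buf), (caddr_t)buf);
 *	if (err == EAGAIN)
 *		err = iosram_force_write(EXAMPLE_KEY, 0, sizeof (buf),
 *		    (caddr_t)buf);
 */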
1070
1071
1072 /*
1073 * iosram_register(key, handler, arg)
1074 * Register a handler and an arg for the specified chunk. This handler
1075 * will be invoked when an interrupt is received from the other side and
1076 * the int_pending flag for the corresponding key is marked
1077 * IOSRAM_INT_TO_DOM.
1078 */
1079 /* ARGSUSED */
1080 int
1081 iosram_register(uint32_t key, void (*handler)(), void *arg)
1082 {
1083 struct iosram_chunk *chunkp;
1084 int error = 0;
1085
1086 /*
1087 * Acquire lock and look for the requested chunk. If it exists, and no
1088 * other callback is registered, proceed with the registration.
1089 */
1090 mutex_enter(&iosram_mutex);
1091 chunkp = iosram_find_chunk(key);
1092
1093 if (iosram_master == NULL) {
1094 error = EIO;
1095 } else if (chunkp == NULL) {
1096 error = EINVAL;
1097 } else if (chunkp->cback.handler != NULL) {
1098 error = EBUSY;
1099 } else {
1100 chunkp->cback.busy = 0;
1101 chunkp->cback.unregister = 0;
1102 chunkp->cback.handler = handler;
1103 chunkp->cback.arg = arg;
1104 }
1105 mutex_exit(&iosram_mutex);
1106
1107 IOSRAMLOG(1, "REG: key: 0x%x hdlr:%p arg:%p error:%d\n",
1108 key, handler, arg, error);
1109
1110 return (error);
1111 }
1112
1113
1114 /*
1115 * iosram_unregister()
1116 * Unregister handler associated with the specified chunk.
1117 */
1118 int
1119 iosram_unregister(uint32_t key)
1120 {
1121 struct iosram_chunk *chunkp;
1122 int error = 0;
1123
1124 /*
1125 * Acquire lock and look for the requested chunk. If it exists and has
1126 * a callback registered, unregister it.
1127 */
1128 mutex_enter(&iosram_mutex);
1129 chunkp = iosram_find_chunk(key);
1130
1131 if (iosram_master == NULL) {
1132 error = EIO;
1133 } else if (chunkp == NULL) {
1134 error = EINVAL;
1135 } else if (chunkp->cback.busy) {
1136 /*
1137 * If the handler is already busy (being invoked), then we flag
1138 * it so it will be unregistered after the invocation completes.
1139 */
1140 DPRINTF(1, ("IOSRAM(%d): unregister: delaying unreg k:0x%08x\n",
1141 iosram_master->instance, key));
1142 chunkp->cback.unregister = 1;
1143 } else if (chunkp->cback.handler != NULL) {
1144 chunkp->cback.handler = NULL;
1145 chunkp->cback.arg = NULL;
1146 }
1147 mutex_exit(&iosram_mutex);
1148
1149 IOSRAMLOG(1, "UNREG: key:%x error:%d\n", key, error, NULL, NULL);
1150 return (error);
1151 }
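
/*
 * Usage sketch (illustrative only): a client registers a callback that the
 * soft interrupt handler invokes whenever the chunk's int_pending flag is
 * found set to IOSRAM_INT_TO_DOM.  EXAMPLE_KEY and example_handler() are
 * hypothetical.  The handler runs with iosram_mutex dropped, and
 * iosram_unregister() defers the removal if the handler is currently
 * executing.
 *
 *	static void
 *	example_handler(void *arg)
 *	{
 *		(read the newly arrived data with iosram_rd(), etc.)
 *	}
 *
 *	if (iosram_register(EXAMPLE_KEY, example_handler, NULL) == EBUSY)
 *		cmn_err(CE_WARN, "example: callback already registered");
 *	...
 *	(void) iosram_unregister(EXAMPLE_KEY);
 */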
1152
1153
1154 /*
1155 * iosram_get_flag():
1156 * Get data_valid and/or int_pending flags associated with the
1157 * specified key.
1158 */
1159 int
1160 iosram_get_flag(uint32_t key, uint8_t *data_valid, uint8_t *int_pending)
1161 {
1162 iosram_chunk_t *chunkp;
1163 iosram_flags_t flags;
1164 int error = 0;
1165
1166 /*
1167 * Acquire lock and look for the requested chunk. If it exists, and no
1168 * tunnel switch is in progress, read the chunk's flags.
1169 */
1170 mutex_enter(&iosram_mutex);
1171 chunkp = iosram_find_chunk(key);
1172
1173 if (iosram_master == NULL) {
1174 error = EIO;
1175 } else if (chunkp == NULL) {
1176 error = EINVAL;
1177 } else if (iosram_tswitch_active) {
1178 error = EAGAIN;
1179 } else {
1180 IOSRAM_STAT(getflag);
1181
1182 /*
1183 * Read the flags
1184 */
1185 ddi_rep_get8(iosram_handle, (uint8_t *)&flags,
1186 (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
1187 DDI_DEV_AUTOINCR);
1188
1189 /*
1190 * Get each flag value that the caller is interested in.
1191 */
1192 if (data_valid != NULL) {
1193 *data_valid = flags.data_valid;
1194 }
1195
1196 if (int_pending != NULL) {
1197 *int_pending = flags.int_pending;
1198 }
1199 }
1200 mutex_exit(&iosram_mutex);
1201
1202 IOSRAMLOG(1, "GetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
1203 key, flags.data_valid, flags.int_pending, error);
1204 return (error);
1205 }
1206
1207
1208 /*
1209 * iosram_set_flag():
1210 * Set data_valid and int_pending flags associated with the specified key.
1211 */
1212 int
1213 iosram_set_flag(uint32_t key, uint8_t data_valid, uint8_t int_pending)
1214 {
1215 iosram_chunk_t *chunkp;
1216 iosram_flags_t flags;
1217 int error = 0;
1218
1219 /*
1220 * Acquire lock and look for the requested chunk. If it exists, and no
1221 * tunnel switch is in progress, write the chunk's flags.
1222 */
1223 mutex_enter(&iosram_mutex);
1224 chunkp = iosram_find_chunk(key);
1225
1226 if (iosram_master == NULL) {
1227 error = EIO;
1228 } else if ((chunkp == NULL) ||
1229 ((data_valid != IOSRAM_DATA_INVALID) &&
1230 (data_valid != IOSRAM_DATA_VALID)) ||
1231 ((int_pending != IOSRAM_INT_NONE) &&
1232 (int_pending != IOSRAM_INT_TO_SSC) &&
1233 (int_pending != IOSRAM_INT_TO_DOM))) {
1234 error = EINVAL;
1235 } else if (iosram_tswitch_active) {
1236 error = EAGAIN;
1237 } else {
1238 IOSRAM_STAT(setflag);
1239 flags.data_valid = data_valid;
1240 flags.int_pending = int_pending;
1241 ddi_rep_put8(iosram_handle, (uint8_t *)&flags,
1242 (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
1243 DDI_DEV_AUTOINCR);
1244 }
1245 mutex_exit(&iosram_mutex);
1246
1247 IOSRAMLOG(1, "SetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
1248 key, flags.data_valid, flags.int_pending, error);
1249 return (error);
1250 }
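
/*
 * Usage sketch (illustrative only): the usual domain-side producer sequence
 * is "write the payload, then update the flags, then interrupt the SC".
 * EXAMPLE_KEY is hypothetical; the flag constants are the same ones this
 * routine validates above.
 *
 *	if (iosram_wr(EXAMPLE_KEY, 0, sizeof (buf), (caddr_t)buf) == 0 &&
 *	    iosram_set_flag(EXAMPLE_KEY, IOSRAM_DATA_VALID,
 *	    IOSRAM_INT_TO_SSC) == 0)
 *		(void) iosram_send_intr();
 */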
1251
1252
1253 /*
1254 * iosram_ctrl()
1255 * This function provides access to a variety of services not available
1256 * through the basic API.
1257 */
1258 int
1259 iosram_ctrl(uint32_t key, uint32_t cmd, void *arg)
1260 {
1261 struct iosram_chunk *chunkp;
1262 int error = 0;
1263
1264 /*
1265 * Acquire lock and do some argument sanity checking.
1266 */
1267 mutex_enter(&iosram_mutex);
1268 chunkp = iosram_find_chunk(key);
1269
1270 if (iosram_master == NULL) {
1271 error = EIO;
1272 } else if (chunkp == NULL) {
1273 error = EINVAL;
1274 }
1275
1276 if (error != 0) {
1277 mutex_exit(&iosram_mutex);
1278 return (error);
1279 }
1280
1281 /*
1282 * Arguments seem okay so far, so process the command.
1283 */
1284 switch (cmd) {
1285 case IOSRAM_CMD_CHUNKLEN:
1286 /*
1287 * Return the length of the chunk indicated by the key.
1288 */
1289 if (arg == NULL) {
1290 error = EINVAL;
1291 break;
1292 }
1293
1294 *(uint32_t *)arg = chunkp->toc_data.len;
1295 break;
1296
1297 default:
1298 error = ENOTSUP;
1299 break;
1300 }
1301
1302 mutex_exit(&iosram_mutex);
1303 return (error);
1304 }
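
/*
 * Usage sketch (illustrative only): sizing a buffer before a read.
 * EXAMPLE_KEY is hypothetical.
 *
 *	uint32_t len;
 *	caddr_t buf;
 *
 *	if (iosram_ctrl(EXAMPLE_KEY, IOSRAM_CMD_CHUNKLEN, &len) == 0) {
 *		buf = kmem_alloc(len, KM_SLEEP);
 *		(void) iosram_rd(EXAMPLE_KEY, 0, len, buf);
 *		kmem_free(buf, len);
 *	}
 */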
1305
1306
1307 /*
1308 * iosram_hdr_ctrl()
1309 * This function provides an interface for the Mailbox Protocol
1310 * implementation to use when interacting with the IOSRAM header.
1311 */
1312 int
1313 iosram_hdr_ctrl(uint32_t cmd, void *arg)
1314 {
1315 int error = 0;
1316
1317 /*
1318 * Acquire lock and do some argument sanity checking.
1319 */
1320 mutex_enter(&iosram_mutex);
1321
1322 if (iosram_master == NULL) {
1323 error = EIO;
1324 }
1325
1326 if (error != 0) {
1327 mutex_exit(&iosram_mutex);
1328 return (error);
1329 }
1330
1331 switch (cmd) {
1332 case IOSRAM_HDRCMD_GET_SMS_MBOX_VER:
1333 /*
1334 * Return the value of the sms_mbox_version field.
1335 */
1336 if (arg == NULL) {
1337 error = EINVAL;
1338 break;
1339 }
1340
1341 *(uint32_t *)arg = IOSRAM_GET_HDRFIELD32(iosram_master,
1342 sms_mbox_version);
1343 break;
1344
1345 case IOSRAM_HDRCMD_SET_OS_MBOX_VER:
1346 /*
1347 * Set the value of the os_mbox_version field.
1348 */
1349 IOSRAM_SET_HDRFIELD32(iosram_master, os_mbox_version,
1350 (uint32_t)(uintptr_t)arg);
1351 IOSRAM_SET_HDRFIELD32(iosram_master, os_change_mask,
1352 IOSRAM_HDRFIELD_OS_MBOX_VER);
1353 (void) iosram_send_intr();
1354 break;
1355
1356 case IOSRAM_HDRCMD_REG_CALLBACK:
1357 iosram_hdrchange_handler = (void (*)())arg;
1358 break;
1359
1360 default:
1361 error = ENOTSUP;
1362 break;
1363 }
1364
1365 mutex_exit(&iosram_mutex);
1366 return (error);
1367 }
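
/*
 * Usage sketch (illustrative only): how a mailbox implementation might use
 * this interface to negotiate protocol versions and watch for header
 * changes.  MY_MBOX_VER and example_hdr_cback() are hypothetical.
 *
 *	uint32_t sms_ver;
 *
 *	if (iosram_hdr_ctrl(IOSRAM_HDRCMD_GET_SMS_MBOX_VER, &sms_ver) == 0) {
 *		(void) iosram_hdr_ctrl(IOSRAM_HDRCMD_SET_OS_MBOX_VER,
 *		    (void *)(uintptr_t)MIN(sms_ver, MY_MBOX_VER));
 *		(void) iosram_hdr_ctrl(IOSRAM_HDRCMD_REG_CALLBACK,
 *		    (void *)example_hdr_cback);
 *	}
 */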
1368
1369
1370 /*
1371 * iosram_softintr()
1372 * IOSRAM soft interrupt handler
1373 */
1374 static uint_t
1375 iosram_softintr(caddr_t arg)
1376 {
1377 uint32_t hdr_changes;
1378 iosramsoft_t *softp = (iosramsoft_t *)arg;
1379 iosram_chunk_t *chunkp;
1380 void (*handler)();
1381 int i;
1382 uint8_t flag;
1383
1384 DPRINTF(1, ("iosram(%d): in iosram_softintr\n", softp->instance));
1385
1386 IOSRAMLOG(2, "SINTR arg/softp:%p pending:%d busy:%d\n",
1387 arg, softp->intr_pending, softp->intr_busy, NULL);
1388
1389 mutex_enter(&iosram_mutex);
1390 mutex_enter(&softp->intr_mutex);
1391
1392 /*
1393 * Do not process interrupt if interrupt handler is already running or
1394 * no interrupts are pending.
1395 */
1396 if (softp->intr_busy || !softp->intr_pending) {
1397 mutex_exit(&softp->intr_mutex);
1398 mutex_exit(&iosram_mutex);
1399 DPRINTF(1, ("IOSRAM(%d): softintr: busy=%d pending=%d\n",
1400 softp->instance, softp->intr_busy, softp->intr_pending));
1401 return (softp->intr_pending ? DDI_INTR_CLAIMED :
1402 DDI_INTR_UNCLAIMED);
1403 }
1404
1405 /*
1406 * It's possible for the SC to send an interrupt on the new master
1407 * before we are able to set our internal state. If so, we'll retrigger
1408 * soft interrupt right after tunnel switch completion.
1409 */
1410 if (softp->state & IOSRAM_STATE_TSWITCH) {
1411 mutex_exit(&softp->intr_mutex);
1412 mutex_exit(&iosram_mutex);
1413 DPRINTF(1, ("IOSRAM(%d): softintr: doing switch "
1414 "state=0x%x\n", softp->instance, softp->state));
1415 return (DDI_INTR_CLAIMED);
1416 }
1417
1418 /*
1419 * Do not process interrupt if we are not the master.
1420 */
1421 if (!(softp->state & IOSRAM_STATE_MASTER)) {
1422 mutex_exit(&softp->intr_mutex);
1423 mutex_exit(&iosram_mutex);
1424 DPRINTF(1, ("IOSRAM(%d): softintr: no master state=0x%x\n ",
1425 softp->instance, softp->state));
1426 return (DDI_INTR_CLAIMED);
1427 }
1428
1429 IOSRAM_STAT(sintr_recv);
1430
1431 /*
1432 * If the driver is suspended, then we should not process any
1433 * interrupts. Instead, we trigger a soft interrupt when the driver
1434 * resumes.
1435 */
1436 if (softp->suspended) {
1437 mutex_exit(&softp->intr_mutex);
1438 mutex_exit(&iosram_mutex);
1439 DPRINTF(1, ("IOSRAM(%d): softintr: suspended\n",
1440 softp->instance));
1441 return (DDI_INTR_CLAIMED);
1442 }
1443
1444 /*
1445 * Indicate that the IOSRAM interrupt handler is busy. Note that this
1446 * includes incrementing the reader/writer count, since we don't want
1447 * any tunnel switches to start up while we're processing callbacks.
1448 */
1449 softp->intr_busy = 1;
1450 iosram_rw_active++;
1451 #if defined(DEBUG)
1452 if (iosram_rw_active > iosram_rw_active_max) {
1453 iosram_rw_active_max = iosram_rw_active;
1454 }
1455 #endif
1456
1457 do {
1458 DPRINTF(1, ("IOSRAM(%d): softintr: processing interrupt\n",
1459 softp->instance));
1460
1461 softp->intr_pending = 0;
1462
1463 mutex_exit(&softp->intr_mutex);
1464
1465 /*
1466 * Process changes to the IOSRAM header.
1467 */
1468 hdr_changes = IOSRAM_GET_HDRFIELD32(iosram_master,
1469 sms_change_mask);
1470 if (hdr_changes != 0) {
1471 int error;
1472
1473 IOSRAM_SET_HDRFIELD32(iosram_master, sms_change_mask,
1474 0);
1475 if (hdr_changes & IOSRAM_HDRFIELD_TOC_INDEX) {
1476 /*
1477 * XXX is it safe to temporarily release the
1478 * iosram_mutex here?
1479 */
1480 mutex_exit(&iosram_mutex);
1481 error = iosram_read_toc(iosram_master);
1482 mutex_enter(&iosram_mutex);
1483 if (error) {
1484 cmn_err(CE_WARN, "iosram_read_toc: new"
1485 " TOC invalid; using old TOC.");
1486 }
1487 iosram_update_addrs(iosram_master);
1488 }
1489
1490 if (iosram_hdrchange_handler != NULL) {
1491 mutex_exit(&iosram_mutex);
1492 iosram_hdrchange_handler();
1493 mutex_enter(&iosram_mutex);
1494 }
1495 }
1496
1497 /*
1498 * Get data_valid/int_pending flags and generate a callback if
1499 * applicable. For now, we read only those flags for which a
1500 * callback has been registered. We can optimize reading of
1501 * flags by reading them all at once and then process them
1502 * later.
1503 */
1504 for (i = 0, chunkp = chunks; i < nchunks; i++,
1505 chunkp++) {
1506 #if DEBUG
1507 flag = ddi_get8(iosram_handle,
1508 &(chunkp->flagsp->int_pending));
1509 DPRINTF(1, ("IOSRAM(%d): softintr chunk #%d "
1510 "flag=0x%x handler=%p\n",
1511 softp->instance, i, (int)flag,
1512 (void *)chunkp->cback.handler));
1513 #endif
1514 if ((handler = chunkp->cback.handler) == NULL) {
1515 continue;
1516 }
1517 flag = ddi_get8(iosram_handle,
1518 &(chunkp->flagsp->int_pending));
1519 if (flag == IOSRAM_INT_TO_DOM) {
1520 DPRINTF(1,
1521 ("IOSRAM(%d): softintr: invoking handler\n",
1522 softp->instance));
1523 IOSRAMLOG(1,
1524 "SINTR invoking hdlr:%p arg:%p index:%d\n",
1525 handler, chunkp->cback.arg, i, NULL);
1526 IOSRAM_STAT(callbacks);
1527
1528 ddi_put8(iosram_handle,
1529 &(chunkp->flagsp->int_pending),
1530 IOSRAM_INT_NONE);
1531 chunkp->cback.busy = 1;
1532 mutex_exit(&iosram_mutex);
1533 (*handler)(chunkp->cback.arg);
1534 mutex_enter(&iosram_mutex);
1535 chunkp->cback.busy = 0;
1536
1537 /*
1538 * If iosram_unregister was called while the
1539 * callback was being invoked, complete the
1540 * unregistration here.
1541 */
1542 if (chunkp->cback.unregister) {
1543 DPRINTF(1, ("IOSRAM(%d): softintr: "
1544 "delayed unreg k:0x%08x\n",
1545 softp->instance,
1546 chunkp->toc_data.key));
1547 chunkp->cback.handler = NULL;
1548 chunkp->cback.arg = NULL;
1549 chunkp->cback.unregister = 0;
1550 }
1551 }
1552
1553 /*
1554 * If there's a tunnel switch waiting to run, give it
1555 * higher priority than these callbacks by bailing out.
1556 * They'll still be invoked on the new master iosram
1557 * when the tunnel switch is done.
1558 */
1559 if (iosram_tswitch_active) {
1560 break;
1561 }
1562 }
1563
1564 mutex_enter(&softp->intr_mutex);
1565
1566 } while (softp->intr_pending && !softp->suspended &&
1567 !iosram_tswitch_active);
1568
1569 /*
1570 * Indicate IOSRAM interrupt handler is not BUSY any more
1571 */
1572 softp->intr_busy = 0;
1573
1574 ASSERT(iosram_rw_active > 0);
1575 if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
1576 iosram_rw_wakeup = 0;
1577 cv_broadcast(&iosram_rw_wait);
1578 }
1579
1580 mutex_exit(&softp->intr_mutex);
1581 mutex_exit(&iosram_mutex);
1582
1583 DPRINTF(1, ("iosram(%d): softintr exit\n", softp->instance));
1584
1585 return (DDI_INTR_CLAIMED);
1586 }
1587
1588
1589 /*
1590 * iosram_intr()
1591 * IOSRAM real interrupt handler
1592 */
1593 static uint_t
1594 iosram_intr(caddr_t arg)
1595 {
1596 iosramsoft_t *softp = (iosramsoft_t *)arg;
1597 int result = DDI_INTR_UNCLAIMED;
1598 uint32_t int_status;
1599
1600 DPRINTF(2, ("iosram(%d): in iosram_intr\n", softp->instance));
1601
1602 mutex_enter(&softp->intr_mutex);
1603
1604 if (softp->sbbc_handle == NULL) {
1605 /*
1606 * The SBBC registers region is not mapped in.
1607 * Set the interrupt pending flag here, and process the
1608 * interrupt after the tunnel switch.
1609 */
1610 DPRINTF(1, ("IOSRAM(%d): iosram_intr: SBBC not mapped\n",
1611 softp->instance));
1612 softp->intr_pending = 1;
1613 mutex_exit(&softp->intr_mutex);
1614 return (DDI_INTR_UNCLAIMED);
1615 }
1616
1617 int_status = ddi_get32(softp->sbbc_handle,
1618 &(softp->sbbc_region->int_status.reg));
1619 DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n", int_status));
1620
1621 if (int_status & IOSRAM_SBBC_INT0) {
1622 result = DDI_INTR_CLAIMED;
1623 DPRINTF(1, ("iosram_intr: int0 detected!\n"));
1624 }
1625
1626 if (int_status & IOSRAM_SBBC_INT1) {
1627 result = DDI_INTR_CLAIMED;
1628 DPRINTF(1, ("iosram_intr: int1 detected!\n"));
1629 }
1630
1631 if (result == DDI_INTR_CLAIMED) {
1632 ddi_put32(softp->sbbc_handle,
1633 &(softp->sbbc_region->int_status.reg), int_status);
1634 int_status = ddi_get32(softp->sbbc_handle,
1635 &(softp->sbbc_region->int_status.reg));
1636 DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n",
1637 int_status));
1638
1639 softp->intr_pending = 1;
1640 /*
1641 * Trigger soft interrupt if not executing and
1642 * not suspended.
1643 */
1644 if (!softp->intr_busy && !softp->suspended &&
1645 (softp->softintr_id != NULL)) {
1646 DPRINTF(1, ("iosram(%d): trigger softint\n",
1647 softp->instance));
1648 ddi_trigger_softintr(softp->softintr_id);
1649 }
1650 }
1651
1652 IOSRAM_STAT(intr_recv);
1653
1654 mutex_exit(&softp->intr_mutex);
1655
1656 IOSRAMLOG(2, "INTR arg/softp:%p pending:%d busy:%d\n",
1657 arg, softp->intr_pending, softp->intr_busy, NULL);
1658 DPRINTF(1, ("iosram(%d): iosram_intr exit\n", softp->instance));
1659
1660 return (result);
1661 }
1662
1663
1664 /*
1665 * iosram_send_intr()
1666 * Send an interrupt to the SSP side via AXQ driver
1667 */
1668 int
1669 iosram_send_intr()
1670 {
1671 IOSRAMLOG(1, "SendIntr called\n", NULL, NULL, NULL, NULL);
1672 IOSRAM_STAT(intr_send);
1673 DPRINTF(1, ("iosram iosram_send_intr invoked\n"));
1674
1675 return (axq_cpu2ssc_intr(0));
1676 }
1677
1678
1679 #if defined(DEBUG)
1680 static void
1681 iosram_dummy_cback(void *arg)
1682 {
1683 DPRINTF(1, ("iosram_dummy_cback invoked arg:%p\n", arg));
1684 }
1685 #endif /* DEBUG */
1686
1687
1688 /*ARGSUSED1*/
1689 static int
1690 iosram_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1691 int *rvalp)
1692 {
1693 struct iosramsoft *softp;
1694 int error = DDI_SUCCESS;
1695
1696 softp = ddi_get_soft_state(iosramsoft_statep, getminor(dev));
1697 if (softp == NULL) {
1698 return (ENXIO);
1699 }
1700 IOSRAMLOG(1, "IOCTL: dev:%p cmd:%x arg:%p ... instance %d\n",
1701 dev, cmd, arg, softp->instance);
1702
1703 switch (cmd) {
1704 #if defined(DEBUG)
1705 case IOSRAM_GET_FLAG:
1706 {
1707 iosram_io_t req;
1708 uint8_t data_valid, int_pending;
1709
1710 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1711 return (EFAULT);
1712 }
1713
1714 DPRINTF(2, ("IOSRAM_GET_FLAG(key:%x\n", req.key));
1715
1716 req.retval = iosram_get_flag(req.key, &data_valid,
1717 &int_pending);
1718 req.data_valid = (uint32_t)data_valid;
1719 req.int_pending = (uint32_t)int_pending;
1720
1721 if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1722 DPRINTF(1,
1723 ("IOSRAM_GET_FLAG: can't copyout req.retval (%x)",
1724 req.retval));
1725 error = EFAULT;
1726 }
1727
1728 return (error);
1729 }
1730
1731 case IOSRAM_SET_FLAG:
1732 {
1733 iosram_io_t req;
1734
1735 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1736 return (EFAULT);
1737 }
1738
1739 DPRINTF(2, ("IOSRAM_SET_FLAG(key:%x data_valid:%x "
1740 "int_pending:%x\n", req.key, req.data_valid,
1741 req.int_pending));
1742
1743 req.retval = iosram_set_flag(req.key, req.data_valid,
1744 req.int_pending);
1745
1746 if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1747 DPRINTF(1, ("IOSRAM_SET_FLAG: can't copyout req.retval"
1748 " (%x)\n", req.retval));
1749 error = EFAULT;
1750 }
1751
1752 return (error);
1753 }
1754
1755 case IOSRAM_RD:
1756 {
1757 caddr_t bufp;
1758 int len;
1759 iosram_io_t req;
1760
1761 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1762 return (EFAULT);
1763 }
1764
1765 DPRINTF(2, ("IOSRAM_RD(k:%x o:%x len:%x bufp:%p\n", req.key,
1766 req.off, req.len, (void *)(uintptr_t)req.bufp));
1767
1768 len = req.len;
1769 bufp = kmem_alloc(len, KM_SLEEP);
1770
1771 req.retval = iosram_rd(req.key, req.off, req.len, bufp);
1772
1773 if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, len, mode)) {
1774 DPRINTF(1, ("IOSRAM_RD: copyout(%p, %p,%x,%x) failed\n",
1775 (void *)bufp, (void *)(uintptr_t)req.bufp, len,
1776 mode));
1777 error = EFAULT;
1778 } else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1779 DPRINTF(1, ("IOSRAM_RD: can't copyout retval (%x)\n",
1780 req.retval));
1781 error = EFAULT;
1782 }
1783
1784 kmem_free(bufp, len);
1785 return (error);
1786 }
1787
1788 case IOSRAM_WR:
1789 {
1790 caddr_t bufp;
1791 iosram_io_t req;
1792 int len;
1793
1794 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1795 return (EFAULT);
1796 }
1797
1798 DPRINTF(2, ("IOSRAM_WR(k:%x o:%x len:%x bufp:%p\n",
1799 req.key, req.off, req.len, (void *)(uintptr_t)req.bufp));
1800 len = req.len;
1801 bufp = kmem_alloc(len, KM_SLEEP);
1802 if (ddi_copyin((void *)(uintptr_t)req.bufp, bufp, len, mode)) {
1803 error = EFAULT;
1804 } else {
1805 req.retval = iosram_wr(req.key, req.off, req.len,
1806 bufp);
1807
1808 if (ddi_copyout(&req, (void *)arg, sizeof (req),
1809 mode)) {
1810 error = EFAULT;
1811 }
1812 }
1813 kmem_free(bufp, len);
1814 return (error);
1815 }
1816
1817 case IOSRAM_TOC:
1818 {
1819 caddr_t bufp;
1820 int len;
1821 iosram_io_t req;
1822
1823 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1824 return (EFAULT);
1825 }
1826
1827 DPRINTF(2, ("IOSRAM_TOC (req.bufp:%x req.len:%x) \n",
1828 req.bufp, req.len));
1829
1830 len = req.len;
1831 bufp = kmem_alloc(len, KM_SLEEP);
1832
1833 req.retval = iosram_get_keys((iosram_toc_entry_t *)bufp,
1834 &req.len);
1835
1836 if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, req.len,
1837 mode)) {
1838 DPRINTF(1,
1839 ("IOSRAM_TOC: copyout(%p, %p,%x,%x) failed\n",
1840 (void *)bufp, (void *)(uintptr_t)req.bufp, req.len,
1841 mode));
1842 error = EFAULT;
1843 } else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1844 DPRINTF(1, ("IOSRAM_TOC: can't copyout retval (%x)\n",
1845 req.retval));
1846 error = EFAULT;
1847 }
1848 kmem_free(bufp, len);
1849 return (error);
1850 }
1851
1852 case IOSRAM_SEND_INTR:
1853 {
1854 DPRINTF(2, ("IOSRAM_SEND_INTR\n"));
1855
1856 switch ((int)arg) {
1857 case 0x11:
1858 case 0x22:
1859 case 0x44:
1860 case 0x88:
1861 ddi_put32(softp->sbbc_handle,
1862 &(softp->sbbc_region->int_enable.reg), (int)arg);
1863 DPRINTF(1, ("Wrote 0x%x to int_enable.reg\n",
1864 (int)arg));
1865 break;
1866 case 0xBB:
1867 ddi_put32(softp->sbbc_handle,
1868 &(softp->sbbc_region->p0_int_gen.reg), 1);
1869 DPRINTF(1, ("Wrote 1 to p0_int_gen.reg\n"));
1870 break;
1871 default:
1872 error = iosram_send_intr();
1873 }
1874
1875 return (error);
1876 }
1877
1878 case IOSRAM_PRINT_CBACK:
1879 iosram_print_cback();
1880 break;
1881
1882 case IOSRAM_PRINT_STATE:
1883 iosram_print_state((int)arg);
1884 break;
1885
1886 #if IOSRAM_STATS
1887 case IOSRAM_PRINT_STATS:
1888 iosram_print_stats();
1889 break;
1890 #endif
1891
1892 #if IOSRAM_LOG
1893 case IOSRAM_PRINT_LOG:
1894 iosram_print_log((int)arg);
1895 break;
1896 #endif
1897
1898 case IOSRAM_TUNNEL_SWITCH:
1899 error = iosram_switchfrom((int)arg);
1900 break;
1901
1902 case IOSRAM_PRINT_FLAGS:
1903 iosram_print_flags();
1904 break;
1905
1906 case IOSRAM_REG_CBACK:
1907 {
1908 iosram_io_t req;
1909
1910 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1911 return (EFAULT);
1912 }
1913
1914 DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key));
1915
1916 req.retval = iosram_register(req.key, iosram_dummy_cback,
1917 (void *)(uintptr_t)req.key);
1918 if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1919 error = EFAULT;
1920 }
1921
1922 return (error);
1923 }
1924
1925 case IOSRAM_UNREG_CBACK:
1926 {
1927 iosram_io_t req;
1928
1929 if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) {
1930 return (EFAULT);
1931 }
1932
1933 DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key));
1934
1935 req.retval = iosram_unregister(req.key);
1936 if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) {
1937 error = EFAULT;
1938 }
1939
1940 return (error);
1941 }
1942
1943 case IOSRAM_SEMA_ACQUIRE:
1944 {
1945 DPRINTF(1, ("IOSRAM_SEMA_ACQUIRE\n"));
1946 error = iosram_sema_acquire(NULL);
1947 return (error);
1948 }
1949
1950 case IOSRAM_SEMA_RELEASE:
1951 {
1952 DPRINTF(1, ("IOSRAM_SEMA_RELEASE\n"));
1953 error = iosram_sema_release();
1954 return (error);
1955 }
1956
1957 #endif /* DEBUG */
1958
1959 default:
1960 DPRINTF(1, ("iosram_ioctl: Illegal command %x\n", cmd));
1961 error = ENOTTY;
1962 }
1963
1964 return (error);
1965 }
1966
1967
1968 /*
1969 * iosram_switch_tunnel(softp)
1970 * Switch master tunnel to the specified instance
1971 * Must be called while holding iosram_mutex
1972 */
1973 /*ARGSUSED*/
1974 static int
1975 iosram_switch_tunnel(iosramsoft_t *softp)
1976 {
1977 #ifdef DEBUG
1978 int instance = softp->instance;
1979 #endif
1980 int error = 0;
1981 iosramsoft_t *prev_master;
1982
1983 ASSERT(mutex_owned(&iosram_mutex));
1984
1985 DPRINTF(1, ("tunnel switch new master:%p (%d) current master:%p (%d)\n",
1986 (void *)softp, instance, (void *)iosram_master,
1987 ((iosram_master) ? iosram_master->instance : -1)));
1988 IOSRAMLOG(1, "TSWTCH: new_master:%p (%p) iosram_master:%p (%d)\n",
1989 softp, instance, iosram_master,
1990 ((iosram_master) ? iosram_master->instance : -1));
1991
1992 if (softp == NULL || (softp->state & IOSRAM_STATE_DETACH)) {
1993 return (ENXIO);
1994 }
1995 if (iosram_master == softp) {
1996 return (0);
1997 }
1998
1999
2000 /*
2001 * We protect against the softp structure being deallocated by setting
2002 * the IOSRAM_STATE_TSWITCH state flag. The detach routine will check
2003 * for this flag and if set, it will wait for this flag to be reset or
2004 * refuse the detach operation.
2005 */
2006 iosram_new_master = softp;
2007 softp->state |= IOSRAM_STATE_TSWITCH;
2008 prev_master = iosram_master;
2009 if (prev_master) {
2010 prev_master->state |= IOSRAM_STATE_TSWITCH;
2011 }
2012 mutex_exit(&iosram_mutex);
2013
2014 /*
2015 * Map the target IOSRAM, read the TOC, and register interrupts if not
2016 * already done.
2017 */
2018 DPRINTF(1, ("iosram(%d): mapping IOSRAM and SBBC\n",
2019 softp->instance));
2020 IOSRAMLOG(1, "TSWTCH: mapping instance:%d softp:%p\n",
2021 instance, softp, NULL, NULL);
2022
2023 if (iosram_setup_map(softp) != DDI_SUCCESS) {
2024 error = ENXIO;
2025 } else if ((chunks == NULL) && (iosram_read_toc(softp) != 0)) {
2026 iosram_remove_map(softp);
2027 error = EINVAL;
2028 } else if (iosram_add_intr(softp) != DDI_SUCCESS) {
2029 /*
2030 * If there was no previous master, purge the TOC data that
2031 * iosram_read_toc() created.
2032 */
2033 if ((prev_master == NULL) && (chunks != NULL)) {
2034 kmem_free(chunks, nchunks * sizeof (iosram_chunk_t));
2035 chunks = NULL;
2036 nchunks = 0;
2037 iosram_init_hashtab();
2038 }
2039 iosram_remove_map(softp);
2040 error = ENXIO;
2041 }
2042
2043 /*
2044 * If we are asked to abort tunnel switch, do so now, before invoking
2045 * the OBP callback.
2046 */
2047 if (iosram_tswitch_aborted) {
2048
2049 /*
2050 * Once the tunnel switch is aborted, this thread should not
2051 * resume. If it does, we simply log a message. We can't unmap
2052 * the new master IOSRAM as it may be accessed in
2053 * iosram_abort_tswitch(). It will be unmapped when it is
2054 * detached.
2055 */
2056 IOSRAMLOG(1,
2057 "TSWTCH: aborted (pre OBP cback). Thread resumed.\n",
2058 NULL, NULL, NULL, NULL);
2059 error = EIO;
2060 }
2061
2062 if (error) {
2063 IOSRAMLOG(1,
2064 "TSWTCH: map failed instance:%d softp:%p error:%x\n",
2065 instance, softp, error, NULL);
2066 goto done;
2067 }
2068
2069 if (prev_master != NULL) {
2070 int result;
2071
2072 /*
2073 * Now invoke the OBP interface to do the tunnel switch.
2074 */
2075 result = prom_starcat_switch_tunnel(softp->portid,
2076 OBP_TSWITCH_REQREPLY);
2077 if (result != 0) {
2078 error = EIO;
2079 }
2080 IOSRAMLOG(1,
2081 "TSWTCH: OBP tswitch portid:%x result:%x error:%x\n",
2082 softp->portid, result, error, NULL);
2083 IOSRAM_STAT(tswitch);
2084 iosram_tswitch_tstamp = ddi_get_lbolt();
2085 }
2086
2087 mutex_enter(&iosram_mutex);
2088 if (iosram_tswitch_aborted) {
2089 /*
2090 * Tunnel switch aborted. This thread should not resume.
2091 * For now, we simply log a message, but don't unmap any
2092 * IOSRAM at this stage as it may be accessed within the
2093 		 * iosram_abort_tswitch(). The IOSRAM will be unmapped
2094 * when that instance is detached.
2095 */
2096 if (iosram_tswitch_aborted) {
2097 IOSRAMLOG(1,
2098 "TSWTCH: aborted (post OBP cback). Thread"
2099 " resumed.\n", NULL, NULL, NULL, NULL);
2100 error = EIO;
2101 mutex_exit(&iosram_mutex);
2102 }
2103 } else if (error) {
2104 /*
2105 * Tunnel switch failed. Continue using previous tunnel.
2106 * However, unmap new (target) IOSRAM.
2107 */
2108 iosram_new_master = NULL;
2109 mutex_exit(&iosram_mutex);
2110 (void) iosram_remove_intr(softp);
2111 iosram_remove_map(softp);
2112 } else {
2113 /*
2114 * Tunnel switch was successful. Set the new master.
2115 * Also unmap old master IOSRAM and remove any interrupts
2116 * associated with that.
2117 *
2118 * Note that a call to iosram_force_write() allows access
2119 * to the IOSRAM while tunnel switch is in progress. That
2120 * means we need to set the new master before unmapping
2121 * the old master.
2122 */
2123 iosram_set_master(softp);
2124 iosram_new_master = NULL;
2125 mutex_exit(&iosram_mutex);
2126
2127 if (prev_master) {
2128 IOSRAMLOG(1, "TSWTCH: unmapping prev_master:%p (%d)\n",
2129 prev_master, prev_master->instance, NULL, NULL);
2130 (void) iosram_remove_intr(prev_master);
2131 iosram_remove_map(prev_master);
2132 }
2133 }
2134
2135 done:
2136 mutex_enter(&iosram_mutex);
2137
2138 /*
2139 * Clear the tunnel switch flag on the source and destination
2140 * instances.
2141 */
2142 if (prev_master) {
2143 prev_master->state &= ~IOSRAM_STATE_TSWITCH;
2144 }
2145 softp->state &= ~IOSRAM_STATE_TSWITCH;
2146
2147 /*
2148 * Since incoming interrupts could get lost during a tunnel switch,
2149 * trigger a soft interrupt just in case. No harm other than a bit
2150 * of wasted effort will be caused if no interrupts were dropped.
2151 */
2152 mutex_enter(&softp->intr_mutex);
2153 iosram_master->intr_pending = 1;
2154 if ((iosram_master->softintr_id != NULL) &&
2155 (iosram_master->intr_busy == 0)) {
2156 ddi_trigger_softintr(iosram_master->softintr_id);
2157 }
2158 mutex_exit(&softp->intr_mutex);
2159
2160 IOSRAMLOG(1, "TSWTCH: done error:%d iosram_master:%p instance:%d\n",
2161 error, iosram_master,
2162 (iosram_master) ? iosram_master->instance : -1, NULL);
2163
2164 return (error);
2165 }
2166
2167
2168 /*
2169 * iosram_abort_tswitch()
2170 * Must be called while holding iosram_mutex.
2171 */
2172 static void
2173 iosram_abort_tswitch()
2174 {
2175 uint32_t master_valid, new_master_valid;
2176
2177 ASSERT(mutex_owned(&iosram_mutex));
2178
2179 if ((!iosram_tswitch_active) || iosram_tswitch_aborted) {
2180 return;
2181 }
2182
2183 ASSERT(iosram_master != NULL);
2184
2185 IOSRAMLOG(1, "ABORT: iosram_master:%p (%d) iosram_new_master:%p (%d)\n",
2186 iosram_master, iosram_master->instance, iosram_new_master,
2187 (iosram_new_master == NULL) ? -1 : iosram_new_master->instance);
2188
2189 /*
2190 * The first call to iosram_force_write() in the middle of tunnel switch
2191 	 * will get here. We look up the IOSRAM VALID status and set up an appropriate
2192 * master, if one is still valid. We also set iosram_tswitch_aborted to
2193 * prevent reentering this code and to catch if the OBP callback thread
2194 * somehow resumes.
2195 */
2196 iosram_tswitch_aborted = 1;
2197
2198 if ((iosram_new_master == NULL) ||
2199 	    (iosram_new_master == iosram_master)) {
2200 /*
2201 * New master hasn't been selected yet, or OBP callback
2202 * succeeded and we already selected new IOSRAM as master, but
2203 * system crashed in the middle of unmapping previous master or
2204 * cleaning up state. Use the existing master.
2205 */
2206 ASSERT(iosram_master->iosramp != NULL);
2207 ASSERT(IOSRAM_GET_HDRFIELD32(iosram_master, status) ==
2208 IOSRAM_VALID);
2209 IOSRAMLOG(1, "ABORT: master (%d) already determined.\n",
2210 iosram_master->instance, NULL, NULL, NULL);
2211
2212 return;
2213 }
2214
2215 /*
2216 * System crashed in the middle of tunnel switch and we know that the
2217 * new target has not been marked master yet. That means, the old
2218 * master should still be mapped. We need to abort the tunnel switch
2219 * and setup a valid master, if possible, so that we can write to the
2220 * IOSRAM.
2221 *
2222 * We select a new master based upon the IOSRAM header status fields in
2223 * the previous master IOSRAM and the target IOSRAM as follows:
2224 *
2225 * iosram_master iosram-tswitch
2226 * (Prev Master) (New Target) Decision
2227 * --------------- --------------- -----------
2228 * VALID don't care prev master
2229 * INTRANSIT INVALID prev master
2230 * INTRANSIT INTRANSIT prev master
2231 * INTRANSIT VALID new target
2232 * INVALID INVALID shouldn't ever happen
2233 * INVALID INTRANSIT shouldn't ever happen
2234 * INVALID VALID new target
2235 */
2236
2237 master_valid = (iosram_master->iosramp != NULL) ?
2238 IOSRAM_GET_HDRFIELD32(iosram_master, status) : IOSRAM_INVALID;
2239 new_master_valid = (iosram_new_master->iosramp != NULL) ?
2240 IOSRAM_GET_HDRFIELD32(iosram_new_master, status) : IOSRAM_INVALID;
2241
2242 if (master_valid == IOSRAM_VALID) {
2243 /* EMPTY */
2244 /*
2245 * OBP hasn't been called yet or, if it has, it hasn't started
2246 * copying yet. Use the existing master. Note that the new
2247 * master may not be mapped yet.
2248 */
2249 IOSRAMLOG(1, "ABORT: prev master(%d) is VALID\n",
2250 iosram_master->instance, NULL, NULL, NULL);
2251 } else if (master_valid == IOSRAM_INTRANSIT) {
2252 /*
2253 * The system crashed after OBP started processing the tunnel
2254 * switch but before the iosram driver determined that it was
2255 * complete. Use the new master if it has been marked valid,
2256 * meaning that OBP finished copying data to it, or the old
2257 * master otherwise.
2258 */
2259 IOSRAMLOG(1, "ABORT: prev master(%d) is INTRANSIT\n",
2260 iosram_master->instance, NULL, NULL, NULL);
2261
2262 if (new_master_valid == IOSRAM_VALID) {
2263 iosram_set_master(iosram_new_master);
2264 IOSRAMLOG(1, "ABORT: new master(%d) is VALID\n",
2265 iosram_new_master->instance, NULL, NULL,
2266 NULL);
2267 } else {
2268 (void) prom_starcat_switch_tunnel(iosram_master->portid,
2269 OBP_TSWITCH_NOREPLY);
2270
2271 IOSRAMLOG(1, "ABORT: new master(%d) is INVALID\n",
2272 iosram_new_master->instance, NULL, NULL,
2273 NULL);
2274 }
2275 } else {
2276 /*
2277 * The system crashed after OBP marked the old master INVALID,
2278 * which means the new master is the way to go.
2279 */
2280 IOSRAMLOG(1, "ABORT: prev master(%d) is INVALID\n",
2281 iosram_master->instance, NULL, NULL, NULL);
2282
2283 ASSERT(new_master_valid == IOSRAM_VALID);
2284
2285 iosram_set_master(iosram_new_master);
2286 }
2287
2288 IOSRAMLOG(1, "ABORT: Instance %d selected as master\n",
2289 iosram_master->instance, NULL, NULL, NULL);
2290 }
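
/*
 * For illustration only (not part of the driver): the recovery decision
 * table above reduces to a small helper like the sketch below; the
 * function name is hypothetical.
 *
 *	static struct iosramsoft *
 *	abort_pick_master(uint32_t prev_status, uint32_t new_status,
 *	    struct iosramsoft *prev, struct iosramsoft *new)
 *	{
 *		if (prev_status == IOSRAM_VALID)
 *			return (prev);
 *		if (new_status == IOSRAM_VALID)
 *			return (new);
 *		return (prev);
 *	}
 *
 * A VALID previous master always wins; otherwise the new target is chosen
 * only if OBP finished copying data to it (i.e. marked it VALID).
 */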
2291
2292
2293 /*
2294 * iosram_switchfrom(instance)
2295 * Switch master tunnel away from the specified instance
2296 */
2297 /*ARGSUSED*/
2298 int
2299 iosram_switchfrom(int instance)
2300 {
2301 struct iosramsoft *softp;
2302 int error = 0;
2303 int count;
2304 clock_t current_tstamp;
2305 clock_t tstamp_interval;
2306 struct iosramsoft *last_master = NULL;
2307 static int last_master_instance = -1;
2308
2309 IOSRAMLOG(1, "SwtchFrom: instance:%d iosram_master:%p (%d)\n",
2310 instance, iosram_master,
2311 ((iosram_master) ? iosram_master->instance : -1), NULL);
2312
2313 mutex_enter(&iosram_mutex);
2314
2315 /*
2316 * Wait if another tunnel switch is in progress
2317 */
2318 for (count = 0; iosram_tswitch_active && count < IOSRAM_TSWITCH_RETRY;
2319 count++) {
2320 iosram_tswitch_wakeup = 1;
2321 cv_wait(&iosram_tswitch_wait, &iosram_mutex);
2322 }
2323
2324 if (iosram_tswitch_active) {
2325 mutex_exit(&iosram_mutex);
2326 return (EAGAIN);
2327 }
2328
2329 /*
2330 * Check if the specified instance holds the tunnel. If not,
2331 * then we are done.
2332 */
2333 if ((iosram_master == NULL) || (iosram_master->instance != instance)) {
2334 mutex_exit(&iosram_mutex);
2335 return (0);
2336 }
2337
2338 /*
2339 * Before beginning the tunnel switch process, wait for any outstanding
2340 * read/write activity to complete.
2341 */
2342 iosram_tswitch_active = 1;
2343 while (iosram_rw_active) {
2344 iosram_rw_wakeup = 1;
2345 cv_wait(&iosram_rw_wait, &iosram_mutex);
2346 }
2347
2348 /*
2349 * If a previous tunnel switch just completed, we have to make sure
2350 * HWAD has enough time to find the new tunnel before we switch
2351 * away from it. Otherwise, OBP's mailbox message to OSD will never
2352 * get through. Just to be paranoid about synchronization of lbolt
2353 * across different CPUs, make sure the current attempt isn't noted
2354 * as starting _before_ the last tunnel switch completed.
2355 */
2356 current_tstamp = ddi_get_lbolt();
2357 if (current_tstamp > iosram_tswitch_tstamp) {
2358 tstamp_interval = current_tstamp - iosram_tswitch_tstamp;
2359 } else {
2360 tstamp_interval = 0;
2361 }
2362 if (drv_hztousec(tstamp_interval) < IOSRAM_TSWITCH_DELAY_US) {
2363 mutex_exit(&iosram_mutex);
2364 delay(drv_usectohz(IOSRAM_TSWITCH_DELAY_US) - tstamp_interval);
2365 mutex_enter(&iosram_mutex);
2366 }
2367
2368 /*
2369 * The specified instance holds the tunnel. We need to move it to some
2370 * other IOSRAM. Try out all possible IOSRAMs listed in
2371 * iosram_instances. For now, we always search from the first entry.
2372 	 * In the future, it may be desirable to start where we left off.
2373 */
2374 for (softp = iosram_instances; softp != NULL; softp = softp->next) {
2375 if (iosram_tswitch_aborted) {
2376 break;
2377 }
2378
2379 /* we can't switch _to_ the instance we're switching _from_ */
2380 if (softp->instance == instance) {
2381 continue;
2382 }
2383
2384 /* skip over instances being detached */
2385 if (softp->state & IOSRAM_STATE_DETACH) {
2386 continue;
2387 }
2388
2389 /*
2390 * Try to avoid reverting to the last instance we switched away
2391 * from, as we expect that one to be detached eventually. Keep
2392 * track of it, though, so we can go ahead and try switching to
2393 * it if no other viable candidates are found.
2394 */
2395 if (softp->instance == last_master_instance) {
2396 last_master = softp;
2397 continue;
2398 }
2399
2400 /*
2401 * Do the tunnel switch. If successful, record the instance of
2402 * the master we just left behind so we can try to avoid
2403 * reverting to it next time.
2404 */
2405 if (iosram_switch_tunnel(softp) == 0) {
2406 last_master_instance = instance;
2407 break;
2408 }
2409 }
2410
2411 /*
2412 * If we failed to switch the tunnel, but we skipped over an instance
2413 	 * that we previously switched away from because we expected it to be
2414 * detached, go ahead and try it anyway (unless the tswitch was aborted
2415 * or the instance we skipped is finally being detached).
2416 */
2417 if ((softp == NULL) && (last_master != NULL) &&
2418 !iosram_tswitch_aborted &&
2419 !(last_master->state & IOSRAM_STATE_DETACH)) {
2420 if (iosram_switch_tunnel(last_master) == 0) {
2421 softp = last_master;
2422 last_master_instance = instance;
2423 }
2424 }
2425
2426 if ((softp == NULL) || (iosram_tswitch_aborted)) {
2427 error = EIO;
2428 }
2429
2430 /*
2431 * If there are additional tunnel switches queued up waiting for this
2432 * one to complete, wake them up.
2433 */
2434 if (iosram_tswitch_wakeup) {
2435 iosram_tswitch_wakeup = 0;
2436 cv_broadcast(&iosram_tswitch_wait);
2437 }
2438 iosram_tswitch_active = 0;
2439 mutex_exit(&iosram_mutex);
2440 return (error);
2441 }
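
/*
 * Usage sketch (illustrative only): a caller that is about to detach an
 * IOSRAM instance would typically move the tunnel off of it first, for
 * example in its detach path:
 *
 *	if (iosram_switchfrom(instance) != 0)
 *		return (DDI_FAILURE);
 *
 * EAGAIN (persistent contention with other tunnel switches) and EIO
 * (no viable target or an aborted switch) both leave the current master
 * in place.
 */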
2442
2443
2444 /*
2445 * iosram_tunnel_capable(softp)
2446  *	Check if this IOSRAM instance is tunnel-capable by looking at
2447 * "tunnel-capable" property.
2448 */
2449 static int
2450 iosram_tunnel_capable(struct iosramsoft *softp)
2451 {
2452 int proplen;
2453 int tunnel_capable;
2454
2455 /*
2456 * Look up IOSRAM_TUNNELOK_PROP property, if any.
2457 */
2458 proplen = sizeof (tunnel_capable);
2459 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, softp->dip,
2460 DDI_PROP_DONTPASS, IOSRAM_TUNNELOK_PROP, (caddr_t)&tunnel_capable,
2461 &proplen) != DDI_PROP_SUCCESS) {
2462 tunnel_capable = 0;
2463 }
2464 return (tunnel_capable);
2465 }
2466
2467
2468 static int
2469 iosram_sbbc_setup_map(struct iosramsoft *softp)
2470 {
2471 int rv;
2472 struct ddi_device_acc_attr attr;
2473 dev_info_t *dip = softp->dip;
2474 uint32_t sema_val;
2475
2476 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2477 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2478 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2479
2480 mutex_enter(&iosram_mutex);
2481 mutex_enter(&softp->intr_mutex);
2482
2483 /*
2484 * Map SBBC region in
2485 */
2486 if ((rv = ddi_regs_map_setup(dip, IOSRAM_SBBC_MAP_INDEX,
2487 (caddr_t *)&softp->sbbc_region,
2488 IOSRAM_SBBC_MAP_OFFSET, sizeof (iosram_sbbc_region_t),
2489 &attr, &softp->sbbc_handle)) != DDI_SUCCESS) {
2490 DPRINTF(1, ("Failed to map SBBC region.\n"));
2491 mutex_exit(&softp->intr_mutex);
2492 mutex_exit(&iosram_mutex);
2493 return (rv);
2494 }
2495
2496 /*
2497 * Disable SBBC interrupts. SBBC interrupts are enabled
2498 * once the interrupt handler is registered.
2499 */
2500 ddi_put32(softp->sbbc_handle,
2501 &(softp->sbbc_region->int_enable.reg), 0x0);
2502
2503 /*
2504 	 * Clear the hardware semaphore value if appropriate.
2505 	 * When the first SBBC is mapped in by the IOSRAM driver,
2506 	 * the semaphore is cleared only if it is not currently
2507 	 * held by SMS. For subsequent SBBCs, the semaphore is
2508 	 * always cleared.
2509 */
2510 sema_val = IOSRAM_SEMA_RD(softp);
2511
2512 if (!iosram_master) {
2513 /* the first SBBC is being mapped in */
2514 if (!(IOSRAM_SEMA_IS_HELD(sema_val) &&
2515 IOSRAM_SEMA_GET_IDX(sema_val) == IOSRAM_SEMA_SMS_IDX)) {
2516 /* not held by SMS, we clear the semaphore */
2517 IOSRAM_SEMA_WR(softp, 0);
2518 }
2519 } else {
2520 /* not the first SBBC, we clear the semaphore */
2521 IOSRAM_SEMA_WR(softp, 0);
2522 }
2523
2524 mutex_exit(&softp->intr_mutex);
2525 mutex_exit(&iosram_mutex);
2526 return (0);
2527 }
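
/*
 * The semaphore-clearing policy above can be summarized by a predicate
 * like the following sketch (illustrative, not compiled); the helper name
 * is hypothetical:
 *
 *	static int
 *	sema_should_clear(uint32_t sema_val, int first_sbbc)
 *	{
 *		if (!first_sbbc)
 *			return (1);
 *		return (!(IOSRAM_SEMA_IS_HELD(sema_val) &&
 *		    IOSRAM_SEMA_GET_IDX(sema_val) == IOSRAM_SEMA_SMS_IDX));
 *	}
 *
 * That is, the semaphore value is preserved only when the very first SBBC
 * is being mapped in and SMS currently holds the semaphore.
 */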
2528
2529
2530 static int
2531 iosram_setup_map(struct iosramsoft *softp)
2532 {
2533 int instance = softp->instance;
2534 dev_info_t *dip = softp->dip;
2535 int portid;
2536 int proplen;
2537 caddr_t propvalue;
2538 struct ddi_device_acc_attr attr;
2539
2540 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2541 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2542 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2543
2544 /*
2545 * Lookup IOSRAM_REG_PROP property to find out our IOSRAM length
2546 */
2547 if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
2548 DDI_PROP_DONTPASS, IOSRAM_REG_PROP, (caddr_t)&propvalue,
2549 &proplen) != DDI_PROP_SUCCESS) {
2550 cmn_err(CE_WARN, "iosram(%d): can't find register property.\n",
2551 instance);
2552 return (DDI_FAILURE);
2553 } else {
2554 iosram_reg_t *regprop = (iosram_reg_t *)propvalue;
2555
2556 DPRINTF(1, ("SetupMap(%d): Got reg prop: %x %x %x\n",
2557 instance, regprop->addr_hi,
2558 regprop->addr_lo, regprop->size));
2559
2560 softp->iosramlen = regprop->size;
2561
2562 kmem_free(propvalue, proplen);
2563 }
2564 DPRINTF(1, ("SetupMap(%d): IOSRAM length: 0x%x\n", instance,
2565 softp->iosramlen));
2566 softp->handle = NULL;
2567
2568 /*
2569 * To minimize boot time, we map the entire IOSRAM as opposed to
2570 * mapping individual chunk via ddi_regs_map_setup() call.
2571 */
2572 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&softp->iosramp,
2573 0x0, softp->iosramlen, &attr, &softp->handle) != DDI_SUCCESS) {
2574 cmn_err(CE_WARN, "iosram(%d): failed to map IOSRAM len:%x\n",
2575 instance, softp->iosramlen);
2576 iosram_remove_map(softp);
2577 return (DDI_FAILURE);
2578 }
2579
2580 /*
2581 * Lookup PORTID property on my parent hierarchy
2582 */
2583 proplen = sizeof (portid);
2584 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
2585 0, IOSRAM_PORTID_PROP, (caddr_t)&portid,
2586 &proplen) != DDI_PROP_SUCCESS) {
2587 cmn_err(CE_WARN, "iosram(%d): can't find portid property.\n",
2588 instance);
2589 iosram_remove_map(softp);
2590 return (DDI_FAILURE);
2591 }
2592 softp->portid = portid;
2593
2594 if (iosram_sbbc_setup_map(softp) != DDI_SUCCESS) {
2595 cmn_err(CE_WARN, "iosram(%d): can't map SBBC region.\n",
2596 instance);
2597 iosram_remove_map(softp);
2598 return (DDI_FAILURE);
2599 }
2600
2601 mutex_enter(&iosram_mutex);
2602 softp->state |= IOSRAM_STATE_MAPPED;
2603 mutex_exit(&iosram_mutex);
2604
2605 return (DDI_SUCCESS);
2606 }
2607
2608
2609 static void
2610 iosram_remove_map(struct iosramsoft *softp)
2611 {
2612 mutex_enter(&iosram_mutex);
2613
2614 ASSERT((softp->state & IOSRAM_STATE_MASTER) == 0);
2615
2616 if (softp->handle) {
2617 ddi_regs_map_free(&softp->handle);
2618 softp->handle = NULL;
2619 }
2620 softp->iosramp = NULL;
2621
2622 /*
2623 	 * Unmap the SBBC register region. Since it is shared with the
2624 	 * SBBC interrupt handler, take intr_mutex.
2625 */
2626 mutex_enter(&softp->intr_mutex);
2627 if (softp->sbbc_region) {
2628 ddi_regs_map_free(&softp->sbbc_handle);
2629 softp->sbbc_region = NULL;
2630 }
2631 mutex_exit(&softp->intr_mutex);
2632
2633 softp->state &= ~IOSRAM_STATE_MAPPED;
2634
2635 mutex_exit(&iosram_mutex);
2636 }
2637
2638
2639 /*
2640 * iosram_is_chosen(struct iosramsoft *softp)
2641 *
2642 * Looks up "chosen" node property to
2643 * determine if it is the chosen IOSRAM.
2644 */
2645 static int
2646 iosram_is_chosen(struct iosramsoft *softp)
2647 {
2648 char chosen_iosram[MAXNAMELEN];
2649 char pn[MAXNAMELEN];
2650 int nodeid;
2651 int chosen;
2652 pnode_t dnode;
2653
2654 /*
2655 * Get /chosen node info. prom interface will handle errors.
2656 */
2657 dnode = prom_chosennode();
2658
2659 /*
2660 	 * Look for the "iosram" property on the chosen node using the prom
2661 	 * interface, as ddi_find_devinfo() can't be used here (it calls
2662 	 * ddi_walk_devs(), which takes an extra lock on the device tree).
2663 */
2664 if (prom_getprop(dnode, IOSRAM_CHOSEN_PROP, (caddr_t)&nodeid) <= 0) {
2665 /*
2666 * Can't find IOSRAM_CHOSEN_PROP property under chosen node
2667 */
2668 cmn_err(CE_WARN,
2669 "iosram(%d): can't find chosen iosram property\n",
2670 softp->instance);
2671 return (0);
2672 }
2673
2674 DPRINTF(1, ("iosram(%d): Got '%x' for chosen '%s' property\n",
2675 softp->instance, nodeid, IOSRAM_CHOSEN_PROP));
2676
2677 /*
2678 * get the full OBP pathname of this node
2679 */
2680 if (prom_phandle_to_path((phandle_t)nodeid, chosen_iosram,
2681 sizeof (chosen_iosram)) < 0) {
2682 cmn_err(CE_NOTE, "prom_phandle_to_path(%x) failed\n", nodeid);
2683 return (0);
2684 }
2685 DPRINTF(1, ("iosram(%d): prom_phandle_to_path(%x) is '%s'\n",
2686 softp->instance, nodeid, chosen_iosram));
2687
2688 (void) ddi_pathname(softp->dip, pn);
2689 DPRINTF(1, ("iosram(%d): ddi_pathname(%p) is '%s'\n",
2690 softp->instance, (void *)softp->dip, pn));
2691
2692 chosen = (strcmp(chosen_iosram, pn) == 0) ? 1 : 0;
2693 DPRINTF(1, ("iosram(%d): ... %s\n", softp->instance,
2694 chosen ? "MASTER" : "SLAVE"));
2695 IOSRAMLOG(1, "iosram(%d): ... %s\n", softp->instance,
2696 (chosen ? "MASTER" : "SLAVE"), NULL, NULL);
2697
2698 return (chosen);
2699 }
2700
2701
2702 /*
2703 * iosram_set_master(struct iosramsoft *softp)
2704 *
2705 * Set master tunnel to the specified IOSRAM
2706 * Must be called while holding iosram_mutex.
2707 */
2708 static void
2709 iosram_set_master(struct iosramsoft *softp)
2710 {
2711 ASSERT(mutex_owned(&iosram_mutex));
2712 ASSERT(softp != NULL);
2713 ASSERT(softp->state & IOSRAM_STATE_MAPPED);
2714 ASSERT(IOSRAM_GET_HDRFIELD32(softp, status) == IOSRAM_VALID);
2715
2716 /*
2717 * Clear MASTER flag on any previous IOSRAM master, if any
2718 */
2719 if (iosram_master && (iosram_master != softp)) {
2720 iosram_master->state &= ~IOSRAM_STATE_MASTER;
2721 }
2722
2723 /*
2724 * Setup new IOSRAM master
2725 */
2726 iosram_update_addrs(softp);
2727 iosram_handle = softp->handle;
2728 softp->state |= IOSRAM_STATE_MASTER;
2729 softp->tswitch_ok++;
2730 iosram_master = softp;
2731
2732 IOSRAMLOG(1, "SETMASTER: softp:%p instance:%d\n", softp,
2733 softp->instance, NULL, NULL);
2734 }
2735
2736
2737 /*
2738 * iosram_read_toc()
2739 *
2740 * Read the TOC from an IOSRAM instance that has been mapped in.
2741 * If the TOC is flawed or the IOSRAM isn't valid, return an error.
2742 */
2743 static int
2744 iosram_read_toc(struct iosramsoft *softp)
2745 {
2746 int i;
2747 int instance = softp->instance;
2748 uint8_t *toc_entryp;
2749 iosram_flags_t *flagsp = NULL;
2750 int new_nchunks;
2751 iosram_chunk_t *new_chunks;
2752 iosram_chunk_t *chunkp;
2753 iosram_chunk_t *old_chunkp;
2754 iosram_toc_entry_t index;
2755
2756 /*
2757 * Never try to read the TOC out of an unmapped IOSRAM.
2758 */
2759 ASSERT(softp->state & IOSRAM_STATE_MAPPED);
2760
2761 mutex_enter(&iosram_mutex);
2762
2763 /*
2764 * Check to make sure this IOSRAM is marked valid. Return
2765 * an error if it isn't.
2766 */
2767 if (IOSRAM_GET_HDRFIELD32(softp, status) != IOSRAM_VALID) {
2768 DPRINTF(1, ("iosram_read_toc(%d): IOSRAM not flagged valid\n",
2769 instance));
2770 mutex_exit(&iosram_mutex);
2771 return (EINVAL);
2772 }
2773
2774 /*
2775 * Get the location of the TOC.
2776 */
2777 toc_entryp = softp->iosramp + IOSRAM_GET_HDRFIELD32(softp, toc_offset);
2778
2779 /*
2780 * Read the index entry from the TOC and make sure it looks correct.
2781 */
2782 ddi_rep_get8(softp->handle, (uint8_t *)&index, toc_entryp,
2783 sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR);
2784 if ((index.key != IOSRAM_INDEX_KEY) ||
2785 (index.off != IOSRAM_INDEX_OFF)) {
2786 cmn_err(CE_WARN, "iosram(%d): invalid TOC index.\n", instance);
2787 mutex_exit(&iosram_mutex);
2788 return (EINVAL);
2789 }
2790
2791 /*
2792 * Allocate storage for the new chunks array and initialize it with data
2793 * from the TOC and callback data from the corresponding old chunk, if
2794 * it exists.
2795 */
2796 new_nchunks = index.len - 1;
2797 new_chunks = (iosram_chunk_t *)kmem_zalloc(new_nchunks *
2798 sizeof (iosram_chunk_t), KM_SLEEP);
2799 for (i = 0, chunkp = new_chunks; i < new_nchunks; i++, chunkp++) {
2800 toc_entryp += sizeof (iosram_toc_entry_t);
2801 ddi_rep_get8(softp->handle, (uint8_t *)&(chunkp->toc_data),
2802 toc_entryp, sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR);
2803 chunkp->hash = NULL;
2804 if ((chunkp->toc_data.off < softp->iosramlen) &&
2805 (chunkp->toc_data.len <= softp->iosramlen) &&
2806 ((chunkp->toc_data.off + chunkp->toc_data.len) <=
2807 softp->iosramlen)) {
2808 chunkp->basep = softp->iosramp + chunkp->toc_data.off;
2809 DPRINTF(1,
2810 ("iosram_read_toc(%d): k:%x o:%x l:%x p:%p\n",
2811 instance, chunkp->toc_data.key,
2812 chunkp->toc_data.off, chunkp->toc_data.len,
2813 (void *)chunkp->basep));
2814 } else {
2815 			cmn_err(CE_WARN, "iosram(%d): TOC entry %d "
2816 "out of range... off:%x len:%x\n",
2817 instance, i + 1, chunkp->toc_data.off,
2818 chunkp->toc_data.len);
2819 kmem_free(new_chunks, new_nchunks *
2820 sizeof (iosram_chunk_t));
2821 mutex_exit(&iosram_mutex);
2822 return (EINVAL);
2823 }
2824
2825 /*
2826 * Note the existence of the flags chunk, which is required in
2827 * a correct TOC.
2828 */
2829 if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2830 flagsp = (iosram_flags_t *)chunkp->basep;
2831 }
2832
2833 /*
2834 * If there was an entry for this chunk in the old list, copy
2835 * the callback data from old to new storage.
2836 */
2837 if ((nchunks > 0) &&
2838 ((old_chunkp = iosram_find_chunk(chunkp->toc_data.key)) !=
2839 NULL)) {
2840 bcopy(&(old_chunkp->cback), &(chunkp->cback),
2841 sizeof (iosram_cback_t));
2842 }
2843 }
2844 /*
2845 * The TOC is malformed if there is no entry for the flags chunk.
2846 */
2847 if (flagsp == NULL) {
2848 kmem_free(new_chunks, new_nchunks * sizeof (iosram_chunk_t));
2849 mutex_exit(&iosram_mutex);
2850 return (EINVAL);
2851 }
2852
2853 /*
2854 * Free any memory that is no longer needed and install the new data
2855 * as current data.
2856 */
2857 if (chunks != NULL) {
2858 kmem_free(chunks, nchunks * sizeof (iosram_chunk_t));
2859 }
2860 chunks = new_chunks;
2861 nchunks = new_nchunks;
2862 iosram_init_hashtab();
2863
2864 mutex_exit(&iosram_mutex);
2865 return (0);
2866 }
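
/*
 * TOC layout assumed above (sketch): the entry at toc_offset is an index
 * whose key/off must equal IOSRAM_INDEX_KEY/IOSRAM_INDEX_OFF and whose
 * len counts the index entry itself, so the number of data chunks is
 * (index.len - 1). Each chunk entry is then range-checked roughly as in
 * this illustrative helper (not part of the driver):
 *
 *	static int
 *	toc_entry_ok(const iosram_toc_entry_t *ep, uint32_t iosramlen)
 *	{
 *		return ((ep->off < iosramlen) &&
 *		    (ep->len <= iosramlen) &&
 *		    ((ep->off + ep->len) <= iosramlen));
 *	}
 */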
2867
2868
2869 /*
2870 * iosram_init_hashtab()
2871 *
2872 * Initialize the hash table and populate it with the IOSRAM
2873 * chunks previously read from the TOC. The caller must hold the
2874  * iosram_mutex lock.
2875 */
2876 static void
2877 iosram_init_hashtab(void)
2878 {
2879 int i, bucket;
2880 iosram_chunk_t *chunkp;
2881
2882 ASSERT(mutex_owned(&iosram_mutex));
2883
2884 for (i = 0; i < IOSRAM_HASHSZ; i++) {
2885 iosram_hashtab[i] = NULL;
2886 }
2887
2888 if (chunks) {
2889 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
2890 /*
2891 * Hide the flags chunk by leaving it out of the hash
2892 * table.
2893 */
2894 if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2895 continue;
2896 }
2897
2898 /*
2899 * Add the current chunk to the hash table.
2900 */
2901 bucket = IOSRAM_HASH(chunkp->toc_data.key);
2902 chunkp->hash = iosram_hashtab[bucket];
2903 iosram_hashtab[bucket] = chunkp;
2904 }
2905 }
2906 }
2907
2908
2909 /*
2910 * iosram_update_addrs()
2911 *
2912 * Process the chunk list, updating each chunk's basep, which is a pointer
2913 * to the beginning of the chunk's memory in kvaddr space. Record the
2914 * basep value of the flags chunk to speed up flag access. The caller
2915 * must hold the iosram_mutex lock.
2916 */
2917 static void
2918 iosram_update_addrs(struct iosramsoft *softp)
2919 {
2920 int i;
2921 iosram_flags_t *flagsp;
2922 iosram_chunk_t *chunkp;
2923
2924 ASSERT(mutex_owned(&iosram_mutex));
2925
2926 /*
2927 * First go through all of the chunks updating their base pointers and
2928 * looking for the flags chunk.
2929 */
2930 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
2931 chunkp->basep = softp->iosramp + chunkp->toc_data.off;
2932 if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) {
2933 flagsp = (iosram_flags_t *)(chunkp->basep);
2934 DPRINTF(1,
2935 			    ("iosram_update_addrs flags: o:0x%08x p:%p\n",
2936 chunkp->toc_data.off, (void *)flagsp));
2937 }
2938 }
2939
2940 /*
2941 * Now, go through and update each chunk's flags pointer. This can't be
2942 * done in the first loop because we don't have the address of the flags
2943 * chunk yet.
2944 */
2945 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
2946 chunkp->flagsp = flagsp++;
2947 DPRINTF(1, ("iosram_update_addrs: k:0x%x f:%p\n",
2948 chunkp->toc_data.key, (void *)chunkp->flagsp));
2949 }
2950 }
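
/*
 * Layout note (sketch, for illustration): the flags chunk holds an array
 * of iosram_flags_t, one element per TOC chunk in TOC order, so chunk i's
 * flags live at
 *
 *	(iosram_flags_t *)flags_chunk_basep + i
 *
 * which is what the post-increment of flagsp in the second loop above
 * implements. The name flags_chunk_basep is hypothetical.
 */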
2951
2952 /*
2953 * iosram_find_chunk(key)
2954 *
2955 * Return a pointer to iosram_chunk structure corresponding to the
2956 * "key" IOSRAM chunk. The caller must hold the iosram_mutex lock.
2957 */
2958 static iosram_chunk_t *
2959 iosram_find_chunk(uint32_t key)
2960 {
2961 iosram_chunk_t *chunkp;
2962 int index = IOSRAM_HASH(key);
2963
2964 ASSERT(mutex_owned(&iosram_mutex));
2965
2966 for (chunkp = iosram_hashtab[index]; chunkp; chunkp = chunkp->hash) {
2967 if (chunkp->toc_data.key == key) {
2968 break;
2969 }
2970 }
2971
2972 return (chunkp);
2973 }
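
/*
 * Lookup usage sketch (illustrative only): callers resolve a key to its
 * chunk while holding iosram_mutex, e.g.:
 *
 *	mutex_enter(&iosram_mutex);
 *	chunkp = iosram_find_chunk(key);
 *	if (chunkp == NULL) {
 *		mutex_exit(&iosram_mutex);
 *		return (EINVAL);
 *	}
 *	... use chunkp->basep / chunkp->flagsp ...
 *	mutex_exit(&iosram_mutex);
 *
 * The hash chains themselves are rebuilt by iosram_init_hashtab() whenever
 * a new TOC is installed.
 */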
2974
2975
2976 /*
2977 * iosram_add_intr(iosramsoft_t *)
2978 */
2979 static int
2980 iosram_add_intr(iosramsoft_t *softp)
2981 {
2982 IOSRAMLOG(2, "ADDINTR: softp:%p instance:%d\n",
2983 softp, softp->instance, NULL, NULL);
2984
2985 if (ddi_add_softintr(softp->dip, DDI_SOFTINT_MED,
2986 &softp->softintr_id, &softp->soft_iblk, NULL,
2987 iosram_softintr, (caddr_t)softp) != DDI_SUCCESS) {
2988 cmn_err(CE_WARN,
2989 "iosram(%d): Can't register softintr.\n",
2990 softp->instance);
2991 return (DDI_FAILURE);
2992 }
2993
2994 if (ddi_add_intr(softp->dip, 0, &softp->real_iblk, NULL,
2995 iosram_intr, (caddr_t)softp) != DDI_SUCCESS) {
2996 cmn_err(CE_WARN,
2997 "iosram(%d): Can't register intr"
2998 " handler.\n", softp->instance);
2999 ddi_remove_softintr(softp->softintr_id);
3000 return (DDI_FAILURE);
3001 }
3002
3003 /*
3004 * Enable SBBC interrupts
3005 */
3006 ddi_put32(softp->sbbc_handle, &(softp->sbbc_region->int_enable.reg),
3007 IOSRAM_SBBC_INT0|IOSRAM_SBBC_INT1);
3008
3009 return (DDI_SUCCESS);
3010 }
3011
3012
3013 /*
3014 * iosram_remove_intr(iosramsoft_t *)
3015 */
3016 static int
3017 iosram_remove_intr(iosramsoft_t *softp)
3018 {
3019 IOSRAMLOG(2, "REMINTR: softp:%p instance:%d\n",
3020 softp, softp->instance, NULL, NULL);
3021
3022 /*
3023 * Disable SBBC interrupts if SBBC is mapped in
3024 */
3025 if (softp->sbbc_region) {
3026 ddi_put32(softp->sbbc_handle,
3027 &(softp->sbbc_region->int_enable.reg), 0);
3028 }
3029
3030 /*
3031 * Remove SBBC interrupt handler
3032 */
3033 ddi_remove_intr(softp->dip, 0, softp->real_iblk);
3034
3035 /*
3036 * Remove soft interrupt handler
3037 */
3038 mutex_enter(&iosram_mutex);
3039 if (softp->softintr_id != NULL) {
3040 ddi_remove_softintr(softp->softintr_id);
3041 softp->softintr_id = NULL;
3042 }
3043 mutex_exit(&iosram_mutex);
3044
3045 return (0);
3046 }
3047
3048
3049 /*
3050 * iosram_add_instance(iosramsoft_t *)
3051 * Must be called while holding iosram_mutex
3052 */
3053 static void
3054 iosram_add_instance(iosramsoft_t *new_softp)
3055 {
3056 #ifdef DEBUG
3057 int instance = new_softp->instance;
3058 iosramsoft_t *softp;
3059 #endif
3060
3061 ASSERT(mutex_owned(&iosram_mutex));
3062
3063 #if defined(DEBUG)
3064 /* Verify that this instance is not in the list */
3065 for (softp = iosram_instances; softp != NULL; softp = softp->next) {
3066 ASSERT(softp->instance != instance);
3067 }
3068 #endif
3069
3070 /*
3071 * Add this instance to the list
3072 */
3073 if (iosram_instances != NULL) {
3074 iosram_instances->prev = new_softp;
3075 }
3076 new_softp->next = iosram_instances;
3077 new_softp->prev = NULL;
3078 iosram_instances = new_softp;
3079 }
3080
3081
3082 /*
3083 * iosram_remove_instance(int instance)
3084 * Must be called while holding iosram_mutex
3085 */
3086 static void
3087 iosram_remove_instance(int instance)
3088 {
3089 iosramsoft_t *softp;
3090
3091 /*
3092 * Remove specified instance from the iosram_instances list so that
3093 * it can't be chosen for tunnel in future.
3094 */
3095 ASSERT(mutex_owned(&iosram_mutex));
3096
3097 for (softp = iosram_instances; softp != NULL; softp = softp->next) {
3098 if (softp->instance == instance) {
3099 if (softp->next != NULL) {
3100 softp->next->prev = softp->prev;
3101 }
3102 if (softp->prev != NULL) {
3103 softp->prev->next = softp->next;
3104 }
3105 if (iosram_instances == softp) {
3106 iosram_instances = softp->next;
3107 }
3108
3109 return;
3110 }
3111 }
3112 }
3113
3114
3115 /*
3116 * iosram_sema_acquire: Acquire hardware semaphore.
3117 * Return 0 if the semaphore could be acquired, or one of the following
3118 * possible values:
3119 * EAGAIN: there is a tunnel switch in progress
3120 * EBUSY: the semaphore was already "held"
3121  *	ENXIO: an IO error occurred (e.g. SBBC not mapped)
3122 * If old_value is not NULL, the location it points to will be updated
3123 * with the semaphore value read when attempting to acquire it.
3124 */
3125 int
3126 iosram_sema_acquire(uint32_t *old_value)
3127 {
3128 struct iosramsoft *softp;
3129 int rv;
3130 uint32_t sema_val;
3131
3132 DPRINTF(2, ("IOSRAM: in iosram_sema_acquire\n"));
3133
3134 mutex_enter(&iosram_mutex);
3135
3136 /*
3137 * Disallow access if there is a tunnel switch in progress.
3138 */
3139 if (iosram_tswitch_active) {
3140 mutex_exit(&iosram_mutex);
3141 return (EAGAIN);
3142 }
3143
3144 /*
3145 * Use current master IOSRAM for operation, fail if none is
3146 * currently active.
3147 */
3148 if ((softp = iosram_master) == NULL) {
3149 mutex_exit(&iosram_mutex);
3150 DPRINTF(1, ("IOSRAM: iosram_sema_acquire: no master\n"));
3151 return (ENXIO);
3152 }
3153
3154 mutex_enter(&softp->intr_mutex);
3155
3156 /*
3157 * Fail if SBBC region has not been mapped. This shouldn't
3158 * happen if we have a master IOSRAM, but we double-check.
3159 */
3160 if (softp->sbbc_region == NULL) {
3161 mutex_exit(&softp->intr_mutex);
3162 mutex_exit(&iosram_mutex);
3163 DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
3164 "SBBC not mapped\n", softp->instance));
3165 return (ENXIO);
3166 }
3167
3168 /* read semaphore value */
3169 sema_val = IOSRAM_SEMA_RD(softp);
3170 if (old_value != NULL)
3171 *old_value = sema_val;
3172
3173 if (IOSRAM_SEMA_IS_HELD(sema_val)) {
3174 /* semaphore was held by someone else */
3175 rv = EBUSY;
3176 } else {
3177 /* semaphore was not held, we just acquired it */
3178 rv = 0;
3179 }
3180
3181 mutex_exit(&softp->intr_mutex);
3182 mutex_exit(&iosram_mutex);
3183
3184 DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
3185 "old value=0x%x rv=%d\n", softp->instance, sema_val, rv));
3186
3187 return (rv);
3188 }
3189
3190
3191 /*
3192 * iosram_sema_release: Release hardware semaphore.
3193 * This function will "release" the hardware semaphore, and return 0 on
3194  * success. If an error occurred, one of the following values will be
3195 * returned:
3196 * EAGAIN: there is a tunnel switch in progress
3197 * ENXIO: an IO error occured (e.g. SBBC not mapped)
3198 */
3199 int
3200 iosram_sema_release(void)
3201 {
3202 struct iosramsoft *softp;
3203
3204 DPRINTF(2, ("IOSRAM: in iosram_sema_release\n"));
3205
3206 mutex_enter(&iosram_mutex);
3207
3208 /*
3209 * Disallow access if there is a tunnel switch in progress.
3210 */
3211 if (iosram_tswitch_active) {
3212 mutex_exit(&iosram_mutex);
3213 return (EAGAIN);
3214 }
3215
3216 /*
3217 * Use current master IOSRAM for operation, fail if none is
3218 * currently active.
3219 */
3220 if ((softp = iosram_master) == NULL) {
3221 mutex_exit(&iosram_mutex);
3222 DPRINTF(1, ("IOSRAM: iosram_sema_release: no master\n"));
3223 return (ENXIO);
3224 }
3225
3226 mutex_enter(&softp->intr_mutex);
3227
3228 /*
3229 * Fail if SBBC region has not been mapped in. This shouldn't
3230 * happen if we have a master IOSRAM, but we double-check.
3231 */
3232 if (softp->sbbc_region == NULL) {
3233 mutex_exit(&softp->intr_mutex);
3234 mutex_exit(&iosram_mutex);
3235 DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: "
3236 "SBBC not mapped\n", softp->instance));
3237 return (ENXIO);
3238 }
3239
3240 /* Release semaphore by clearing our semaphore register */
3241 IOSRAM_SEMA_WR(softp, 0);
3242
3243 mutex_exit(&softp->intr_mutex);
3244 mutex_exit(&iosram_mutex);
3245
3246 DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: success\n",
3247 softp->instance));
3248
3249 return (0);
3250 }
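
/*
 * Usage sketch (illustrative only): a kernel client would typically
 * bracket its critical section with the two calls above and retry on
 * contention, e.g.:
 *
 *	uint32_t old;
 *	int rv;
 *
 *	while ((rv = iosram_sema_acquire(&old)) == EBUSY)
 *		delay(drv_usectohz(1000));
 *	if (rv == 0) {
 *		... critical section ...
 *		(void) iosram_sema_release();
 *	}
 *
 * The 1000us retry interval is arbitrary; EAGAIN (tunnel switch in
 * progress) and ENXIO (no usable SBBC) are simply passed back to the
 * caller in this sketch.
 */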
3251
3252
3253 #if defined(IOSRAM_LOG)
3254 void
3255 iosram_log(caddr_t fmt, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4)
3256 {
3257 uint32_t seq;
3258 iosram_log_t *logp;
3259
3260 mutex_enter(&iosram_log_mutex);
3261
3262 seq = iosram_logseq++;
3263 logp = &iosram_logbuf[seq % IOSRAM_MAXLOG];
3264 logp->seq = seq;
3265 logp->tstamp = ddi_get_lbolt();
3266 logp->fmt = fmt;
3267 logp->arg1 = a1;
3268 logp->arg2 = a2;
3269 logp->arg3 = a3;
3270 logp->arg4 = a4;
3271
3272 mutex_exit(&iosram_log_mutex);
3273
3274 if (iosram_log_print) {
3275 cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp);
3276 if (logp->fmt) {
3277 cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2,
3278 logp->arg3, logp->arg4);
3279 if (logp->fmt[strlen(logp->fmt)-1] != '\n') {
3280 cmn_err(CE_CONT, "\n");
3281 }
3282 } else {
3283 cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n",
3284 (void *)logp->fmt, logp->arg1, logp->arg2,
3285 logp->arg3, logp->arg4);
3286 }
3287 }
3288 }
3289 #endif /* IOSRAM_LOG */
3290
3291
3292 #if defined(DEBUG)
3293 /*
3294 * iosram_get_keys(buf, len)
3295 * Return IOSRAM TOC in the specified buffer
3296 */
3297 static int
3298 iosram_get_keys(iosram_toc_entry_t *bufp, uint32_t *len)
3299 {
3300 struct iosram_chunk *chunkp;
3301 int error = 0;
3302 int i;
3303 int cnt = (*len) / sizeof (iosram_toc_entry_t);
3304
3305 IOSRAMLOG(2, "iosram_get_keys(bufp:%p *len:%x)\n", bufp, *len, NULL,
3306 NULL);
3307
3308 /*
3309 * Copy data while holding the lock to prevent any data
3310 * corruption or invalid pointer dereferencing.
3311 */
3312 mutex_enter(&iosram_mutex);
3313
3314 if (iosram_master == NULL) {
3315 error = EIO;
3316 } else {
3317 for (i = 0, chunkp = chunks; i < nchunks && i < cnt;
3318 i++, chunkp++) {
3319 bufp[i].key = chunkp->toc_data.key;
3320 bufp[i].off = chunkp->toc_data.off;
3321 bufp[i].len = chunkp->toc_data.len;
3322 bufp[i].unused = chunkp->toc_data.unused;
3323 }
3324 *len = i * sizeof (iosram_toc_entry_t);
3325 }
3326
3327 mutex_exit(&iosram_mutex);
3328 return (error);
3329 }
3330
3331
3332 /*
3333 * iosram_print_state(instance)
3334 */
3335 static void
3336 iosram_print_state(int instance)
3337 {
3338 struct iosramsoft *softp;
3339 char pn[MAXNAMELEN];
3340
3341 if (instance < 0) {
3342 softp = iosram_master;
3343 } else {
3344 softp = ddi_get_soft_state(iosramsoft_statep, instance);
3345 }
3346
3347 if (softp == NULL) {
3348 cmn_err(CE_CONT, "iosram_print_state: Can't find instance %d\n",
3349 instance);
3350 return;
3351 }
3352 instance = softp->instance;
3353
3354 mutex_enter(&iosram_mutex);
3355 mutex_enter(&softp->intr_mutex);
3356
3357 cmn_err(CE_CONT, "iosram_print_state(%d): ... %s\n", instance,
3358 ((softp == iosram_master) ? "MASTER" : "SLAVE"));
3359
3360 (void) ddi_pathname(softp->dip, pn);
3361 cmn_err(CE_CONT, " pathname:%s\n", pn);
3362 cmn_err(CE_CONT, " instance:%d portid:%d iosramlen:0x%x\n",
3363 softp->instance, softp->portid, softp->iosramlen);
3364 cmn_err(CE_CONT, " softp:%p handle:%p iosramp:%p\n", (void *)softp,
3365 (void *)softp->handle, (void *)softp->iosramp);
3366 cmn_err(CE_CONT, " state:0x%x tswitch_ok:%x tswitch_fail:%x\n",
3367 softp->state, softp->tswitch_ok, softp->tswitch_fail);
3368 cmn_err(CE_CONT, " softintr_id:%p intr_busy:%x intr_pending:%x\n",
3369 (void *)softp->softintr_id, softp->intr_busy, softp->intr_pending);
3370
3371 mutex_exit(&softp->intr_mutex);
3372 mutex_exit(&iosram_mutex);
3373 }
3374
3375
3376 /*
3377 * iosram_print_stats()
3378 */
3379 static void
3380 iosram_print_stats()
3381 {
3382 uint32_t calls;
3383
3384 cmn_err(CE_CONT, "iosram_stats:\n");
3385 calls = iosram_stats.read;
3386 cmn_err(CE_CONT, " read ... calls:%x bytes:%lx avg_sz:%x\n",
3387 calls, iosram_stats.bread,
3388 (uint32_t)((calls != 0) ? (iosram_stats.bread/calls) : 0));
3389
3390 calls = iosram_stats.write;
3391 cmn_err(CE_CONT, " write ... calls:%x bytes:%lx avg_sz:%x\n",
3392 calls, iosram_stats.bwrite,
3393 (uint32_t)((calls != 0) ? (iosram_stats.bwrite/calls) : 0));
3394
3395 cmn_err(CE_CONT, " intr recv (real:%x soft:%x) sent:%x cback:%x\n",
3396 iosram_stats.intr_recv, iosram_stats.sintr_recv,
3397 iosram_stats.intr_send, iosram_stats.callbacks);
3398
3399 cmn_err(CE_CONT, " tswitch: %x getflag:%x setflag:%x\n",
3400 iosram_stats.tswitch, iosram_stats.getflag,
3401 iosram_stats.setflag);
3402
3403 cmn_err(CE_CONT, " iosram_rw_active_max: %x\n", iosram_rw_active_max);
3404 }
3405
3406
3407 static void
3408 iosram_print_cback()
3409 {
3410 iosram_chunk_t *chunkp;
3411 int i;
3412
3413 /*
3414 * Print callback handlers
3415 */
3416 mutex_enter(&iosram_mutex);
3417
3418 cmn_err(CE_CONT, "IOSRAM callbacks:\n");
3419 for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
3420 if (chunkp->cback.handler) {
3421 cmn_err(CE_CONT, " %2d: key:0x%x hdlr:%p arg:%p "
3422 "busy:%d unreg:%d\n", i, chunkp->toc_data.key,
3423 (void *)chunkp->cback.handler,
3424 (void *)chunkp->cback.arg,
3425 chunkp->cback.busy, chunkp->cback.unregister);
3426 }
3427 }
3428 mutex_exit(&iosram_mutex);
3429 }
3430
3431
3432 static void
3433 iosram_print_flags()
3434 {
3435 int i;
3436 uint32_t *keys;
3437 iosram_flags_t *flags;
3438
3439 mutex_enter(&iosram_mutex);
3440
3441 if (iosram_master == NULL) {
3442 mutex_exit(&iosram_mutex);
3443 cmn_err(CE_CONT, "IOSRAM Flags: not accessible\n");
3444 return;
3445 }
3446
3447 keys = kmem_alloc(nchunks * sizeof (uint32_t), KM_SLEEP);
3448 flags = kmem_alloc(nchunks * sizeof (iosram_flags_t), KM_SLEEP);
3449
3450 for (i = 0; i < nchunks; i++) {
3451 keys[i] = chunks[i].toc_data.key;
3452 ddi_rep_get8(iosram_handle, (uint8_t *)&(flags[i]),
3453 (uint8_t *)(chunks[i].flagsp), sizeof (iosram_flags_t),
3454 DDI_DEV_AUTOINCR);
3455 }
3456
3457 mutex_exit(&iosram_mutex);
3458
3459 cmn_err(CE_CONT, "IOSRAM Flags:\n");
3460 for (i = 0; i < nchunks; i++) {
3461 cmn_err(CE_CONT,
3462 " %2d: key: 0x%x data_valid:%x int_pending:%x\n",
3463 i, keys[i], flags[i].data_valid, flags[i].int_pending);
3464 }
3465
3466 kmem_free(keys, nchunks * sizeof (uint32_t));
3467 kmem_free(flags, nchunks * sizeof (iosram_flags_t));
3468 }
3469
3470
3471 /*PRINTFLIKE1*/
3472 static void
3473 iosram_dprintf(const char *fmt, ...)
3474 {
3475 char msg_buf[256];
3476 va_list adx;
3477
3478 va_start(adx, fmt);
3479 	(void) vsnprintf(msg_buf, sizeof (msg_buf), fmt, adx);
3480 va_end(adx);
3481
3482 cmn_err(CE_CONT, "%s", msg_buf);
3483 }
3484 #endif /* DEBUG */
3485
3486
3487 #if IOSRAM_LOG
3488 /*
3489 * iosram_print_log(int cnt)
3490 * Print last few entries of the IOSRAM log in reverse order
3491 */
3492 static void
3493 iosram_print_log(int cnt)
3494 {
3495 int i;
3496
3497 if (cnt <= 0) {
3498 cnt = 20;
3499 } else if (cnt > IOSRAM_MAXLOG) {
3500 cnt = IOSRAM_MAXLOG;
3501 }
3502
3503
3504 cmn_err(CE_CONT,
3505 "\niosram_logseq: 0x%x lbolt: %lx iosram_log_level:%x\n",
3506 iosram_logseq, ddi_get_lbolt(), iosram_log_level);
3507 cmn_err(CE_CONT, "iosram_logbuf: %p max entries:0x%x\n",
3508 (void *)iosram_logbuf, IOSRAM_MAXLOG);
3509 for (i = iosram_logseq; --i >= 0 && --cnt >= 0; ) {
3510 iosram_log_t *logp;
3511
3512 mutex_enter(&iosram_log_mutex);
3513
3514 		logp = &iosram_logbuf[i % IOSRAM_MAXLOG];
3515 cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp);
3516
3517 if (logp->fmt) {
3518 cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2,
3519 logp->arg3, logp->arg4);
3520 if (logp->fmt[strlen(logp->fmt)-1] != '\n') {
3521 cmn_err(CE_CONT, "\n");
3522 }
3523 } else {
3524 cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n",
3525 (void *)logp->fmt, logp->arg1, logp->arg2,
3526 logp->arg3, logp->arg4);
3527 }
3528
3529 mutex_exit(&iosram_log_mutex);
3530 }
3531 }
3532 #endif /* IOSRAM_LOG */
3533