xref: /titanic_41/usr/src/uts/sun4u/starcat/io/axq.c (revision 2b4a78020b9c38d1b95e2f3fefa6d6e4be382d1f)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/autoconf.h>
#include <sys/modctl.h>
#include <sys/sunndi.h>

#include <sys/axq.h>
#include <sys/promif.h>
#include <sys/cpuvar.h>
#include <sys/starcat.h>
#include <sys/callb.h>

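/*
 * Form a pointer to the 32-bit AXQ register located at byte
 * offset "o" from the mapped base address "b".
 */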
#define	REG_ADDR(b, o)	(uint32_t *)((caddr_t)(b) + (o))

/*
 * Function prototypes
 */

/* autoconfig entry point function definitions */
static int axq_attach(dev_info_t *, ddi_attach_cmd_t);
static int axq_detach(dev_info_t *, ddi_detach_cmd_t);
static int axq_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

/* internal axq definitions */
static void axq_init(struct axq_soft_state *);
static void axq_init_local(struct axq_local_regs *);

/* axq kstats */
static void axq_add_picN_kstats(dev_info_t *dip);
static void axq_add_kstats(struct axq_soft_state *);
static int axq_counters_kstat_update(kstat_t *, int);

/*
 * Configuration data structures
 */
static struct cb_ops axq_cb_ops = {
	nulldev,			/* open */
	nulldev,			/* close */
	nulldev,			/* strategy */
	nulldev,			/* print */
	nodev,				/* dump */
	nulldev,			/* read */
	nulldev,			/* write */
	nulldev,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_MP | D_NEW,			/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* cb_aread */
	nodev				/* cb_awrite */
};

static struct dev_ops axq_ops = {
	DEVO_REV,			/* rev */
	0,				/* refcnt  */
	axq_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	axq_attach,			/* attach */
	axq_detach,			/* detach */
	nulldev,			/* reset */
	&axq_cb_ops,			/* cb_ops */
	(struct bus_ops *)0,		/* bus_ops */
	nulldev,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


/*
 * AXQ globals
 */
struct axq_soft_state *axq_array[AXQ_MAX_EXP][AXQ_MAX_SLOT_PER_EXP];
krwlock_t axq_array_lock;
struct axq_local_regs axq_local;
int use_axq_iopause = 1;	/* axq iopause enabled by default */
/*
 * If non-zero, iopause will be asserted during DDI_SUSPEND.
 * Clients using the axq_iopause_*_all interfaces should set this to zero.
 */
int axq_suspend_iopause = 1;

/*
 * loadable module support
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"AXQ driver",		/* name of module */
	&axq_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

static void *axq_softp;

/*
 * AXQ Performance counters
 * We statically declare an array of the known
 * AXQ event names and event masks. The number
 * of events in this array is AXQ_NUM_EVENTS.
 */
static axq_event_mask_t axq_events[AXQ_NUM_EVENTS] = {
	{"count_clk", COUNT_CLK}, {"freeze_cnt", FREEZE_CNT},
	{"ha_input_fifo", HA_INPUT_FIFO}, {"ha_intr_info", HA_INTR_INFO},
	{"ha_pio_fifo", HA_PIO_FIFO}, {"ha_adr_fifo_lk3", HA_ADR_FIFO_LK3},
	{"ha_adr_fifo_lk2", HA_ADR_FIFO_LK2},
	{"ha_adr_fifo_lk1", HA_ADR_FIFO_LK1},
	{"ha_adr_fifo_lk0", HA_ADR_FIFO_LK0},
	{"ha_dump_q", HA_DUMP_Q},
	{"ha_rd_f_stb_q", HA_RD_F_STB_Q},
	{"ha_dp_wr_q", HA_DP_WR_Q},
	{"ha_int_q", HA_INT_Q},
	{"ha_wrb_q", HA_WRB_Q},
	{"ha_wr_mp_q", HA_WR_MP_Q},
	{"ha_wrtag_q", HA_WRTAG_Q},
	{"ha_wt_wait_fifo", HA_WT_WAIT_FIFO},
	{"ha_wrb_stb_fifo", HA_WRB_STB_FIFO},
	{"ha_ap0_q", HA_AP0_Q},
	{"ha_ap1_q", HA_AP1_Q},
	{"ha_new_wr_q", HA_NEW_WR_Q},
	{"ha_dp_rd_q", HA_DP_RD_Q},
	{"ha_unlock_q", HA_UNLOCK_Q},
	{"ha_cdc_upd_q", HA_CDC_UPD_Q},
	{"ha_ds_q", HA_DS_Q},
	{"ha_unlk_wait_q", HA_UNLK_WAIT_Q},
	{"ha_rd_mp_q", HA_RD_MP_Q},
	{"l2_io_q", L2_IO_Q},
	{"l2_sb_q", L2_SB_Q},
	{"l2_ra_q", L2_RA_Q},
	{"l2_ha_q", L2_HA_Q},
	{"l2_sa_q", L2_SA_Q},
	{"ra_wait_fifo", RA_WAIT_FIFO},
	{"ra_wrb_inv_fifo", RA_WRB_INV_FIFO},
	{"ra_wrb_fifo", RA_WRB_FIFO},
	{"ra_cc_ptr_fifo", RA_CC_PTR_FIFO},
	{"ra_io_ptr_fifo", RA_IO_PTR_FIFO},
	{"ra_int_ptr_fifo", RA_INT_PTR_FIFO},
	{"ra_rp_q", RA_RP_Q},
	{"ra_wrb_rp_q", RA_WRB_RP_Q},
	{"ra_dp_q", RA_DP_Q},
	{"ra_dp_stb_q", RA_DP_STB_Q},
	{"ra_gtarg_q", RA_GTARG_Q},
	{"sdc_recv_q", SDC_RECV_Q},
	{"sdc_redir_io_q", SDC_REDIR_IO_Q},
	{"sdc_redir_sb_q", SDC_REDIR_SB_Q},
	{"sdc_outb_io_q", SDC_OUTB_IO_Q},
	{"sdc_outb_sb_q", SDC_OUTB_SB_Q},
	{"sa_add1_input_q", SA_ADD1_INPUT_Q},
	{"sa_add2_input_q", SA_ADD2_INPUT_Q},
	{"sa_inv_q", SA_INV_Q},
	{"sa_no_inv_q", SA_NO_INV_Q},
	{"sa_int_dp_q", SA_INT_DP_Q},
	{"sa_dp_q", SA_DP_Q},
	{"sl_wrtag_q", SL_WRTAG_Q},
	{"sl_rto_dp_q", SL_RTO_DP_Q},
	{"syreg_input_q", SYSREG_INPUT_Q},
	{"sdi_sys_status1", SDI_SYS_STATUS1},
	{"sdi_sys_status0", SDI_SYS_STATUS0},
	{"cdc_hits", CDC_HITS},
	{"total_cdc_read", TOTAL_CDC_READ},
	{"ha_watranid_sd", HA_WATRANID_SD},
	{"ha_stb_sd", HA_STB_SD},
	{"ha_l2_irq_sd", HA_L2_IRQ_SD},
	{"ha_sl_wrtag_sd", HA_SL_WRTAG_SD},
	{"aa_home_cc_full", AA_HOME_CC_FULL},
	{"aa_home_io_full", AA_HOME_IO_FULL},
	{"aa_slave_full", AA_SLAVE_FULL},
	{"aa_rp_full", AA_RP_FULL}
};

static kstat_t *axq_picN_ksp[AXQ_NUM_PICS];	/* picN kstats */
static int axq_attachcnt = 0;		/* # of instances attached */
static kmutex_t axq_attachcnt_lock;	/* lock for attachcnt */

static int axq_map_phys(dev_info_t *, struct regspec *, caddr_t *,
    ddi_device_acc_attr_t *, ddi_acc_handle_t *);
static void axq_unmap_phys(ddi_acc_handle_t *);

int starcat_axq_pio_workaround(dev_info_t *);
static int axq_slot1_idle(struct axq_soft_state *);

static boolean_t axq_panic_callb(void *, int);
static callb_id_t axq_panic_cb_id;

/*
 * These are the module initialization routines.
 */

int
_init(void)
{
	int error;

	if ((error = ddi_soft_state_init(&axq_softp,
	    sizeof (struct axq_soft_state), 1)) != 0)
		return (error);

	rw_init(&axq_array_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&axq_local.axq_local_lock, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&axq_attachcnt_lock, NULL, MUTEX_DRIVER, NULL);

	axq_local.initflag = 0;

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&axq_softp);
		mutex_destroy(&axq_attachcnt_lock);
		mutex_destroy(&axq_local.axq_local_lock);
		rw_destroy(&axq_array_lock);
		return (error);
	}

	axq_panic_cb_id = callb_add(axq_panic_callb, (void *)NULL,
	    CB_CL_PANIC, "axq_panic");

	return (0);
}

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	ddi_soft_state_fini(&axq_softp);
	mutex_destroy(&axq_attachcnt_lock);
	mutex_destroy(&axq_local.axq_local_lock);
	rw_destroy(&axq_array_lock);

	(void) callb_delete(axq_panic_cb_id);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
axq_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance;
	struct axq_soft_state *softsp;
	ddi_device_acc_attr_t attr;
	extern uint64_t va_to_pa(void *);

	instance = ddi_get_instance(devi);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/*
		 * Reenable the axq io pause if it is
		 * employed. See the DDI_SUSPEND comments
		 */
		softsp = ddi_get_soft_state(axq_softp, instance);
		if (softsp->slotnum && softsp->paused && use_axq_iopause &&
		    axq_suspend_iopause) {
			*softsp->axq_domain_ctrl &= ~AXQ_DOMCTRL_PAUSE;
			softsp->paused = 0;
		}
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(axq_softp, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	softsp = ddi_get_soft_state(axq_softp, instance);

	/* Set the dip in the soft state */
	softsp->dip = devi;

	/* Get the "portid" property */
	if ((softsp->portid = (int)ddi_getprop(DDI_DEV_T_ANY, softsp->dip,
	    DDI_PROP_DONTPASS, "portid", -1)) == -1) {
		cmn_err(CE_WARN, "Unable to retrieve safari portid "
		    "property.");
		goto bad;
	}

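	/*
	 * The expander number is carried in the upper bits of the
	 * Safari portid (the EEEEE bits of the EEEEE111xx encoding
	 * described in starcat_axq_pio_workaround() below).
	 */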
	softsp->expid = softsp->portid >> 5;

	/*
	 * derive the slot # from the portid - for starcat, it is
	 * either 0 or 1 based on the lsb of the axq portid.
	 */
	softsp->slotnum = softsp->portid & 0x1;

	/*
	 * map in the regs. There are two regspecs - one
	 * in safari config space and the other in local space.
	 */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	if (ddi_regs_map_setup(softsp->dip, 0, &softsp->address, 0, 0,
	    &attr, &softsp->ac0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: unable to map reg set 0\n",
		    ddi_get_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		goto bad;
	}

	/*
	 * This is a hack to support DR copy-rename scripting.
	 * Get the physical address of the start of the
	 * AXQ config space and save it.
	 */
	softsp->axq_phyaddr = va_to_pa((caddr_t)softsp->address);

	axq_init(softsp);

	/*
	 * Map in the regs for local space access.
	 * This mapping is global for all axq instances,
	 * so only the first instance to get here sets it
	 * up for the rest of the gang.
	 * Note that this mapping is never removed.
	 */
	mutex_enter(&axq_local.axq_local_lock);
	if (!axq_local.initflag) {
		/* initialize and map in the local space */
		if (ddi_regs_map_setup(softsp->dip, 1,
		    &axq_local.laddress, 0, 0,
		    &attr, &axq_local.ac) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: unable to map reg set 1\n",
			    ddi_get_name(softsp->dip),
			    ddi_get_instance(softsp->dip));
			ddi_regs_map_free(&softsp->ac0);
			mutex_exit(&axq_local.axq_local_lock);
			goto bad;
		}
		axq_init_local(&axq_local);
		axq_local.initflag = 1;
	}
	mutex_exit(&axq_local.axq_local_lock);

	mutex_init(&softsp->axq_lock, NULL, MUTEX_DRIVER, NULL);

	/* update the axq array for this new instance */
	rw_enter(&axq_array_lock, RW_WRITER);
	ASSERT(axq_array[softsp->expid][softsp->slotnum] == NULL);
	axq_array[softsp->expid][softsp->slotnum] = softsp;
	rw_exit(&axq_array_lock);

	axq_add_kstats(softsp);

	ddi_report_dev(devi);

	return (DDI_SUCCESS);

bad:
	ddi_soft_state_free(axq_softp, instance);
	return (DDI_FAILURE);
}


static void
axq_init(struct axq_soft_state *softsp)
{
	int i;

	/*
	 * Setup the AXQ registers
	 * Some offsets and availability are dependent on the slot type
	 */
	if (softsp->slotnum == 0) {
		/* This is a slot type 0 AXQ */
		softsp->axq_domain_ctrl = REG_ADDR(softsp->address,
		    AXQ_SLOT0_DOMCTRL);
		softsp->axq_cdc_addrtest = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_ADR_TEST);
		softsp->axq_cdc_ctrltest = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_CTL_TEST);
		softsp->axq_cdc_datawrite0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR0);
		softsp->axq_cdc_datawrite1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR1);
		softsp->axq_cdc_datawrite2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR2);
		softsp->axq_cdc_datawrite3 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR3);
		softsp->axq_cdc_counter = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_CNT_TEST);
		softsp->axq_cdc_readdata0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA0);
		softsp->axq_cdc_readdata1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA1);
		softsp->axq_cdc_readdata2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA2);
		softsp->axq_cdc_readdata3 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA3);
		softsp->axq_pcr = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT_SEL);
		softsp->axq_pic0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT0);
		softsp->axq_pic1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT1);
		softsp->axq_pic2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT2);
		softsp->axq_nasm = REG_ADDR(softsp->address, AXQ_SLOT0_NASM);
	} else {
		/* slot type 1 AXQ */
		softsp->axq_domain_ctrl = REG_ADDR(softsp->address,
		    AXQ_SLOT1_DOMCTRL);
		softsp->axq_pcr = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT_SEL);
		softsp->axq_pic0 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT0);
		softsp->axq_pic1 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT1);
		softsp->axq_pic2 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT2);
		softsp->axq_nasm = REG_ADDR(softsp->address, AXQ_SLOT1_NASM);
	}

	/* setup CASM slots */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		softsp->axq_casm_slot[i] = REG_ADDR(softsp->address,
		    (AXQ_CASM_SLOT_START + AXQ_REGOFF(i)));
	}

	/* setup SDI timeout register accesses */
	softsp->axq_sdi_timeout_rd = REG_ADDR(softsp->address,
	    AXQ_SLOT_SDI_TIMEOUT_RD);
	softsp->axq_sdi_timeout_rdclr = REG_ADDR(softsp->address,
	    AXQ_SLOT_SDI_TIMEOUT_RDCLR);

	/*
	 * Save the CDC state (enabled or disabled)
	 * as originally set up by POST.
	 */
	if (softsp->slotnum == 0) {
		softsp->axq_cdc_state = *softsp->axq_cdc_ctrltest &
		    AXQ_CDC_DIS;
	}

#ifndef _AXQ_LOCAL_ACCESS_SUPPORTED
	/*
	 * Setup cpu2ssc intr register in explicit expander
	 * space. Local space addressing for this is broken,
	 * we'll use explicit addressing for now.
	 */
	softsp->axq_cpu2ssc_intr = REG_ADDR(softsp->address,
	    AXQ_SLOT_CPU2SSC_INTR);
#endif /* _AXQ_LOCAL_ACCESS_SUPPORTED */
}


static void
axq_init_local(struct axq_local_regs *localregs)
{
	/*
	 * local access to cpu2ssc intr register will
	 * be the only one that may work properly in the
	 * next revision of the AXQ asics.
	 * Set it up here for now.
	 */
	localregs->axq_cpu2ssc_intr = REG_ADDR(localregs->laddress,
	    AXQ_SLOT_CPU2SSC_INTR);
}

/* ARGSUSED */
static int
axq_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	int i;
	struct axq_soft_state *softsp;
	processorid_t cpuid;

	/* get the instance of this devi */
	instance = ddi_get_instance(devi);

	/* get the soft state pointer for this device node */
	softsp = ddi_get_soft_state(axq_softp, instance);

	switch (cmd) {
	case DDI_SUSPEND:
		/*
		 * Depending on the variable "use_axq_iopause"
		 * we set the axq iopause bit as a paranoid
		 * safety net. This assumes all the devices
		 * associated with the slot are already suspended.
		 * Care must be taken not to set iopause when CPUs
		 * are known to be present on the slot 1 board,
		 * i.e. the MCPU board type.
		 * This io pause bit only applies to the slot 1 axq.
		 */
		if (softsp->slotnum && use_axq_iopause && axq_suspend_iopause) {
			/*
			 * Do not enable AXQ_DOMCTRL_PAUSE if CPUs are
			 * known to be present in slot 1.
			 */
			mutex_enter(&cpu_lock);
			for (i = 0; i < STARCAT_SLOT1_CPU_MAX; i++) {
				cpuid = MAKE_CPUID(softsp->expid,
				    softsp->slotnum, i);
				if (cpu[cpuid]) {
					mutex_exit(&cpu_lock);
					return (DDI_SUCCESS);
				}
			}
			mutex_exit(&cpu_lock);

			/*
			 * Make sure that there is no outstanding
			 * I/O activity by reading the domain ctrl reg.
			 * A non-zero lsb indicates no I/O activity.
			 */
			if (axq_slot1_idle(softsp) == DDI_FAILURE) {
				cmn_err(CE_WARN, "%s%d: busy! suspend failed",
				    ddi_get_name(softsp->dip),
				    ddi_get_instance(softsp->dip));
				return (DDI_FAILURE);
			}

			*softsp->axq_domain_ctrl |= AXQ_DOMCTRL_PAUSE;
			softsp->paused = 1;
		}
		return (DDI_SUCCESS);

	case DDI_DETACH:
		rw_enter(&axq_array_lock, RW_WRITER);
		ASSERT(axq_array[softsp->expid][softsp->slotnum]
		    != NULL);
		axq_array[softsp->expid][softsp->slotnum] = NULL;
		rw_exit(&axq_array_lock);

		ddi_regs_map_free(&softsp->ac0);

		/*
		 * remove counter kstats for this device
		 */
		if (softsp->axq_counters_ksp != (kstat_t *)NULL) {
			kstat_delete(softsp->axq_counters_ksp);
		}

		/*
		 * See if we are the last instance to detach.
		 * If so, we need to remove the picN kstats
		 */
		mutex_enter(&axq_attachcnt_lock);
		if (--axq_attachcnt == 0) {
			for (i = 0; i < AXQ_NUM_PICS; i++) {
				if (axq_picN_ksp[i] != (kstat_t *)NULL) {
					kstat_delete(axq_picN_ksp[i]);
					axq_picN_ksp[i] = NULL;
				}
			}
		}
		mutex_exit(&axq_attachcnt_lock);

		ddi_soft_state_free(axq_softp, instance);

		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}


/* ARGSUSED0 */
static int
axq_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t dev = (dev_t)arg;
	struct axq_soft_state *softsp;
	int instance, ret;

	instance = getminor(dev);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softsp = (struct axq_soft_state *)
			    ddi_get_soft_state(axq_softp, instance);
			if (softsp == NULL) {
				ret = DDI_FAILURE;
			} else {
				*result = softsp->dip;
				ret = DDI_SUCCESS;
			}
			break;
		case DDI_INFO_DEVT2INSTANCE:
			*result = (void *)(uintptr_t)instance;
			ret = DDI_SUCCESS;
			break;
		default:
			ret = DDI_FAILURE;
			break;
	}
	return (ret);
}

/*
 * Flush the CDC Sram of the slot0 axq
 * indicated by the expid argument
 */
int
axq_cdc_flush(uint32_t expid, int held, int disabled)
{
	struct axq_soft_state *softsp;
	uint32_t axq_ctrl_test_save0;
	uint32_t tmpval;
	int retval = 0;
	int i;

	if (!held)
		rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][SLOT0_AXQ] != NULL);

	softsp = axq_array[expid][SLOT0_AXQ];

	mutex_enter(&softsp->axq_lock);

	/* save the value of the ctrl test reg */
	axq_ctrl_test_save0 = *softsp->axq_cdc_ctrltest;

	/* disable sram and setup the ctrl test reg for flushing */
	tmpval = axq_ctrl_test_save0 & (AXQ_CDC_DATA_ECC_CHK_EN |
	    AXQ_CDC_ADR_PAR_CHK_EN |
	    AXQ_CDC_DATA_ECC_GEN_EN |
	    AXQ_CDC_ADR_PAR_GEN_EN);
	*softsp->axq_cdc_ctrltest = tmpval | AXQ_CDC_TMODE_WR
	    | AXQ_CDC_DATA2PAR_MUX_SEL_DATA
	    | AXQ_CDC_ADR2SRAM_MUX_SEL_TEST
	    | AXQ_CDC_ADR_INCR_XOR_CTRL
	    | AXQ_CDC_DIS;

	/* Enable CDC test in the CDC Address test reg */
	*softsp->axq_cdc_addrtest = AXQ_CDC_ADR_TEST_EN;

	/* clear the CDC Data write regs */
	*softsp->axq_cdc_datawrite0 = *softsp->axq_cdc_datawrite1 = 0;
	*softsp->axq_cdc_datawrite2 = *softsp->axq_cdc_datawrite3 = 0;

	/*
	 * write in the size of the sram to clear
	 * into the CDC Counter test reg
	 */
	*softsp->axq_cdc_counter = AXQ_CDC_SRAM_SIZE;

	/* wait for flush to complete */
	for (i = 0; i < AXQ_CDC_FLUSH_WAIT; i++) {
		DELAY(3000); /* should take only 1750 usecs */
		if (((*softsp->axq_cdc_counter) &
		    AXQ_CDC_CNT_TEST_DONE) != 0) {
			break;
		}
	}
	if (i >= AXQ_CDC_FLUSH_WAIT) {
		retval = DDI_FAILURE;
		cmn_err(CE_WARN, "axq_cdc_flush failed on expander %d",
		    expid);
	}

	/*
	 * Disable test mode in CDC address test reg
	 */
	*softsp->axq_cdc_addrtest = 0;

	/*
	 * Restore the ctrl test reg, leaving the CDC
	 * disabled if the "disabled" option is requested.
	 */
	if (disabled)
		axq_ctrl_test_save0 |= AXQ_CDC_DIS;
	*softsp->axq_cdc_ctrltest = axq_ctrl_test_save0;

	mutex_exit(&softsp->axq_lock);

	if (!held)
		rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Flush all the CDC srams for all the AXQs in
 * the local domain.
 */
int
axq_cdc_flush_all()
{
	int retval = DDI_SUCCESS;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			retval = axq_cdc_flush(i, 1, 0);
			if (retval != DDI_SUCCESS)
				break;
		}
	}
	rw_exit(&axq_array_lock);
	return (retval);
}

/*
 * Disable and flush all CDC srams for all the AXQs
 * in the local domain.
 */
int
axq_cdc_disable_flush_all()
{
	int retval = DDI_SUCCESS;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	/*
	 * Disable and flush all the CDC srams
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			retval = axq_cdc_flush(i, 1, 1);
			if (retval != DDI_SUCCESS)
				break;
		}
	}
	rw_exit(&axq_array_lock);

	if (retval != DDI_SUCCESS) {
		axq_cdc_enable_all();
	}
	return (retval);
}


/*
 * Enable the CDC srams for all the AXQs in
 * the local domain. This routine is used in
 * conjunction with axq_cdc_disable_flush_all().
 */
void
axq_cdc_enable_all()
{
	struct axq_soft_state *softsp;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	/*
	 * Enable all the CDC sram
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			mutex_enter(&softsp->axq_lock);
			if (softsp->axq_cdc_state != AXQ_CDC_DIS) {
				*softsp->axq_cdc_ctrltest &= ~AXQ_CDC_DIS;
			}
			mutex_exit(&softsp->axq_lock);
		}
	}
	rw_exit(&axq_array_lock);
}

/*
 * Interface for DR to enable slot1 iopause after cpus have been idled.
 * Precondition is for all devices to have been suspended (including axq).
 * This routine avoids locks as it is called by DR with cpus paused.
 */
int
axq_iopause_enable_all(uint32_t *errexp)
{
	int i, j;
	int retval = DDI_SUCCESS;
	processorid_t cpuid;
	struct axq_soft_state *softsp;

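	/*
	 * Brief settle delay before sampling the domain ctrl regs,
	 * presumably to let in-flight transactions drain.
	 */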
	DELAY(1000);
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL &&
		    use_axq_iopause) {
			/*
			 * Do not enable if cpus configured in slot1.
			 * Unconfigured cpus should be idle in nc space.
			 */
			for (j = 0; j < STARCAT_SLOT1_CPU_MAX; j++) {
				cpuid = MAKE_CPUID(softsp->expid,
				    softsp->slotnum, j);
				if (cpu[cpuid]) {
					break;
				}
			}
			if (j < STARCAT_SLOT1_CPU_MAX) {
				continue;
			}

			retval = axq_slot1_idle(softsp);
			if (retval == DDI_FAILURE) {
				break;
			}

			*softsp->axq_domain_ctrl |= AXQ_DOMCTRL_PAUSE;
			softsp->paused = 1;
		}
	}

	if (retval != DDI_SUCCESS) {
		ASSERT(errexp);
		*errexp = i;
		axq_iopause_disable_all();
	}
	return (retval);
}

/*
 * De-assert axq iopause on all slot1 boards. This routine avoids locks
 * as it is called by DR with cpus paused.
 */
void
axq_iopause_disable_all()
{
	int i;
	struct axq_soft_state *softsp;

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL &&
		    softsp->paused) {
			*softsp->axq_domain_ctrl &= ~AXQ_DOMCTRL_PAUSE;
			softsp->paused = 0;
		}
	}
}

/*
 * Attempt to wait for slot1 activity to go idle.
 */
static int
axq_slot1_idle(struct axq_soft_state *softsp)
{
	int i;

	ASSERT(softsp->slotnum == SLOT1_AXQ);
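	/*
	 * Poll the domain ctrl reg; per the DDI_SUSPEND comments in
	 * axq_detach(), a non-zero lsb (AXQ_DOMCTRL_BUSY) indicates
	 * that there is no outstanding I/O activity.
	 */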
	for (i = 0; i < 10; i++) {
		if ((*(softsp->axq_domain_ctrl) & AXQ_DOMCTRL_BUSY) != 0) {
			return (DDI_SUCCESS);
		}
		DELAY(50);
	}
	return (DDI_FAILURE);
}

/*
 * Read a particular NASM entry
 */
int
axq_nasm_read(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t *data)
{
	axq_nasm_read_u aread;
	axq_nasm_write_u awrite;
	struct axq_soft_state *softsp;

	if (slot >= AXQ_MAX_SLOT_PER_EXP ||
	    expid >= AXQ_MAX_EXP ||
	    nasm_entry >= AXQ_NASM_SIZE) {
		return (DDI_FAILURE);
	}

	awrite.bit.rw = 0;	/* read operation */
	awrite.bit.addr = nasm_entry;
	awrite.bit.data = 0;

	rw_enter(&axq_array_lock, RW_READER);

	softsp = axq_array[expid][slot];
	if (softsp == NULL) {
		rw_exit(&axq_array_lock);
		return (DDI_FAILURE);
	}

	mutex_enter(&softsp->axq_lock);

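	/*
	 * NASM entries are accessed indirectly: post the command word
	 * (rw = 0 for a read) to the NASM register, then read the same
	 * register back; the valid bit qualifies the returned data.
	 */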
	*(softsp->axq_nasm) = awrite.val;
	aread.val = *(softsp->axq_nasm);

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	if (aread.bit.valid) {
		*data = aread.bit.data;
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}

/*
 * Write a particular NASM entry
 */
static int
axq_nasm_write_one(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t data)
{
	axq_nasm_write_u awrite;
	struct axq_soft_state *softsp;

	/*
	 * Note: need to make sure axq_array_lock held first, so that a
	 * paused thread is not holding softsp->axq_lock, which could
	 * result in deadlock.
	 */
	ASSERT(RW_LOCK_HELD(&axq_array_lock));

	if (slot >= AXQ_MAX_SLOT_PER_EXP ||
	    expid >= AXQ_MAX_EXP ||
	    nasm_entry >= AXQ_NASM_SIZE) {
		return (DDI_FAILURE);
	}

	awrite.bit.rw = 1;	/* write operation */
	awrite.bit.addr = nasm_entry;
	awrite.bit.data = data;

	softsp = axq_array[expid][slot];
	if (softsp == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&softsp->axq_lock);

	*(softsp->axq_nasm) = awrite.val;

	mutex_exit(&softsp->axq_lock);

	return (DDI_SUCCESS);
}

int
axq_nasm_write(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t data)
{
	int rc;

	rw_enter(&axq_array_lock, RW_READER);
	rc = axq_nasm_write_one(expid, slot, nasm_entry, data);
	rw_exit(&axq_array_lock);
	return (rc);
}

/*
 * Write a particular NASM entry for all the
 * axqs in the domain.
 * Note: other CPUs are paused when this function is called.
 */
int
axq_nasm_write_all(uint32_t nasm_entry, uint32_t data)
{
	int i;
	int rc;

	ASSERT(RW_WRITE_HELD(&axq_array_lock));

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			rc = axq_nasm_write_one(i, SLOT0_AXQ, nasm_entry,
			    data);
			if (rc != DDI_SUCCESS) {
				return (DDI_FAILURE);
			}
		}
		if (axq_array[i][SLOT1_AXQ] != NULL) {
			rc = axq_nasm_write_one(i, SLOT1_AXQ, nasm_entry,
			    data);
			if (rc != DDI_SUCCESS) {
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

/*
 * Take write lock for axq_nasm_write_all() outside
 * critical section where other CPUs are paused.
 */
void
axq_array_rw_enter(void)
{
	rw_enter(&axq_array_lock, RW_WRITER);
}

/*
 * Release write lock for axq_nasm_write_all() outside
 * critical section where other CPUs are paused.
 */
void
axq_array_rw_exit(void)
{
	rw_exit(&axq_array_lock);
}

/*
 * Read a particular CASM entry
 */
uint32_t
axq_casm_read(uint32_t expid, uint32_t slot, int casmslot)
{
	struct axq_soft_state *softsp;
	uint32_t retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);
	ASSERT(casmslot >= 0 && casmslot < AXQ_MAX_EXP);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	retval = *(softsp->axq_casm_slot[casmslot]);

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Write a particular CASM entry
 */

int
axq_casm_write(uint32_t expid, uint32_t slot, int casmslot,
    uint32_t value)
{
	struct axq_soft_state *softsp;
	int retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);
	ASSERT(casmslot >= 0 && casmslot < AXQ_MAX_EXP);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	/*
	 * First read the casm slot in question;
	 * it should be non-zero to indicate that
	 * we have write permission to update it.
	 * Note that if we write it without valid
	 * permission, we can get an exception.
	 */
	if (*(softsp->axq_casm_slot[casmslot])) {
		*(softsp->axq_casm_slot[casmslot]) = value;
		retval = DDI_SUCCESS;
	} else {
		retval = DDI_FAILURE;
	}

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);
	return (retval);
}

/*
 * Write a particular CASM entry for all the
 * axqs in the domain
 */

int
axq_casm_write_all(int casmslot, uint32_t value)
{
	int i;
	struct axq_soft_state *softsp;

	/*
	 * Since we are updating all the AXQs,
	 * it will be easier to simply grab
	 * exclusive access to the AXQs by obtaining
	 * the RW_WRITER access to the axq_array.
	 */
	rw_enter(&axq_array_lock, RW_WRITER);

	/*
	 * Paranoid check: run thru all the avail AXQs
	 * and make sure we can write into the slot in question.
	 * We check this by reading the slot; it should be
	 * non-zero.
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			if (*(softsp->axq_casm_slot[casmslot])
			    == 0) {
				break;
			}
		}
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL) {
			if (*(softsp->axq_casm_slot[casmslot])
			    == 0) {
				break;
			}
		}
	}

	if (i < AXQ_MAX_EXP) {
		/*
		 * We have no write permission for some AXQ
		 * for the CASM slot in question. Flag it
		 * as an error
		 */
		rw_exit(&axq_array_lock);
		return (DDI_FAILURE);
	}

	/*
	 * everything looks good - do the update
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			*softsp->axq_casm_slot[casmslot] = value;
		}
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL) {
			*softsp->axq_casm_slot[casmslot] = value;
		}
	}

	rw_exit(&axq_array_lock);
	return (DDI_SUCCESS);
}


/*
 * Construct a script of <physicaladdr, data> tuple pairs that
 * reprogram all the AXQs in the local domain to swap the
 * contents of casmslot0 with casmslot1.
 */
int
axq_do_casm_rename_script(uint64_t **script_elm, int casmslot0,
    int casmslot1)
{
	struct axq_soft_state *softsp;
	int i, slot;
	uint32_t val0, val1;
	uint64_t *s_elm = *script_elm;
	uint64_t paddr;

	/*
	 * There should be some global locking at the
	 * DR level to do this - since this is one of
	 * the sequence of steps in copyrename.
	 * For now, we grab the RW_WRITER lock for
	 * script construction.
	 */
	rw_enter(&axq_array_lock, RW_WRITER);

	/*
	 * Construct the <physicaladdr, data> tuple pairs
	 * for reprogramming the AXQs so that the value in
	 * casmslot0 is swapped with the content in casmslot1.
	 * Paranoid check: We make sure that we can write to
	 * both slots in all the AXQs by reading the slots and
	 * they should be non-zero.
	 */
	for (slot = SLOT0_AXQ; slot <= SLOT1_AXQ; slot++) {
		for (i = 0; i < AXQ_MAX_EXP; i++) {
			if ((softsp = axq_array[i][slot]) != NULL) {
				paddr = softsp->axq_phyaddr;
				val0 = *(softsp->axq_casm_slot[casmslot0]);
				val1 = *(softsp->axq_casm_slot[casmslot1]);
				if (val0 != 0 && val1 != 0) {
					*s_elm++ = paddr +
					    AXQ_CASM_SLOT_START +
					    AXQ_REGOFF(casmslot0);
					*s_elm++ = val1;
					*s_elm++ = paddr +
					    AXQ_CASM_SLOT_START +
					    AXQ_REGOFF(casmslot1);
					*s_elm++ = val0;
				} else {
					/*
					 * Somehow we can't access one
					 * of the casm slots - quit.
					 */
					break;
				}
			}
		}
		if (i < AXQ_MAX_EXP)
			break;
	}

	rw_exit(&axq_array_lock);

	if (slot > SLOT1_AXQ) {
		/* successful */
		*script_elm = s_elm;
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}


/*
 * Send an interrupt to the SSC passing
 * an 8-bit cookie value
 */
int
axq_cpu2ssc_intr(uint8_t cookie)
{
	int retval, i;
	volatile uint32_t *intr_reg;

#ifndef	_AXQ_LOCAL_SPACE_SUPPORTED
	/* Local space access not available */

	int exp, slot;

	rw_enter(&axq_array_lock, RW_READER);

	/* Make sure the current cpu is not switched out */
	kpreempt_disable();

	/*
	 * Compute the exp# and slot# of the current cpu
	 * so that we know which AXQ cpu2ssc intr reg to
	 * use.
	 */
	exp = CPU->cpu_id >> 5;
	slot = (CPU->cpu_id >> 3) & 0x1;

	intr_reg = axq_array[exp][slot]->axq_cpu2ssc_intr;
#else
	/* use local space */
	intr_reg = axq_local.axq_cpu2ssc_intr;
#endif /* _AXQ_LOCAL_SPACE_SUPPORTED */

	ASSERT(intr_reg != 0);

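	/*
	 * Wait for any previously posted interrupt to be consumed by
	 * the SSC (pending bit clear) before writing the new cookie.
	 */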
	retval = DDI_FAILURE;
	for (i = 0; i < AXQ_INTR_PEND_WAIT; i++) {
		if (!(*intr_reg & AXQ_CPU2SSC_INTR_PEND)) {
			*intr_reg = cookie;
			retval = DDI_SUCCESS;
			break;
		}
		DELAY(200);
	}

#ifndef	_AXQ_LOCAL_SPACE_SUPPORTED
	kpreempt_enable();
	rw_exit(&axq_array_lock);
#endif
	return (retval);
}


/*
 * Read the SDI timeout register (SRD use)
 * This routine accepts a clear flag to indicate
 * whether the register should be cleared after
 * the read.
 */
uint32_t
axq_read_sdi_timeout_reg(uint32_t expid, uint32_t slot, int clearflag)
{
	struct axq_soft_state *softsp;
	uint32_t retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	if (clearflag) {
		/* read and then clear register */
		retval = *softsp->axq_sdi_timeout_rdclr;
	} else {
		retval = *softsp->axq_sdi_timeout_rd;
	}

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Routine to create a kstat for each %pic that
 * the AXQ has (there are 3 of them). These read-only
 * kstats export event names that the respective %pic
 * supports. Pic0 and Pic1 are similar and they both have
 * a 128-input mux. Pic2 counts the clock and can be set
 * up to count or freeze.
 * Note that all AXQ instances use the same events, so we
 * only need to create one set of the picN kstats.
 */
static void
axq_add_picN_kstats(dev_info_t *dip)
{
	struct kstat_named *axq_pic_named_data;
	int event, pic;
	int instance = ddi_get_instance(dip);
	int pic_shift = 0;

	/*
	 * Create the picN kstats for Pic0 and Pic1.
	 * Both have a similar set of events. Add one
	 * extra event for the clear_event mask.
	 */
	for (pic = 0; pic < AXQ_NUM_PICS; pic++) {
		char pic_name[20];
		int num_events, i;

		(void) sprintf(pic_name, "pic%d", pic);

		num_events = (pic <= 1) ? AXQ_PIC0_1_NUM_EVENTS :
		    AXQ_PIC2_NUM_EVENTS;

		if ((axq_picN_ksp[pic] = kstat_create("axq",
		    instance, pic_name, "bus", KSTAT_TYPE_NAMED,
		    num_events + 1, NULL)) == NULL) {
			cmn_err(CE_WARN, "axq %s: kstat_create failed",
			    pic_name);

			/* remove pic kstats that were created earlier */
			for (i = 0; i < pic; i++) {
				kstat_delete(axq_picN_ksp[i]);
				axq_picN_ksp[i] = NULL;
			}
			return;
		}

		axq_pic_named_data =
		    (struct kstat_named *)(axq_picN_ksp[pic]->ks_data);

		pic_shift = pic * AXQ_PIC_SHIFT;

		/*
		 * for each picN event, write a kstat record of
		 * name = EVENT & value.ui64 = PCR_MASK.
		 */
		for (event = 0; event < num_events; event++) {
			/* pcr_mask */
			axq_pic_named_data[event].value.ui64 =
			    axq_events[event].pcr_mask << pic_shift;

			/* event name */
			kstat_named_init(&axq_pic_named_data[event],
			    axq_events[event].event_name,
			    KSTAT_DATA_UINT64);
		}

		/*
		 * Add the clear pic event and mask as the last
		 * record in the kstat.
		 */
		axq_pic_named_data[num_events].value.ui64 =
		    (uint32_t)~(AXQ_PIC_CLEAR_MASK << pic_shift);

		kstat_named_init(&axq_pic_named_data[num_events],
		    "clear_pic", KSTAT_DATA_UINT64);

		kstat_install(axq_picN_ksp[pic]);
	}
}


static void
axq_add_kstats(struct axq_soft_state *softsp)
{
	struct kstat *axq_counters_ksp;
	struct kstat_named *axq_counters_named_data;

	/*
	 * Create the picN kstats if we are the first instance
	 * to attach. We use axq_attachcnt as a count of how
	 * many instances have attached. This is protected by
	 * a lock.
	 */
	mutex_enter(&axq_attachcnt_lock);
	if (axq_attachcnt++ == 0)
		axq_add_picN_kstats(softsp->dip);

	mutex_exit(&axq_attachcnt_lock);

	/*
	 * A "counter" kstat is created for each axq
	 * instance that provides access to the %pcr and %pic
	 * registers for that instance.
	 *
	 * The size of this kstat is AXQ_NUM_PICS + 1 for %pcr
	 */
	if ((axq_counters_ksp = kstat_create("axq",
	    ddi_get_instance(softsp->dip), "counters",
	    "bus", KSTAT_TYPE_NAMED, AXQ_NUM_PICS + 1,
	    KSTAT_FLAG_WRITABLE)) == NULL) {
		cmn_err(CE_WARN, "axq%d counters: kstat_create failed",
		    ddi_get_instance(softsp->dip));
		return;
	}

	axq_counters_named_data =
	    (struct kstat_named *)(axq_counters_ksp->ks_data);

	/* initialize the named kstats */
	kstat_named_init(&axq_counters_named_data[0],
	    "pcr", KSTAT_DATA_UINT32);

	kstat_named_init(&axq_counters_named_data[1],
	    "pic0", KSTAT_DATA_UINT32);

	kstat_named_init(&axq_counters_named_data[2],
	    "pic1", KSTAT_DATA_UINT32);

	kstat_named_init(&axq_counters_named_data[3],
	    "pic2", KSTAT_DATA_UINT32);

	axq_counters_ksp->ks_update = axq_counters_kstat_update;
	axq_counters_ksp->ks_private = (void *)softsp;

	kstat_install(axq_counters_ksp);

	/* update the softstate */
	softsp->axq_counters_ksp = axq_counters_ksp;
}


static int
axq_counters_kstat_update(kstat_t *ksp, int rw)
{
	struct kstat_named *axq_counters_data;
	struct axq_soft_state *softsp;

	axq_counters_data = (struct kstat_named *)ksp->ks_data;
	softsp = (struct axq_soft_state *)ksp->ks_private;

	if (rw == KSTAT_WRITE) {
		/*
		 * Write the pcr value to the softsp->axq_pcr.
		 * The pic register is read-only so we don't
		 * attempt to write to it.
		 */
		*softsp->axq_pcr = (uint32_t)axq_counters_data[0].value.ui64;
	} else {
		/*
		 * Read %pcr and %pic register values and write them
		 * into counters kstat.
		 */

		/* pcr */
		axq_counters_data[0].value.ui64 = (uint64_t)
		    (*softsp->axq_pcr);

		/* pic0 */
		axq_counters_data[1].value.ui64 = (uint64_t)
		    (*softsp->axq_pic0);

		/* pic1 */
		axq_counters_data[2].value.ui64 = (uint64_t)
		    *softsp->axq_pic1;

		/* pic2 */
		axq_counters_data[3].value.ui64 = (uint64_t)
		    *softsp->axq_pic2;
	}
	return (0);
}

struct gptwo_phys_spec {
	uint_t gptwo_phys_hi;   /* child's address, hi word */
	uint_t gptwo_phys_low;  /* child's address, low word */
	uint_t gptwo_size_hi;   /* high word of size field */
	uint_t gptwo_size_low;  /* low word of size field */
};

int axq_pio_workaround_disable = 0;
int axq_pio_limit = 3;

int
starcat_axq_pio_workaround(dev_info_t *dip)
{
	dev_info_t *axq_dip, *cdip, *pdip;
	int portid, axq_portid;
	char *name;
	int size, circ;
	uint_t *base_addr, *io_domain_control_addr;
	int32_t io_domain_control;
	ddi_device_acc_attr_t acc;
	ddi_acc_handle_t handle;
	struct gptwo_phys_spec *gptwo_spec;
	struct regspec phys_spec;

	if (axq_pio_workaround_disable)
		return (0);

	/*
	 * Get the portid for the PCI (Schizo) device.
	 */
	if ((portid = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "portid", -1)) < 0) {
		cmn_err(CE_WARN, "%s: no portid\n", ddi_get_name(dip));
		return (0);
	}

	/*
	 * Calculate the portid for the Slot 1 AXQ.  The portids are:
	 * Schizo 0  EEEEE11100
	 * Schizo 1  EEEEE11101
	 * AXQ 0     EEEEE11110
	 * AXQ 1     EEEEE11111
	 * where EEEEE is the 5 bit expander number.  So the portid for
	 * AXQ 1 can be easily calculated by ORing a 3 into the portid
	 * of Schizo 0 or 1.
	 */
	axq_portid = portid | 3;

	/*
	 * Look for AXQ nodes that have the portid we calculated.
	 */
	axq_dip = NULL;
	pdip = ddi_root_node();
	ndi_devi_enter(pdip, &circ);
	for (cdip = ddi_get_child(pdip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name", (caddr_t)&name, &size)
		    != DDI_PROP_SUCCESS) {
			continue;
		}

		if (strcmp(name, "address-extender-queue") != 0) {
			kmem_free(name, size);
			continue;
		}

		/*
		 * Found an AXQ node.
		 */

		kmem_free(name, size);

		portid = ddi_getprop(DDI_DEV_T_ANY, cdip, 0, "portid", -1);

		if (portid == axq_portid) {

			/*
			 * We found the correct AXQ node.
			 */
			ndi_hold_devi(cdip);
			axq_dip = cdip;
			break;
		}
	}
	ndi_devi_exit(pdip, circ);

	if (axq_dip == NULL) {
		cmn_err(CE_WARN, "can't find axq node with portid=0x%x\n",
		    axq_portid);
		return (0);
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, axq_dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&gptwo_spec, &size) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "%s: no regspec\n", ddi_get_name(axq_dip));
		ndi_rele_devi(axq_dip);
		return (0);
	}

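	/*
	 * Convert the first entry of the GPTwo "reg" property into a
	 * regspec so it can be mapped with axq_map_phys() below.
	 */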
	phys_spec.regspec_bustype = gptwo_spec->gptwo_phys_hi;
	phys_spec.regspec_addr = gptwo_spec->gptwo_phys_low;
	phys_spec.regspec_size = gptwo_spec->gptwo_size_low;

	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	if (axq_map_phys(axq_dip, &phys_spec, (caddr_t *)&base_addr,
	    &acc, &handle)) {
		cmn_err(CE_WARN, "%s: map phys failed\n",
		    ddi_get_name(axq_dip));
		kmem_free(gptwo_spec, size);
		ndi_rele_devi(axq_dip);
		return (0);
	}

	kmem_free(gptwo_spec, size);

	io_domain_control_addr = REG_ADDR(base_addr, AXQ_SLOT1_DOMCTRL);

	if (ddi_peek32(axq_dip, (int32_t *)io_domain_control_addr,
	    (int32_t *)&io_domain_control)) {
		cmn_err(CE_WARN, "%s: peek failed\n", ddi_get_name(axq_dip));
		ndi_rele_devi(axq_dip);
		return (0);
	}

	axq_unmap_phys(&handle);

	ndi_rele_devi(axq_dip);

	/*
	 * If bit 6 of the IO Domain Control Register is a one,
	 * then this AXQ version does not have the PIO Limit problem.
	 */
	if (io_domain_control & AXQ_DOMCTRL_PIOFIX)
		return (0);

	return (axq_pio_limit);
}

static int
axq_map_phys(dev_info_t *dip, struct regspec *phys_spec,
    caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
    ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	struct regspec *ph;

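	/*
	 * Construct the access handle by hand and call ddi_map()
	 * directly, since we are mapping a raw regspec rather than
	 * one of this dip's own "reg" entries.
	 */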
	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = *accattrp;
	ph = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
	*ph = *phys_spec;
	hp->ah_bus_private = ph;	/* cache a copy of the reg spec */

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

static void
axq_unmap_phys(ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	struct regspec *ph;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);
	ph = hp->ah_bus_private;

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	kmem_free(ph, sizeof (struct regspec));	/* Free the cached copy */
	*handlep = NULL;
}

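/*
 * Panic callback: de-assert iopause on all slot1 AXQs so that I/O
 * (e.g. the panic dump) can make progress while the system goes down.
 */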
/* ARGSUSED */
static boolean_t
axq_panic_callb(void *arg, int code)
{
	axq_iopause_disable_all();
	return (B_TRUE);
}
1747