/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */



#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/autoconf.h>
#include <sys/modctl.h>
#include <sys/sunndi.h>

#include <sys/axq.h>
#include <sys/promif.h>
#include <sys/cpuvar.h>
#include <sys/starcat.h>
#include <sys/callb.h>

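/*
 * Compute a pointer to a 32-bit AXQ register at byte offset "o" from
 * the mapped base address "b".
 */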
#define	REG_ADDR(b, o)	(uint32_t *)((caddr_t)(b) + (o))

/*
 * Function prototypes
 */

/* autoconfig entry point function definitions */
static int axq_attach(dev_info_t *, ddi_attach_cmd_t);
static int axq_detach(dev_info_t *, ddi_detach_cmd_t);
static int axq_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

/* internal axq definitions */
static void axq_init(struct axq_soft_state *);
static void axq_init_local(struct axq_local_regs *);

/* axq kstats */
static void axq_add_picN_kstats(dev_info_t *dip);
static void axq_add_kstats(struct axq_soft_state *);
static int axq_counters_kstat_update(kstat_t *, int);

/*
 * Configuration data structures
 */
static struct cb_ops axq_cb_ops = {
	nulldev,			/* open */
	nulldev,			/* close */
	nulldev,			/* strategy */
	nulldev,			/* print */
	nodev,				/* dump */
	nulldev,			/* read */
	nulldev,			/* write */
	nulldev,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_MP | D_NEW,			/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* cb_aread */
	nodev				/* cb_awrite */
};

static struct dev_ops axq_ops = {
	DEVO_REV,			/* rev */
	0,				/* refcnt  */
	axq_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	axq_attach,			/* attach */
	axq_detach,			/* detach */
	nulldev,			/* reset */
	&axq_cb_ops,			/* cb_ops */
	(struct bus_ops *)0,		/* bus_ops */
	nulldev,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


/*
 * AXQ globals
 */
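/* AXQ soft states, indexed by expander number and slot number */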
struct axq_soft_state *axq_array[AXQ_MAX_EXP][AXQ_MAX_SLOT_PER_EXP];
krwlock_t axq_array_lock;
struct axq_local_regs axq_local;
int use_axq_iopause = 1;	/* axq iopause is enabled by default */
/*
 * If non-zero, iopause will be asserted during DDI_SUSPEND.
 * Clients using the axq_iopause_*_all interfaces should set this to zero.
 */
int axq_suspend_iopause = 1;

/*
 * loadable module support
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"AXQ driver",	/* name of module */
	&axq_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

static void *axq_softp;

/*
 * AXQ Performance counters
 * We statically declare an array of the known
 * AXQ event names and event masks. The number
 * of events in this array is AXQ_NUM_EVENTS.
 */
static axq_event_mask_t axq_events[AXQ_NUM_EVENTS] = {
	{"count_clk", COUNT_CLK}, {"freeze_cnt", FREEZE_CNT},
	{"ha_input_fifo", HA_INPUT_FIFO}, {"ha_intr_info", HA_INTR_INFO},
	{"ha_pio_fifo", HA_PIO_FIFO}, {"ha_adr_fifo_lk3", HA_ADR_FIFO_LK3},
	{"ha_adr_fifo_lk2", HA_ADR_FIFO_LK2},
	{"ha_adr_fifo_lk1", HA_ADR_FIFO_LK1},
	{"ha_adr_fifo_lk0", HA_ADR_FIFO_LK0},
	{"ha_dump_q", HA_DUMP_Q},
	{"ha_rd_f_stb_q", HA_RD_F_STB_Q},
	{"ha_dp_wr_q", HA_DP_WR_Q},
	{"ha_int_q", HA_INT_Q},
	{"ha_wrb_q", HA_WRB_Q},
	{"ha_wr_mp_q", HA_WR_MP_Q},
	{"ha_wrtag_q", HA_WRTAG_Q},
	{"ha_wt_wait_fifo", HA_WT_WAIT_FIFO},
	{"ha_wrb_stb_fifo", HA_WRB_STB_FIFO},
	{"ha_ap0_q", HA_AP0_Q},
	{"ha_ap1_q", HA_AP1_Q},
	{"ha_new_wr_q", HA_NEW_WR_Q},
	{"ha_dp_rd_q", HA_DP_RD_Q},
	{"ha_unlock_q", HA_UNLOCK_Q},
	{"ha_cdc_upd_q", HA_CDC_UPD_Q},
	{"ha_ds_q", HA_DS_Q},
	{"ha_unlk_wait_q", HA_UNLK_WAIT_Q},
	{"ha_rd_mp_q", HA_RD_MP_Q},
	{"l2_io_q", L2_IO_Q},
	{"l2_sb_q", L2_SB_Q},
	{"l2_ra_q", L2_RA_Q},
	{"l2_ha_q", L2_HA_Q},
	{"l2_sa_q", L2_SA_Q},
	{"ra_wait_fifo", RA_WAIT_FIFO},
	{"ra_wrb_inv_fifo", RA_WRB_INV_FIFO},
	{"ra_wrb_fifo", RA_WRB_FIFO},
	{"ra_cc_ptr_fifo", RA_CC_PTR_FIFO},
	{"ra_io_ptr_fifo", RA_IO_PTR_FIFO},
	{"ra_int_ptr_fifo", RA_INT_PTR_FIFO},
	{"ra_rp_q", RA_RP_Q},
	{"ra_wrb_rp_q", RA_WRB_RP_Q},
	{"ra_dp_q", RA_DP_Q},
	{"ra_dp_stb_q", RA_DP_STB_Q},
	{"ra_gtarg_q", RA_GTARG_Q},
	{"sdc_recv_q",	SDC_RECV_Q},
	{"sdc_redir_io_q", SDC_REDIR_IO_Q},
	{"sdc_redir_sb_q", SDC_REDIR_SB_Q},
	{"sdc_outb_io_q", SDC_OUTB_IO_Q},
	{"sdc_outb_sb_q", SDC_OUTB_SB_Q},
	{"sa_add1_input_q", SA_ADD1_INPUT_Q},
	{"sa_add2_input_q", SA_ADD2_INPUT_Q},
	{"sa_inv_q", SA_INV_Q},
	{"sa_no_inv_q", SA_NO_INV_Q},
	{"sa_int_dp_q", SA_INT_DP_Q},
	{"sa_dp_q", SA_DP_Q},
	{"sl_wrtag_q", SL_WRTAG_Q},
	{"sl_rto_dp_q", SL_RTO_DP_Q},
	{"syreg_input_q", SYSREG_INPUT_Q},
	{"sdi_sys_status1", SDI_SYS_STATUS1},
	{"sdi_sys_status0", SDI_SYS_STATUS0},
	{"cdc_hits", CDC_HITS},
	{"total_cdc_read", TOTAL_CDC_READ},
	{"ha_watranid_sd", HA_WATRANID_SD},
	{"ha_stb_sd", HA_STB_SD},
	{"ha_l2_irq_sd", HA_L2_IRQ_SD},
	{"ha_sl_wrtag_sd", HA_SL_WRTAG_SD},
	{"aa_home_cc_full", AA_HOME_CC_FULL},
	{"aa_home_io_full", AA_HOME_IO_FULL},
	{"aa_slave_full", AA_SLAVE_FULL},
	{"aa_rp_full", AA_RP_FULL}
};

static kstat_t *axq_picN_ksp[AXQ_NUM_PICS];	/* picN kstats */
static int axq_attachcnt = 0;		/* # of instances attached */
static kmutex_t axq_attachcnt_lock;	/* lock for attachcnt */

static int axq_map_phys(dev_info_t *, struct regspec *,  caddr_t *,
    ddi_device_acc_attr_t *, ddi_acc_handle_t *);
static void axq_unmap_phys(ddi_acc_handle_t *);

int starcat_axq_pio_workaround(dev_info_t *);
static int axq_slot1_idle(struct axq_soft_state *);

static boolean_t axq_panic_callb(void *, int);
static callb_id_t axq_panic_cb_id;

/*
 * These are the module initialization routines.
 */

int
_init(void)
{
	int error;

	if ((error = ddi_soft_state_init(&axq_softp,
	    sizeof (struct axq_soft_state), 1)) != 0)
		return (error);

	rw_init(&axq_array_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&axq_local.axq_local_lock, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&axq_attachcnt_lock, NULL, MUTEX_DRIVER, NULL);

	axq_local.initflag = 0;

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&axq_softp);
		mutex_destroy(&axq_attachcnt_lock);
		mutex_destroy(&axq_local.axq_local_lock);
		rw_destroy(&axq_array_lock);
		return (error);
	}

	axq_panic_cb_id = callb_add(axq_panic_callb, (void *)NULL,
	    CB_CL_PANIC, "axq_panic");

	return (0);
}

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	ddi_soft_state_fini(&axq_softp);
	mutex_destroy(&axq_attachcnt_lock);
	mutex_destroy(&axq_local.axq_local_lock);
	rw_destroy(&axq_array_lock);

	(void) callb_delete(axq_panic_cb_id);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
axq_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance;
	struct axq_soft_state *softsp;
	ddi_device_acc_attr_t attr;
	extern uint64_t va_to_pa(void *);

	instance = ddi_get_instance(devi);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/*
		 * Reenable the axq io pause if it is
		 * employed. See the DDI_SUSPEND comments
		 */
		softsp = ddi_get_soft_state(axq_softp, instance);
		if (softsp->slotnum && softsp->paused && use_axq_iopause &&
		    axq_suspend_iopause) {
			*softsp->axq_domain_ctrl &= ~AXQ_DOMCTRL_PAUSE;
			softsp->paused = 0;
		}
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(axq_softp, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	softsp = ddi_get_soft_state(axq_softp, instance);

	/* Set the dip in the soft state */
	softsp->dip = devi;

	/* Get the "portid" property */
	if ((softsp->portid = (int)ddi_getprop(DDI_DEV_T_ANY, softsp->dip,
	    DDI_PROP_DONTPASS, "portid", -1)) == -1) {
		cmn_err(CE_WARN, "Unable to retrieve safari portid "
		    "property.");
		goto bad;
	}

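	/*
	 * The expander number is encoded in the upper bits of the
	 * safari portid (everything above the low five bits).
	 */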
	softsp->expid = softsp->portid >> 5;

	/*
	 * derive the slot # from the portid - for starcat, it is
	 * either 0 or 1 based on the lsb of the axq portid.
	 */
	softsp->slotnum = softsp->portid & 0x1;

	/*
	 * map in the regs. There are two regspecs - one
	 * in safari config space and the other in local space.
	 */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	if (ddi_regs_map_setup(softsp->dip, 0, &softsp->address, 0, 0,
	    &attr, &softsp->ac0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: unable to map reg set 0\n",
		    ddi_get_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		goto bad;
	}

	/*
	 * This is a hack to support DR copy-rename scripting.
	 * Get the physical address of the start of the
	 * AXQ config space and save it.
	 */
	softsp->axq_phyaddr = va_to_pa((caddr_t)softsp->address);

	axq_init(softsp);

	/*
	 * Map in the regs for local space access
	 * This is global for all axq instances.
	 * Make sure that some axq instance does
	 * it for the rest of the gang..
	 * Note that this mapping is never removed.
	 */
	mutex_enter(&axq_local.axq_local_lock);
	if (!axq_local.initflag) {
		/* initialize and map in the local space */
		if (ddi_regs_map_setup(softsp->dip, 1,
		    &axq_local.laddress, 0, 0,
		    &attr, &axq_local.ac) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: unable to map reg set 1\n",
			    ddi_get_name(softsp->dip),
			    ddi_get_instance(softsp->dip));
			ddi_regs_map_free(&softsp->ac0);
			mutex_exit(&axq_local.axq_local_lock);
			goto bad;
		}
		axq_init_local(&axq_local);
		axq_local.initflag = 1;
	}
	mutex_exit(&axq_local.axq_local_lock);

	mutex_init(&softsp->axq_lock, NULL, MUTEX_DRIVER, NULL);

	/* update the axq array for this new instance */
	rw_enter(&axq_array_lock, RW_WRITER);
	ASSERT(axq_array[softsp->expid][softsp->slotnum] == NULL);
	axq_array[softsp->expid][softsp->slotnum] = softsp;
	rw_exit(&axq_array_lock);

	axq_add_kstats(softsp);

	ddi_report_dev(devi);

	return (DDI_SUCCESS);

bad:
	ddi_soft_state_free(axq_softp, instance);
	return (DDI_FAILURE);
}


static void
axq_init(struct axq_soft_state *softsp)
{
	int i;

	/*
	 * Setup the AXQ registers
	 * Some offsets and availability are dependent on the slot type
	 */
	if (softsp->slotnum == 0) {
		/* This is a slot type 0 AXQ */
		softsp->axq_domain_ctrl = REG_ADDR(softsp->address,
		    AXQ_SLOT0_DOMCTRL);
		softsp->axq_cdc_addrtest = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_ADR_TEST);
		softsp->axq_cdc_ctrltest = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_CTL_TEST);
		softsp->axq_cdc_datawrite0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR0);
		softsp->axq_cdc_datawrite1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR1);
		softsp->axq_cdc_datawrite2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR2);
		softsp->axq_cdc_datawrite3 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR3);
		softsp->axq_cdc_counter = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_CNT_TEST);
		softsp->axq_cdc_readdata0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA0);
		softsp->axq_cdc_readdata1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA1);
		softsp->axq_cdc_readdata2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA2);
		softsp->axq_cdc_readdata3 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA3);
		softsp->axq_pcr = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT_SEL);
		softsp->axq_pic0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT0);
		softsp->axq_pic1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT1);
		softsp->axq_pic2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT2);
		softsp->axq_nasm = REG_ADDR(softsp->address, AXQ_SLOT0_NASM);
	} else {
		/* slot type 1 AXQ */
		softsp->axq_domain_ctrl = REG_ADDR(softsp->address,
		    AXQ_SLOT1_DOMCTRL);
		softsp->axq_pcr = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT_SEL);
		softsp->axq_pic0 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT0);
		softsp->axq_pic1 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT1);
		softsp->axq_pic2 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT2);
		softsp->axq_nasm = REG_ADDR(softsp->address, AXQ_SLOT1_NASM);
	}

	/* setup CASM slots */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		softsp->axq_casm_slot[i] = REG_ADDR(softsp->address,
		    (AXQ_CASM_SLOT_START + AXQ_REGOFF(i)));
	}

	/* setup SDI timeout register accesses */
	softsp->axq_sdi_timeout_rd = REG_ADDR(softsp->address,
	    AXQ_SLOT_SDI_TIMEOUT_RD);
	softsp->axq_sdi_timeout_rdclr = REG_ADDR(softsp->address,
	    AXQ_SLOT_SDI_TIMEOUT_RDCLR);

	/*
	 * Save the CDC state (enabled or disabled)
	 * as originally set up by POST.
	 */
	if (softsp->slotnum == 0) {
		softsp->axq_cdc_state = *softsp->axq_cdc_ctrltest &
		    AXQ_CDC_DIS;
	}

#ifndef _AXQ_LOCAL_ACCESS_SUPPORTED
	/*
	 * Setup cpu2ssc intr register in explicit expander
	 * space. Local space addressing for this is broken,
	 * we'll use explicit addressing for now.
	 */
	softsp->axq_cpu2ssc_intr = REG_ADDR(softsp->address,
	    AXQ_SLOT_CPU2SSC_INTR);
#endif /* _AXQ_LOCAL_ACCESS_SUPPORTED */
}


static void
axq_init_local(struct axq_local_regs *localregs)
{
	/*
	 * local access to cpu2ssc intr register will
	 * be the only one that may work properly in the
	 * next revision of the AXQ asics.
	 * Set it up here for now.
	 */
	localregs->axq_cpu2ssc_intr = REG_ADDR(localregs->laddress,
	    AXQ_SLOT_CPU2SSC_INTR);
}

/* ARGSUSED */
static int
axq_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	int i;
	struct axq_soft_state *softsp;
	processorid_t cpuid;

	/* get the instance of this devi */
	instance = ddi_get_instance(devi);

	/* get the soft state pointer for this device node */
	softsp = ddi_get_soft_state(axq_softp, instance);

	switch (cmd) {
	case DDI_SUSPEND:
		/*
		 * Depending on the variable "use_axq_iopause"
		 * we set the axq iopause bit as a paranoid
		 * safety net. This is assuming all the devices
		 * associated with the slot are already suspended.
		 * Care must be taken to not set iopause when CPUs
		 * are known to be present on the slot 1 board,
		 * i.e. MCPU board type.
		 * This io pause bit only applies to the slot 1 axq.
		 */
		if (softsp->slotnum && use_axq_iopause && axq_suspend_iopause) {
			/*
			 * Do not enable AXQ_DOMCTRL_PAUSE if CPUs are
			 * known to be present in slot 1.
			 */
			mutex_enter(&cpu_lock);
			for (i = 0; i < STARCAT_SLOT1_CPU_MAX; i++) {
				cpuid = MAKE_CPUID(softsp->expid,
				    softsp->slotnum, i);
				if (cpu[cpuid]) {
					mutex_exit(&cpu_lock);
					return (DDI_SUCCESS);
				}
			}
			mutex_exit(&cpu_lock);

			/*
			 * Make sure that there is no outstanding
			 * I/O activity by reading the domain ctrl reg.
			 * A non-zero lsb indicates no I/O activity.
			 */
			if (axq_slot1_idle(softsp) == DDI_FAILURE) {
				cmn_err(CE_WARN, "%s%d: busy! suspend failed",
				    ddi_get_name(softsp->dip),
				    ddi_get_instance(softsp->dip));
				return (DDI_FAILURE);
			}

			*softsp->axq_domain_ctrl |= AXQ_DOMCTRL_PAUSE;
			softsp->paused = 1;
		}
		return (DDI_SUCCESS);

	case DDI_DETACH:
		rw_enter(&axq_array_lock, RW_WRITER);
		ASSERT(axq_array[softsp->expid][softsp->slotnum]
		    != NULL);
		axq_array[softsp->expid][softsp->slotnum] = NULL;
		rw_exit(&axq_array_lock);

		ddi_regs_map_free(&softsp->ac0);

		/*
		 * remove counter kstats for this device
		 */
		if (softsp->axq_counters_ksp != (kstat_t *)NULL) {
			kstat_delete(softsp->axq_counters_ksp);
		}

		/*
		 * See if we are the last instance to detach.
		 * If so, we need to remove the picN kstats
		 */
		mutex_enter(&axq_attachcnt_lock);
		if (--axq_attachcnt == 0) {
			for (i = 0; i < AXQ_NUM_PICS; i++) {
				if (axq_picN_ksp[i] != (kstat_t *)NULL) {
					kstat_delete(axq_picN_ksp[i]);
					axq_picN_ksp[i] = NULL;
				}
			}
		}
		mutex_exit(&axq_attachcnt_lock);

		ddi_soft_state_free(axq_softp, instance);

		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}


/* ARGSUSED0 */
static int
axq_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t dev = (dev_t)arg;
	struct axq_soft_state *softsp;
	int instance, ret;

	instance = getminor(dev);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softsp = (struct axq_soft_state *)
			    ddi_get_soft_state(axq_softp, instance);
			if (softsp == NULL) {
				ret = DDI_FAILURE;
			} else {
				*result = softsp->dip;
				ret = DDI_SUCCESS;
			}
			break;
		case DDI_INFO_DEVT2INSTANCE:
			*result = (void *)(uintptr_t)instance;
			ret = DDI_SUCCESS;
			break;
		default:
			ret = DDI_FAILURE;
			break;
	}
	return (ret);
}

/*
 * Flush the CDC Sram of the slot0 axq
 * indicated by the expid argument
 */
int
axq_cdc_flush(uint32_t expid, int held, int disabled)
{
	struct axq_soft_state *softsp;
	uint32_t axq_ctrl_test_save0;
	uint32_t tmpval;
	int retval = 0;
	int i;

	if (!held)
		rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][SLOT0_AXQ] != NULL);

	softsp = axq_array[expid][SLOT0_AXQ];

	mutex_enter(&softsp->axq_lock);

	/* save the value of the ctrl test reg */
	axq_ctrl_test_save0 = *softsp->axq_cdc_ctrltest;

	/* disable sram and setup the ctrl test reg for flushing */
	tmpval = axq_ctrl_test_save0 & (AXQ_CDC_DATA_ECC_CHK_EN |
	    AXQ_CDC_ADR_PAR_CHK_EN |
	    AXQ_CDC_DATA_ECC_GEN_EN |
	    AXQ_CDC_ADR_PAR_GEN_EN);
	*softsp->axq_cdc_ctrltest = tmpval | AXQ_CDC_TMODE_WR
	    | AXQ_CDC_DATA2PAR_MUX_SEL_DATA
	    | AXQ_CDC_ADR2SRAM_MUX_SEL_TEST
	    | AXQ_CDC_ADR_INCR_XOR_CTRL
	    | AXQ_CDC_DIS;

	/* Enable CDC test in the CDC Address test reg */
	*softsp->axq_cdc_addrtest = AXQ_CDC_ADR_TEST_EN;

	/* clear the CDC Data write regs */
	*softsp->axq_cdc_datawrite0 = *softsp->axq_cdc_datawrite1 = 0;
	*softsp->axq_cdc_datawrite2 = *softsp->axq_cdc_datawrite3 = 0;

	/*
	 * write in the size of the sram to clear
	 * into the CDC Counter test reg
	 */
	*softsp->axq_cdc_counter = AXQ_CDC_SRAM_SIZE;

	/* wait for flush to complete */
	for (i = 0; i < AXQ_CDC_FLUSH_WAIT; i++) {
		DELAY(3000); /* should take only 1750 usecs */
		if (((*softsp->axq_cdc_counter) &
		    AXQ_CDC_CNT_TEST_DONE) != 0) {
			break;
		}
	}
	if (i >= AXQ_CDC_FLUSH_WAIT) {
		retval = DDI_FAILURE;
		cmn_err(CE_WARN, "axq_cdc_flush failed on expander %d",
		    expid);
	}

	/*
	 * Disable test mode in CDC address test reg
	 */
	*softsp->axq_cdc_addrtest = 0;

	/*
	 * If "disabled" option is requested, leave
	 * the CDC disabled.
	 */
	if (disabled) {
		axq_ctrl_test_save0 |= AXQ_CDC_DIS;
		*softsp->axq_cdc_ctrltest = axq_ctrl_test_save0;
	} else {
		*softsp->axq_cdc_ctrltest = axq_ctrl_test_save0;
	}

	mutex_exit(&softsp->axq_lock);

	if (!held)
		rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Flush all the CDC srams for all the AXQs in
 * the local domain.
 */
int
axq_cdc_flush_all()
{
	int retval;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			retval = axq_cdc_flush(i, 1, 0);
			if (retval != DDI_SUCCESS) break;
		}
	}
	rw_exit(&axq_array_lock);
	return (retval);
}

/*
 * Disable and flush all CDC srams for all the AXQs
 * in the local domain.
 */
int
axq_cdc_disable_flush_all()
{
	int retval;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	/*
	 * Disable and flush all the CDC srams
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			retval = axq_cdc_flush(i, 1, 1);
			if (retval != DDI_SUCCESS) break;
		}
	}
	rw_exit(&axq_array_lock);

	if (retval != DDI_SUCCESS) {
		axq_cdc_enable_all();
	}
	return (retval);
}


/*
 * Enable the CDC srams for all the AXQs in the
 * local domain. This routine is used in
 * conjunction with axq_cdc_disable_flush_all().
 */
void
axq_cdc_enable_all()
{
	struct axq_soft_state *softsp;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	/*
	 * Enable all the CDC srams
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			mutex_enter(&softsp->axq_lock);
			if (softsp->axq_cdc_state != AXQ_CDC_DIS) {
				*softsp->axq_cdc_ctrltest &= ~AXQ_CDC_DIS;
			}
			mutex_exit(&softsp->axq_lock);
		}
	}
	rw_exit(&axq_array_lock);
}

/*
 * Interface for DR to enable slot1 iopause after cpus have been idled.
 * Precondition is for all devices to have been suspended (including axq).
 * This routine avoids locks as it is called by DR with cpus paused.
 */
int
axq_iopause_enable_all(uint32_t *errexp)
{
	int i, j;
	int retval = DDI_SUCCESS;
	processorid_t cpuid;
	struct axq_soft_state *softsp;

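	/* Brief delay before checking the slot1 AXQs for idleness */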
	DELAY(1000);
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL &&
		    use_axq_iopause) {
			/*
			 * Do not enable if cpus configured in slot1.
			 * Unconfigured cpus should be idle in nc space.
			 */
			for (j = 0; j < STARCAT_SLOT1_CPU_MAX; j++) {
				cpuid = MAKE_CPUID(softsp->expid,
				    softsp->slotnum, j);
				if (cpu[cpuid]) {
					break;
				}
			}
			if (j < STARCAT_SLOT1_CPU_MAX) {
				continue;
			}

			retval = axq_slot1_idle(softsp);
			if (retval == DDI_FAILURE) {
				break;
			}

			*softsp->axq_domain_ctrl |= AXQ_DOMCTRL_PAUSE;
			softsp->paused = 1;
		}
	}

	if (retval != DDI_SUCCESS) {
		ASSERT(errexp);
		*errexp = i;
		axq_iopause_disable_all();
	}
	return (retval);
}

/*
 * De-assert axq iopause on all slot1 boards. This routine avoids locks
 * as it is called by DR with cpus paused.
 */
void
axq_iopause_disable_all()
{
	int i;
	struct axq_soft_state *softsp;

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL &&
		    softsp->paused) {
			*softsp->axq_domain_ctrl &= ~AXQ_DOMCTRL_PAUSE;
			softsp->paused = 0;
		}
	}
}

/*
 * Attempt to wait for slot1 activity to go idle.
 */
static int
axq_slot1_idle(struct axq_soft_state *softsp)
{
	int i;

	ASSERT(softsp->slotnum == SLOT1_AXQ);
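	/*
	 * Poll the domain ctrl register; a non-zero AXQ_DOMCTRL_BUSY
	 * (lsb) means there is no outstanding I/O activity (see the
	 * DDI_SUSPEND comment in axq_detach).
	 */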
	for (i = 0; i < 10; i++) {
		if ((*(softsp->axq_domain_ctrl) & AXQ_DOMCTRL_BUSY) != 0) {
			return (DDI_SUCCESS);
		}
		DELAY(50);
	}
	return (DDI_FAILURE);
}

/*
 * Read a particular NASM entry
 */
int
axq_nasm_read(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t *data)
{
	axq_nasm_read_u aread;
	axq_nasm_write_u awrite;
	struct axq_soft_state *softsp;

	if (slot > AXQ_MAX_SLOT_PER_EXP ||
	    expid > AXQ_MAX_EXP ||
	    nasm_entry > AXQ_NASM_SIZE) {
		return (DDI_FAILURE);
	}

	awrite.bit.rw = 0;	/* read operation */
	awrite.bit.addr = nasm_entry;
	awrite.bit.data = 0;

	rw_enter(&axq_array_lock, RW_READER);

	softsp = axq_array[expid][slot];
	if (softsp == NULL) {
		rw_exit(&axq_array_lock);
		return (DDI_FAILURE);
	}

	mutex_enter(&softsp->axq_lock);

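	/*
	 * Issue the read command and then read the result back from the
	 * same register; the valid bit indicates whether the returned
	 * data is good.
	 */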
	*(softsp->axq_nasm) = awrite.val;
	aread.val = *(softsp->axq_nasm);

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	if (aread.bit.valid) {
		*data = aread.bit.data;
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}

/*
 * Write a particular NASM entry
 */
static int
axq_nasm_write_one(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t data)
{
	axq_nasm_write_u awrite;
	struct axq_soft_state *softsp;

	/*
	 * Note: need to make sure axq_array_lock held first, so that a
	 * paused thread is not holding softsp->axq_lock, which could
	 * result in deadlock.
	 */
	ASSERT(RW_LOCK_HELD(&axq_array_lock));

	if (slot > AXQ_MAX_SLOT_PER_EXP ||
	    expid > AXQ_MAX_EXP ||
	    nasm_entry > AXQ_NASM_SIZE) {
		return (DDI_FAILURE);
	}

	awrite.bit.rw = 1;	/* write operation */
	awrite.bit.addr = nasm_entry;
	awrite.bit.data = data;

	softsp = axq_array[expid][slot];
	if (softsp == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&softsp->axq_lock);

	*(softsp->axq_nasm) = awrite.val;

	mutex_exit(&softsp->axq_lock);

	return (DDI_SUCCESS);
}

int
axq_nasm_write(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t data)
{
	int rc;

	rw_enter(&axq_array_lock, RW_READER);
	rc = axq_nasm_write_one(expid, slot, nasm_entry, data);
	rw_exit(&axq_array_lock);
	return (rc);
}

/*
 * Write a particular NASM entry for all the
 * axqs in the domain.
 * Note: other CPUs are paused when this function is called.
 */
int
axq_nasm_write_all(uint32_t nasm_entry, uint32_t data)
{
	int i;
	int rc;

	ASSERT(RW_WRITE_HELD(&axq_array_lock));

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			rc = axq_nasm_write_one(i, SLOT0_AXQ, nasm_entry,
			    data);
			if (rc != DDI_SUCCESS) {
				return (DDI_FAILURE);
			}
		}
		if (axq_array[i][SLOT1_AXQ] != NULL) {
			rc = axq_nasm_write_one(i, SLOT1_AXQ, nasm_entry,
			    data);
			if (rc != DDI_SUCCESS) {
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

/*
 * Take write lock for axq_nasm_write_all() outside
 * critical section where other CPUs are paused.
 */
void
axq_array_rw_enter(void)
{
	rw_enter(&axq_array_lock, RW_WRITER);
}

/*
 * Release write lock for axq_nasm_write_all() outside
 * critical section where other CPUs are paused.
 */
void
axq_array_rw_exit(void)
{
	rw_exit(&axq_array_lock);
}

/*
 * Read a particular CASM entry
 */
uint32_t
axq_casm_read(uint32_t expid, uint32_t slot, int casmslot)
{
	struct axq_soft_state *softsp;
	uint32_t retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);
	ASSERT(casmslot >= 0 && casmslot < AXQ_MAX_EXP);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	retval = *(softsp->axq_casm_slot[casmslot]);

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Write a particular CASM entry
 */

int
axq_casm_write(uint32_t expid, uint32_t slot, int casmslot,
		uint32_t value)
{
	struct axq_soft_state *softsp;
	int retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);
	ASSERT(casmslot >= 0 && casmslot < AXQ_MAX_EXP);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	/*
	 * First read the casm slot in question;
	 * it should be non-zero to indicate that
	 * we have write permission to update it.
	 * Note that if we write it without valid
	 * permission, we can get an exception.
	 */
	if (*(softsp->axq_casm_slot[casmslot])) {
		*(softsp->axq_casm_slot[casmslot]) = value;
		retval = DDI_SUCCESS;
	} else {
		retval = DDI_FAILURE;
	}

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);
	return (retval);
}

/*
 * Write a particular CASM entry for all the
 * axqs in the domain
 */

int
axq_casm_write_all(int casmslot, uint32_t value)
{
	int i;
	struct axq_soft_state *softsp;

	/*
	 * Since we are updating all the AXQs,
	 * it will be easier to simply grab
	 * exclusive access to the AXQs by obtaining
	 * the RW_WRITER access to the axq_array.
	 */
	rw_enter(&axq_array_lock, RW_WRITER);

	/*
	 * Paranoid check: run thru all the avail AXQs
	 * and make sure we can write into the slot in question.
	 * We check it by reading the slot; it should be
	 * non-zero.
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			if (*(softsp->axq_casm_slot[casmslot])
			    == 0) {
				break;
			}
		}
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL) {
			if (*(softsp->axq_casm_slot[casmslot])
			    == 0) {
				break;
			}
		}
	}

	if (i < AXQ_MAX_EXP) {
		/*
		 * We have no write permission for some AXQ
		 * for the CASM slot in question. Flag it
		 * as an error
		 */
		rw_exit(&axq_array_lock);
		return (DDI_FAILURE);
	}

	/*
	 * everything looks good - do the update
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			*softsp->axq_casm_slot[casmslot] = value;
		}
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL) {
			*softsp->axq_casm_slot[casmslot] = value;
		}
	}

	rw_exit(&axq_array_lock);
	return (DDI_SUCCESS);
}


/*
 * Construct a script of <physicaladdr, data> tuple pairs that
 * reprogram all the AXQs in the local domain to swap the
 * contents of casmslot0 with casmslot1.
 */
int
axq_do_casm_rename_script(uint64_t **script_elm, int casmslot0,
	int casmslot1)
{
	struct axq_soft_state *softsp;
	int i, slot;
	uint32_t val0, val1;
	uint64_t *s_elm = *script_elm;
	uint64_t paddr;

	/*
	 * There should be some global locking at the
	 * DR level to do this - since this is one of
	 * the steps in the copy-rename sequence.
	 * For now, we grab the RW_WRITER lock for
	 * script construction.
	 */
	rw_enter(&axq_array_lock, RW_WRITER);

	/*
	 * Construct the <physicaladdr, data> tuple pairs
	 * for reprogramming the AXQs so that the value in
	 * casmslot0 is swapped with the content in casmslot1.
	 * Paranoid check: We make sure that we can write to
	 * both slots in all the AXQs by reading the slots and
	 * they should be non-zero.
	 */
	for (slot = SLOT0_AXQ; slot <= SLOT1_AXQ; slot++) {
		for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][slot]) != NULL) {
			paddr = softsp->axq_phyaddr;
			val0 = *(softsp->axq_casm_slot[casmslot0]);
			val1 = *(softsp->axq_casm_slot[casmslot1]);
			if (val0 != 0 && val1 != 0) {
				*s_elm++ = paddr + AXQ_CASM_SLOT_START +
				    AXQ_REGOFF(casmslot0);
				*s_elm++ = val1;
				*s_elm++ = paddr + AXQ_CASM_SLOT_START +
				    AXQ_REGOFF(casmslot1);
				*s_elm++ = val0;
			} else {
				/*
				 * Somehow we can't access one of
				 * the casm slots - quit.
				 */
				break;
			}
		}
		}
		if (i < AXQ_MAX_EXP) break;
	}

	rw_exit(&axq_array_lock);

	if (slot > SLOT1_AXQ) {
		/* successful */
		*script_elm = s_elm;
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}


/*
 * Send an interrupt to the SSC, passing
 * an 8-bit cookie value
 */
126203831d35Sstevel int
axq_cpu2ssc_intr(uint8_t cookie)126303831d35Sstevel axq_cpu2ssc_intr(uint8_t cookie)
126403831d35Sstevel {
126503831d35Sstevel 	int retval, i;
126603831d35Sstevel 	volatile uint32_t *intr_reg;
126703831d35Sstevel 
126803831d35Sstevel #ifndef	_AXQ_LOCAL_SPACE_SUPPORTED
126903831d35Sstevel 	/* Local space access not available */
127003831d35Sstevel 
127103831d35Sstevel 	int exp, slot;
127203831d35Sstevel 
127303831d35Sstevel 	rw_enter(&axq_array_lock, RW_READER);
127403831d35Sstevel 
127503831d35Sstevel 	/* Make sure the current cpu is not switched out */
127603831d35Sstevel 	kpreempt_disable();
127703831d35Sstevel 
127803831d35Sstevel 	/*
127903831d35Sstevel 	 * Compute the exp# and slot# of the current cpu
128003831d35Sstevel 	 * so that we know which AXQ cpu2ssc intr reg to
128103831d35Sstevel 	 * use.
128203831d35Sstevel 	 */
128303831d35Sstevel 	exp = CPU->cpu_id >> 5;
128403831d35Sstevel 	slot = (CPU->cpu_id >> 3) & 0x1;
128503831d35Sstevel 
128603831d35Sstevel 	intr_reg = axq_array[exp][slot]->axq_cpu2ssc_intr;
128703831d35Sstevel #else
128803831d35Sstevel 	/* use local space */
128903831d35Sstevel 	intr_reg = axq_local.axq_cpu2ssc_intr;
129003831d35Sstevel #endif /* _AXQ_LOCAL_SPACE_SUPPORTED */
129103831d35Sstevel 
129203831d35Sstevel 	ASSERT(intr_reg != 0);
129303831d35Sstevel 
129403831d35Sstevel 	retval = DDI_FAILURE;
129503831d35Sstevel 	for (i = 0; i < AXQ_INTR_PEND_WAIT; i++) {
129603831d35Sstevel 		if (!(*intr_reg & AXQ_CPU2SSC_INTR_PEND)) {
129703831d35Sstevel 			*intr_reg = cookie;
129803831d35Sstevel 			retval = DDI_SUCCESS;
129903831d35Sstevel 			break;
130003831d35Sstevel 		}
130103831d35Sstevel 		DELAY(200);
130203831d35Sstevel 	}
130303831d35Sstevel 
130403831d35Sstevel #ifndef	_AXQ_LOCAL_SPACE_SUPPORTED
130503831d35Sstevel 	kpreempt_enable();
130603831d35Sstevel 	rw_exit(&axq_array_lock);
130703831d35Sstevel #endif
130803831d35Sstevel 	return (retval);
130903831d35Sstevel }
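
/*
 * Worked example (derived from the shifts above; illustration only):
 * for cpu_id 43 (binary 101011), exp = 43 >> 5 = 1 and
 * slot = (43 >> 3) & 1 = 1, so the interrupt would be posted through
 * the Slot 1 AXQ on expander 1 when local space is not available.
 */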
131003831d35Sstevel 
131103831d35Sstevel 
131203831d35Sstevel /*
131303831d35Sstevel  * Read the SDI timeout register (SRD use).
131403831d35Sstevel  * This routine accepts a clear flag to indicate
131503831d35Sstevel  * whether the register should be cleared after
131603831d35Sstevel  * the read.
131703831d35Sstevel  */
131803831d35Sstevel uint32_t
131903831d35Sstevel axq_read_sdi_timeout_reg(uint32_t expid, uint32_t slot, int clearflag)
132003831d35Sstevel {
132103831d35Sstevel 	struct axq_soft_state *softsp;
132203831d35Sstevel 	uint32_t retval;
132303831d35Sstevel 
132403831d35Sstevel 	rw_enter(&axq_array_lock, RW_READER);
132503831d35Sstevel 
132603831d35Sstevel 	ASSERT(axq_array[expid][slot] != NULL);
132703831d35Sstevel 
132803831d35Sstevel 	softsp = axq_array[expid][slot];
132903831d35Sstevel 
133003831d35Sstevel 	mutex_enter(&softsp->axq_lock);
133103831d35Sstevel 
133203831d35Sstevel 	if (clearflag) {
133303831d35Sstevel 		/* read and then clear register */
133403831d35Sstevel 		retval = *softsp->axq_sdi_timeout_rdclr;
133503831d35Sstevel 	} else {
133603831d35Sstevel 		retval = *softsp->axq_sdi_timeout_rd;
133703831d35Sstevel 	}
133803831d35Sstevel 
133903831d35Sstevel 	mutex_exit(&softsp->axq_lock);
134003831d35Sstevel 	rw_exit(&axq_array_lock);
134103831d35Sstevel 
134203831d35Sstevel 	return (retval);
134303831d35Sstevel }
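
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	uint32_t to;
 *
 *	to = axq_read_sdi_timeout_reg(expid, slot, 1);	read and clear
 *	to = axq_read_sdi_timeout_reg(expid, slot, 0);	read, leave intact
 */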
134403831d35Sstevel 
134503831d35Sstevel 
134603831d35Sstevel /*
134703831d35Sstevel  * Routine to create a kstat for each %pic that
134803831d35Sstevel  * the AXQ has (there are 3 of them). These read-only
134903831d35Sstevel  * kstats export event names that the respective %pic
135003831d35Sstevel  * supports. Pic0 and Pic1 are similar; both have
135103831d35Sstevel  * a 128-input mux. Pic2 counts the clock and can be
135203831d35Sstevel  * set up to count or freeze.
135303831d35Sstevel  * Since all AXQ instances use the same events, we only
135403831d35Sstevel  * need to create one set of the picN kstats.
135503831d35Sstevel  */
135603831d35Sstevel static void
135703831d35Sstevel axq_add_picN_kstats(dev_info_t *dip)
135803831d35Sstevel {
135903831d35Sstevel 	struct kstat_named *axq_pic_named_data;
136003831d35Sstevel 	int event, pic;
136103831d35Sstevel 	int instance = ddi_get_instance(dip);
136203831d35Sstevel 	int pic_shift = 0;
136303831d35Sstevel 
136403831d35Sstevel 	/*
136503831d35Sstevel 	 * Create a picN kstat for each pic. Pic0 and Pic1
136603831d35Sstevel 	 * share a similar set of events. Add one extra
136703831d35Sstevel 	 * entry for the clear_pic mask.
136803831d35Sstevel 	 */
136903831d35Sstevel 	for (pic = 0; pic < AXQ_NUM_PICS; pic++) {
137003831d35Sstevel 		char pic_name[20];
137103831d35Sstevel 		int num_events, i;
137203831d35Sstevel 
137303831d35Sstevel 		(void) sprintf(pic_name, "pic%d", pic);
137403831d35Sstevel 
137503831d35Sstevel 		num_events = (pic <= 1) ? AXQ_PIC0_1_NUM_EVENTS :
137603831d35Sstevel 		    AXQ_PIC2_NUM_EVENTS;
137703831d35Sstevel 
137803831d35Sstevel 		if ((axq_picN_ksp[pic] = kstat_create("axq",
137903831d35Sstevel 		    instance, pic_name, "bus", KSTAT_TYPE_NAMED,
138003831d35Sstevel 		    num_events + 1, NULL)) == NULL) {
138103831d35Sstevel 			cmn_err(CE_WARN, "axq %s: kstat_create failed",
138203831d35Sstevel 			    pic_name);
138303831d35Sstevel 
138403831d35Sstevel 			/* remove pic kstats that were created earlier */
138503831d35Sstevel 			for (i = 0; i < pic; i++) {
138603831d35Sstevel 				kstat_delete(axq_picN_ksp[i]);
138703831d35Sstevel 				axq_picN_ksp[i] = NULL;
138803831d35Sstevel 			}
138903831d35Sstevel 			return;
139003831d35Sstevel 		}
139103831d35Sstevel 
139203831d35Sstevel 		axq_pic_named_data =
139303831d35Sstevel 		    (struct kstat_named *)(axq_picN_ksp[pic]->ks_data);
139403831d35Sstevel 
139503831d35Sstevel 		pic_shift = pic * AXQ_PIC_SHIFT;
139603831d35Sstevel 
139703831d35Sstevel 		/*
139803831d35Sstevel 		 * For each picN event, write a kstat record with
139903831d35Sstevel 		 * name = EVENT and value.ui64 = PCR_MASK.
140003831d35Sstevel 		 */
140103831d35Sstevel 		for (event = 0; event < num_events; event++) {
140203831d35Sstevel 			/* pcr_mask */
140303831d35Sstevel 			axq_pic_named_data[event].value.ui64 =
140403831d35Sstevel 			    axq_events[event].pcr_mask << pic_shift;
140503831d35Sstevel 
140603831d35Sstevel 			/* event name */
140703831d35Sstevel 			kstat_named_init(&axq_pic_named_data[event],
140803831d35Sstevel 			    axq_events[event].event_name,
140903831d35Sstevel 			    KSTAT_DATA_UINT64);
141003831d35Sstevel 		}
141103831d35Sstevel 
141203831d35Sstevel 		/*
141303831d35Sstevel 		 * Add the clear pic event and mask as the last
141403831d35Sstevel 		 * record in the kstat.
141503831d35Sstevel 		 */
141603831d35Sstevel 		axq_pic_named_data[num_events].value.ui64 =
141703831d35Sstevel 		    (uint32_t)~(AXQ_PIC_CLEAR_MASK << pic_shift);
141803831d35Sstevel 
141903831d35Sstevel 		kstat_named_init(&axq_pic_named_data[num_events],
142003831d35Sstevel 		    "clear_pic", KSTAT_DATA_UINT64);
142103831d35Sstevel 
142203831d35Sstevel 		kstat_install(axq_picN_ksp[pic]);
142303831d35Sstevel 	}
142403831d35Sstevel }
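
/*
 * Consumption note (assumption about typical usage, not driver code):
 * the picN kstats are normally read from userland, for example with
 * kstat(1M):
 *
 *	kstat -m axq -n pic0
 *
 * which lists each event name together with the %pcr mask that selects
 * it, plus the trailing clear_pic mask.
 */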
142503831d35Sstevel 
142603831d35Sstevel 
142703831d35Sstevel static  void
142803831d35Sstevel axq_add_kstats(struct axq_soft_state *softsp)
142903831d35Sstevel {
143003831d35Sstevel 	struct kstat *axq_counters_ksp;
143103831d35Sstevel 	struct kstat_named *axq_counters_named_data;
143203831d35Sstevel 
143303831d35Sstevel 	/*
143403831d35Sstevel 	 * Create the picN kstats if we are the first instance
143503831d35Sstevel 	 * to attach. We use axq_attachcnt as a count of how
143603831d35Sstevel 	 * many instances have attached; axq_attachcnt is
143703831d35Sstevel 	 * protected by axq_attachcnt_lock.
143803831d35Sstevel 	 */
143903831d35Sstevel 	mutex_enter(&axq_attachcnt_lock);
144003831d35Sstevel 	if (axq_attachcnt++ == 0)
144103831d35Sstevel 		axq_add_picN_kstats(softsp->dip);
144203831d35Sstevel 
144303831d35Sstevel 	mutex_exit(&axq_attachcnt_lock);
144403831d35Sstevel 
144503831d35Sstevel 	/*
144603831d35Sstevel 	 * A "counters" kstat is created for each axq
144703831d35Sstevel 	 * instance; it provides access to the %pcr and %pic
144803831d35Sstevel 	 * registers for that instance.
144903831d35Sstevel 	 *
145003831d35Sstevel 	 * The kstat has AXQ_NUM_PICS + 1 entries; the extra entry is for %pcr.
145103831d35Sstevel 	 */
145203831d35Sstevel 	if ((axq_counters_ksp = kstat_create("axq",
145303831d35Sstevel 	    ddi_get_instance(softsp->dip), "counters",
145403831d35Sstevel 	    "bus", KSTAT_TYPE_NAMED, AXQ_NUM_PICS + 1,
145503831d35Sstevel 	    KSTAT_FLAG_WRITABLE)) == NULL) {
145603831d35Sstevel 		cmn_err(CE_WARN, "axq%d counters: kstat_create"
145703831d35Sstevel 		    " failed", ddi_get_instance(softsp->dip));
145803831d35Sstevel 		return;
145903831d35Sstevel 	}
146003831d35Sstevel 
146103831d35Sstevel 	axq_counters_named_data =
146203831d35Sstevel 	    (struct kstat_named *)(axq_counters_ksp->ks_data);
146303831d35Sstevel 
146403831d35Sstevel 	/* initialize the named kstats */
146503831d35Sstevel 	kstat_named_init(&axq_counters_named_data[0],
146603831d35Sstevel 	    "pcr", KSTAT_DATA_UINT32);
146703831d35Sstevel 
146803831d35Sstevel 	kstat_named_init(&axq_counters_named_data[1],
146903831d35Sstevel 	    "pic0", KSTAT_DATA_UINT32);
147003831d35Sstevel 
147103831d35Sstevel 	kstat_named_init(&axq_counters_named_data[2],
147203831d35Sstevel 	    "pic1", KSTAT_DATA_UINT32);
147303831d35Sstevel 
147403831d35Sstevel 	kstat_named_init(&axq_counters_named_data[3],
147503831d35Sstevel 	    "pic2", KSTAT_DATA_UINT32);
147603831d35Sstevel 
147703831d35Sstevel 	axq_counters_ksp->ks_update = axq_counters_kstat_update;
147803831d35Sstevel 	axq_counters_ksp->ks_private = (void *)softsp;
147903831d35Sstevel 
148003831d35Sstevel 	kstat_install(axq_counters_ksp);
148103831d35Sstevel 
148203831d35Sstevel 	/* update the softstate */
148303831d35Sstevel 	softsp->axq_counters_ksp = axq_counters_ksp;
148403831d35Sstevel }
148503831d35Sstevel 
148603831d35Sstevel 
148703831d35Sstevel static  int
148803831d35Sstevel axq_counters_kstat_update(kstat_t *ksp, int rw)
148903831d35Sstevel {
149003831d35Sstevel 	struct kstat_named *axq_counters_data;
149103831d35Sstevel 	struct axq_soft_state *softsp;
149203831d35Sstevel 
149303831d35Sstevel 	axq_counters_data = (struct kstat_named *)ksp->ks_data;
149403831d35Sstevel 	softsp = (struct axq_soft_state *)ksp->ks_private;
149503831d35Sstevel 
149603831d35Sstevel 	if (rw == KSTAT_WRITE) {
149703831d35Sstevel 		/*
149803831d35Sstevel 		 * Write the pcr value to softsp->axq_pcr.
149903831d35Sstevel 		 * The pic registers are read-only, so we don't
150003831d35Sstevel 		 * attempt to write to them.
150103831d35Sstevel 		 */
150203831d35Sstevel 		*softsp->axq_pcr = (uint32_t)axq_counters_data[0].value.ui64;
150303831d35Sstevel 	} else {
150403831d35Sstevel 		/*
150503831d35Sstevel 		 * Read %pcr and %pic register values and write them
150603831d35Sstevel 		 * into the counters kstat.
150803831d35Sstevel 		 */
150903831d35Sstevel 
151003831d35Sstevel 		/* pcr */
151103831d35Sstevel 		axq_counters_data[0].value.ui64 = (uint64_t)
151203831d35Sstevel 		    (*softsp->axq_pcr);
151303831d35Sstevel 
151403831d35Sstevel 		/* pic0 */
151503831d35Sstevel 		axq_counters_data[1].value.ui64 = (uint64_t)
151603831d35Sstevel 		    (*softsp->axq_pic0);
151703831d35Sstevel 
151803831d35Sstevel 		/* pic1 */
151903831d35Sstevel 		axq_counters_data[2].value.ui64 = (uint64_t)
152003831d35Sstevel 		    *softsp->axq_pic1;
152103831d35Sstevel 
152203831d35Sstevel 		/* pic2 */
152303831d35Sstevel 		axq_counters_data[3].value.ui64 = (uint64_t)
152403831d35Sstevel 		    *softsp->axq_pic2;
152503831d35Sstevel 	}
152603831d35Sstevel 	return (0);
152703831d35Sstevel }
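
/*
 * Consumption note (assumption, not driver code): busstat(1M) is the
 * usual consumer of "bus"-class counter kstats that follow this
 * pcr/picN/clear_pic convention; an illustrative invocation might be:
 *
 *	busstat -w axq0,pic0=<event>,pic1=<event>
 *
 * which programs %pcr through the KSTAT_WRITE path above and then
 * samples the %pic values through the read path.
 */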
152803831d35Sstevel 
152903831d35Sstevel struct gptwo_phys_spec {
153003831d35Sstevel 	uint_t gptwo_phys_hi;   /* child's address, hi word */
153103831d35Sstevel 	uint_t gptwo_phys_low;  /* child's address, low word */
153203831d35Sstevel 	uint_t gptwo_size_hi;   /* high word of size field */
153303831d35Sstevel 	uint_t gptwo_size_low;  /* low word of size field */
153403831d35Sstevel };
153503831d35Sstevel 
153603831d35Sstevel int axq_pio_workaround_disable = 0;
153703831d35Sstevel int axq_pio_limit = 3;
153803831d35Sstevel 
153903831d35Sstevel int
154003831d35Sstevel starcat_axq_pio_workaround(dev_info_t *dip)
154103831d35Sstevel {
154203831d35Sstevel 	dev_info_t *axq_dip, *cdip, *pdip;
154303831d35Sstevel 	int portid, axq_portid;
154403831d35Sstevel 	char *name;
154503831d35Sstevel 	int size, circ;
154603831d35Sstevel 	uint_t *base_addr, *io_domain_control_addr;
154703831d35Sstevel 	int32_t io_domain_control;
154803831d35Sstevel 	ddi_device_acc_attr_t acc;
154903831d35Sstevel 	ddi_acc_handle_t handle;
155003831d35Sstevel 	struct gptwo_phys_spec *gptwo_spec;
155103831d35Sstevel 	struct regspec phys_spec;
155203831d35Sstevel 
155303831d35Sstevel 	if (axq_pio_workaround_disable)
155403831d35Sstevel 		return (0);
155503831d35Sstevel 
155603831d35Sstevel 	/*
155703831d35Sstevel 	 * Get the portid for the PCI (Schizo) device.
155803831d35Sstevel 	 */
155903831d35Sstevel 	if ((portid = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "portid", -1)) < 0) {
156003831d35Sstevel 		cmn_err(CE_WARN, "%s: no portid\n", ddi_get_name(dip));
156103831d35Sstevel 		return (0);
156203831d35Sstevel 	}
156303831d35Sstevel 
156403831d35Sstevel 	/*
156503831d35Sstevel 	 * Calculate the portid for the Slot 1 AXQ.  The portids are
156603831d35Sstevel 	 * laid out as follows:
156703831d35Sstevel 	 *	Schizo 0	EEEEE11100
156803831d35Sstevel 	 *	Schizo 1	EEEEE11101
156903831d35Sstevel 	 *	AXQ 0		EEEEE11110
157003831d35Sstevel 	 *	AXQ 1		EEEEE11111
157103831d35Sstevel 	 * where EEEEE is the 5-bit expander number.  The portid for
157203831d35Sstevel 	 * AXQ 1 can therefore be calculated by ORing 3 into the portid
157303831d35Sstevel 	 * of Schizo 0 or 1.
157403831d35Sstevel 	axq_portid = portid | 3;
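	/*
	 * Worked example (illustration only): Schizo 0 on expander 5
	 * has portid 0b0010111100 (0xbc); 0xbc | 3 == 0xbf, i.e.
	 * 0b0010111111, the Slot 1 AXQ on the same expander.
	 */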
157503831d35Sstevel 
157603831d35Sstevel 	/*
157703831d35Sstevel 	 * Look for AXQ nodes that have the portid we calculated.
157803831d35Sstevel 	 */
157903831d35Sstevel 	axq_dip = NULL;
158003831d35Sstevel 	pdip = ddi_root_node();
158103831d35Sstevel 	ndi_devi_enter(pdip, &circ);
158203831d35Sstevel 	for (cdip = ddi_get_child(pdip); cdip != NULL;
158303831d35Sstevel 	    cdip = ddi_get_next_sibling(cdip)) {
158403831d35Sstevel 
158503831d35Sstevel 		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip,
158603831d35Sstevel 		    DDI_PROP_DONTPASS, "name", (caddr_t)&name, &size)
158703831d35Sstevel 		    != DDI_PROP_SUCCESS) {
158803831d35Sstevel 			continue;
158903831d35Sstevel 		}
159003831d35Sstevel 
159103831d35Sstevel 		if (strcmp(name, "address-extender-queue") != 0) {
159203831d35Sstevel 			kmem_free(name, size);
159303831d35Sstevel 			continue;
159403831d35Sstevel 		}
159503831d35Sstevel 
159603831d35Sstevel 		/*
159703831d35Sstevel 		 * Found an AXQ node.
159803831d35Sstevel 		 */
159903831d35Sstevel 
160003831d35Sstevel 		kmem_free(name, size);
160103831d35Sstevel 
160203831d35Sstevel 		portid = ddi_getprop(DDI_DEV_T_ANY, cdip, 0, "portid", -1);
160303831d35Sstevel 
160403831d35Sstevel 		if (portid == axq_portid) {
160503831d35Sstevel 
160603831d35Sstevel 			/*
160703831d35Sstevel 			 * We found the correct AXQ node.
160803831d35Sstevel 			 */
160903831d35Sstevel 			ndi_hold_devi(cdip);
161003831d35Sstevel 			axq_dip = cdip;
161103831d35Sstevel 			break;
161203831d35Sstevel 		}
161303831d35Sstevel 	}
161403831d35Sstevel 	ndi_devi_exit(pdip, circ);
161503831d35Sstevel 
161603831d35Sstevel 	if (axq_dip == NULL) {
161703831d35Sstevel 		cmn_err(CE_WARN, "can't find axq node with portid=0x%x\n",
161803831d35Sstevel 		    axq_portid);
161903831d35Sstevel 		return (0);
162003831d35Sstevel 	}
162103831d35Sstevel 
162203831d35Sstevel 	if (ddi_getlongprop(DDI_DEV_T_ANY, axq_dip, DDI_PROP_DONTPASS, "reg",
162303831d35Sstevel 	    (caddr_t)&gptwo_spec, &size) != DDI_PROP_SUCCESS) {
162403831d35Sstevel 		cmn_err(CE_WARN, "%s: no regspec\n", ddi_get_name(axq_dip));
162503831d35Sstevel 		ndi_rele_devi(axq_dip);
162603831d35Sstevel 		return (0);
162703831d35Sstevel 	}
162803831d35Sstevel 
162903831d35Sstevel 	phys_spec.regspec_bustype = gptwo_spec->gptwo_phys_hi;
163003831d35Sstevel 	phys_spec.regspec_addr = gptwo_spec->gptwo_phys_low;
163103831d35Sstevel 	phys_spec.regspec_size = gptwo_spec->gptwo_size_low;
163203831d35Sstevel 
163303831d35Sstevel 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
163403831d35Sstevel 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
163503831d35Sstevel 	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
163603831d35Sstevel 
163703831d35Sstevel 	if (axq_map_phys(axq_dip, &phys_spec, (caddr_t *)&base_addr,
163803831d35Sstevel 	    &acc, &handle)) {
163903831d35Sstevel 		cmn_err(CE_WARN, "%s: map phys failed\n",
164003831d35Sstevel 		    ddi_get_name(axq_dip));
164103831d35Sstevel 		kmem_free(gptwo_spec, size);
164203831d35Sstevel 		ndi_rele_devi(axq_dip);
164303831d35Sstevel 		return (0);
164403831d35Sstevel 	}
164503831d35Sstevel 
164603831d35Sstevel 	kmem_free(gptwo_spec, size);
164703831d35Sstevel 
164803831d35Sstevel 	io_domain_control_addr = REG_ADDR(base_addr, AXQ_SLOT1_DOMCTRL);
164903831d35Sstevel 
165003831d35Sstevel 	if (ddi_peek32(axq_dip, (int32_t *)io_domain_control_addr,
165103831d35Sstevel 	    (int32_t *)&io_domain_control)) {
165203831d35Sstevel 		cmn_err(CE_WARN, "%s: peek failed\n", ddi_get_name(axq_dip));
165303831d35Sstevel 		ndi_rele_devi(axq_dip);
165403831d35Sstevel 		return (0);
165503831d35Sstevel 	}
165603831d35Sstevel 
165703831d35Sstevel 	axq_unmap_phys(&handle);
165803831d35Sstevel 
165903831d35Sstevel 	ndi_rele_devi(axq_dip);
166003831d35Sstevel 
166103831d35Sstevel 	/*
166203831d35Sstevel 	 * If bit 6 of the IO Domain Control Register is a one,
166303831d35Sstevel 	 * then this AXQ version does not have the PIO Limit problem.
166403831d35Sstevel 	 */
166503831d35Sstevel 	if (io_domain_control & AXQ_DOMCTRL_PIOFIX)
166603831d35Sstevel 		return (0);
166703831d35Sstevel 
166803831d35Sstevel 	return (axq_pio_limit);
166903831d35Sstevel }
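
/*
 * Caller's view (sketch under assumptions, not part of this file): a
 * return of 0 means no PIO throttling is required (workaround disabled,
 * no matching AXQ found, or an AXQ with the PIOFIX); a non-zero return
 * is the PIO limit the nexus should enforce:
 *
 *	int limit = starcat_axq_pio_workaround(dip);
 *	if (limit != 0)
 *		cap outstanding PIOs for this leaf at "limit"
 */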
167003831d35Sstevel 
167103831d35Sstevel static int
167203831d35Sstevel axq_map_phys(dev_info_t *dip, struct regspec *phys_spec,
167303831d35Sstevel 	caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
167403831d35Sstevel 	ddi_acc_handle_t *handlep)
167503831d35Sstevel {
167603831d35Sstevel 	ddi_map_req_t mr;
167703831d35Sstevel 	ddi_acc_hdl_t *hp;
167803831d35Sstevel 	int result;
167903831d35Sstevel 	struct regspec *ph;
168003831d35Sstevel 
168103831d35Sstevel 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
168203831d35Sstevel 	hp = impl_acc_hdl_get(*handlep);
168303831d35Sstevel 	hp->ah_vers = VERS_ACCHDL;
168403831d35Sstevel 	hp->ah_dip = dip;
168503831d35Sstevel 	hp->ah_rnumber = 0;
168603831d35Sstevel 	hp->ah_offset = 0;
168703831d35Sstevel 	hp->ah_len = 0;
168803831d35Sstevel 	hp->ah_acc = *accattrp;
168903831d35Sstevel 	ph = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
169003831d35Sstevel 	*ph = *phys_spec;
169103831d35Sstevel 	hp->ah_bus_private = ph;	/* cache a copy of the reg spec */
169203831d35Sstevel 
169303831d35Sstevel 	mr.map_op = DDI_MO_MAP_LOCKED;
169403831d35Sstevel 	mr.map_type = DDI_MT_REGSPEC;
169503831d35Sstevel 	mr.map_obj.rp = phys_spec;
169603831d35Sstevel 	mr.map_prot = PROT_READ | PROT_WRITE;
169703831d35Sstevel 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
169803831d35Sstevel 	mr.map_handlep = hp;
169903831d35Sstevel 	mr.map_vers = DDI_MAP_VERSION;
170003831d35Sstevel 
170103831d35Sstevel 	result = ddi_map(dip, &mr, 0, 0, addrp);
170203831d35Sstevel 
170303831d35Sstevel 	if (result != DDI_SUCCESS) {
170403831d35Sstevel 		impl_acc_hdl_free(*handlep);
170503831d35Sstevel 		*handlep = NULL;
170603831d35Sstevel 	} else {
170703831d35Sstevel 		hp->ah_addr = *addrp;
170803831d35Sstevel 	}
170903831d35Sstevel 
171003831d35Sstevel 	return (result);
171103831d35Sstevel }
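
/*
 * axq_map_phys() and axq_unmap_phys() are used as a matched pair, as in
 * starcat_axq_pio_workaround() above (sketch, error handling omitted):
 *
 *	if (axq_map_phys(dip, &phys_spec, (caddr_t *)&base_addr,
 *	    &acc, &handle) == DDI_SUCCESS) {
 *		access the mapped registers through base_addr
 *		axq_unmap_phys(&handle);
 *	}
 */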
171203831d35Sstevel 
171303831d35Sstevel static void
171403831d35Sstevel axq_unmap_phys(ddi_acc_handle_t *handlep)
171503831d35Sstevel {
171603831d35Sstevel 	ddi_map_req_t mr;
171703831d35Sstevel 	ddi_acc_hdl_t *hp;
171803831d35Sstevel 	struct regspec *ph;
171903831d35Sstevel 
172003831d35Sstevel 	hp = impl_acc_hdl_get(*handlep);
172103831d35Sstevel 	ASSERT(hp);
172203831d35Sstevel 	ph = hp->ah_bus_private;
172303831d35Sstevel 
172403831d35Sstevel 	mr.map_op = DDI_MO_UNMAP;
172503831d35Sstevel 	mr.map_type = DDI_MT_REGSPEC;
172603831d35Sstevel 	mr.map_obj.rp = ph;
172703831d35Sstevel 	mr.map_prot = PROT_READ | PROT_WRITE;
172803831d35Sstevel 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
172903831d35Sstevel 	mr.map_handlep = hp;
173003831d35Sstevel 	mr.map_vers = DDI_MAP_VERSION;
173103831d35Sstevel 
173203831d35Sstevel 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
173303831d35Sstevel 	    hp->ah_len, &hp->ah_addr);
173403831d35Sstevel 
173503831d35Sstevel 	impl_acc_hdl_free(*handlep);
173603831d35Sstevel 	kmem_free(ph, sizeof (struct regspec));	/* Free the cached copy */
173703831d35Sstevel 	*handlep = NULL;
173803831d35Sstevel }
173903831d35Sstevel 
174003831d35Sstevel /* ARGSUSED */
174103831d35Sstevel static boolean_t
174203831d35Sstevel axq_panic_callb(void *arg, int code)
174303831d35Sstevel {
174403831d35Sstevel 	axq_iopause_disable_all();
174503831d35Sstevel 	return (B_TRUE);
174603831d35Sstevel }
1747