xref: /titanic_51/usr/src/uts/sun4u/io/px/px_hlib.c (revision 65488c97aeb108aeffd7b61db3b2b3bcb4fc9d72)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/cmn_err.h>
28 #include <sys/vmsystm.h>
29 #include <sys/vmem.h>
30 #include <sys/machsystm.h>	/* lddphys() */
31 #include <sys/iommutsb.h>
32 #include <sys/pci.h>
33 #include <sys/hotplug/pci/pciehpc.h>
34 #include <px_obj.h>
35 #include "px_regs.h"
36 #include "oberon_regs.h"
37 #include "px_csr.h"
38 #include "px_lib4u.h"
39 #include "px_err.h"
40 
41 /*
42  * Registers that need to be saved and restored during suspend/resume.
43  */
44 
/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 *
 * This array is in reg,chip form. PX_CHIP_UNIDENTIFIED is for all chips
 * or PX_CHIP_FIRE for Fire only, or PX_CHIP_OBERON for Oberon only.
 */
static struct px_pec_regs {
	uint64_t reg;	/* CSR offset of the register to save/restore */
	uint64_t chip;	/* chip the entry applies to (PX_CHIP_UNIDENTIFIED = all) */
} pec_config_state_regs[] = {
	{PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	/* DLU registers exist only on Oberon */
	{DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
	{DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
	{DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
	/* LPU registers exist only on Fire */
	{LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RESET, PX_CHIP_FIRE},
	{LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
	{LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
	{LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
	{LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
	{LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
	{DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};

/* Number of entries in pec_config_state_regs */
#define	PEC_KEYS	\
	((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))

/* Bytes needed to hold one saved 64-bit value per entry */
#define	PEC_SIZE	(PEC_KEYS * sizeof (uint64_t))
99 
/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
/* Bytes of MMU save state */
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
/* Number of MMU registers saved/restored */
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))
112 
/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
/* Bytes of IB save state */
#define	IB_SIZE (sizeof (ib_config_state_regs))
/* Number of IB registers saved/restored */
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
/* Bytes needed to save the full interrupt-mapping register set */
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))
123 
/*
 * Registers for the JBC module (Fire).
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	jbc_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
/* Bytes of JBC save state */
#define	JBC_SIZE (sizeof (jbc_config_state_regs))
/* Number of JBC registers saved/restored */
#define	JBC_KEYS (JBC_SIZE / sizeof (uint64_t))
137 
/*
 * Registers for the UBC module (Oberon).
 * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	ubc_config_state_regs[] = {
	UBC_ERROR_LOG_ENABLE,
	UBC_INTERRUPT_ENABLE
};
/* Bytes of UBC save state */
#define	UBC_SIZE (sizeof (ubc_config_state_regs))
/* Number of UBC registers saved/restored */
#define	UBC_KEYS (UBC_SIZE / sizeof (uint64_t))
148 
/*
 * Non event-queue MSIQ registers: error/PM message mapping registers
 * and the 32/64-bit MSI address registers.
 */
static uint64_t	msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
/* Bytes of "other" MSIQ save state */
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
/* Number of "other" MSIQ registers saved/restored */
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

/* Bytes needed to save all event-queue state registers */
#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
/* Bytes needed to save all MSI mapping registers */
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))
163 
/*
 * OPL tuning variables for link unstable issue.
 * Delay values appear to be in microseconds (5000000 == 5s per the
 * per-line defaults) -- NOTE(review): confirm against the consumers.
 * Left as patchable ints so they can be adjusted via /etc/system.
 */
int wait_perst = 5000000; 	/* step 9, default: 5s */
int wait_enable_port = 30000;	/* step 11, default: 30ms */
int link_retry_count = 2; 	/* step 11, default: 2 */
int link_status_check = 400000;	/* step 11, default: 400ms */
169 
170 static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
171 static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
172 static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
173 static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
174 
175 extern int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
176 extern int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
177 
178 /*
179  * Initialize the bus, but do not enable interrupts.
180  */
181 /* ARGSUSED */
182 void
183 hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
184 {
185 	switch (PX_CHIP_TYPE(pxu_p)) {
186 	case PX_CHIP_OBERON:
187 		ubc_init(xbc_csr_base, pxu_p);
188 		break;
189 	case PX_CHIP_FIRE:
190 		jbc_init(xbc_csr_base, pxu_p);
191 		break;
192 	default:
193 		DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
194 		    PX_CHIP_TYPE(pxu_p));
195 		break;
196 	}
197 }
198 
/*
 * Initialize the JBC module (Fire), but do not enable interrupts.
 *
 * Programs JBus parity checking, the fatal-reset enables and the
 * block-level interrupt enable, then logs the JBC error/interrupt
 * registers for debugging.
 */
/* ARGSUSED */
static void
jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * Enable inverted parity on the JBus (P_EN).  NOTE(review): set
	 * unconditionally here despite the historical "check if we need
	 * to enable" wording.
	 */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	/* Arm fatal-reset generation for the listed JBC error classes. */
	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V JBC's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}
249 
/*
 * Initialize the UBC module (Oberon), but do not enable interrupts.
 *
 * Enables all Uranus bus error logging, clears any latched errors,
 * then logs the UBC error/interrupt registers for debugging.
 */
/* ARGSUSED */
static void
ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	/*
	 * Enable Uranus bus error log bits.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	/*
	 * Clear Uranus bus errors.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V UBC's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
}
286 
/*
 * Initialize the IB module, but do not enable interrupts.
 *
 * No registers are written here; the IMU error/interrupt registers
 * are only read and logged for debugging.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}
309 
/*
 * Initialize the ILU module, but do not enable interrupts.
 *
 * No registers are written here; the ILU error/interrupt registers
 * are only read and logged for debugging.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}
332 
/*
 * Initialize the TLU module, but do not enable interrupts.
 *
 * Writes only TLU_CONTROL, TLU_DEVICE_CONTROL and TLU_LINK_CONTROL;
 * every other TLU register is read and logged (CSR_V checks against
 * the value OBP/HW is expected to have left there).
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms and
	 * OBP will set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * NOTE: Do not set the NPWR_EN bit.  The desired value of this bit
	 * will be set by OBP.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);
	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
	    TLU_CONTROL_CONFIG_DEFAULT;

	/*
	 * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
	 * behind non-posted PIO writes. This blocking could cause a master or
	 * slave timeout on the host bus if multiple serialized PIOs were to
	 * suffer Completion Timeouts because the CTO delays for each PIO ahead
	 * of the read would accumulate. Since the Olympus processor can have
	 * only 1 PIO outstanding, there is no possibility of PIO accesses from
	 * a given CPU to a given device being re-ordered by the PCIe fabric;
	 * therefore turning off serialization should be safe from a PCIe
	 * ordering perspective.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		val &= ~(1ull << TLU_CONTROL_NPWR_EN);

	/*
	 * Set Detect.Quiet. This will disable automatic link
	 * re-training, if the link goes down e.g. power management
	 * turns off power to the downstream device. This will enable
	 * Fire to go to Drain state, after link down. The drain state
	 * forces a reset to the FC state machine, which is required for
	 * proper link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bit [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */

	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes which is 000b.  Bits [7:5] are Max Payload Size, which
	 * start at 128 bytes which is 000b.  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used since then L0s exit
	 * latency should be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));


	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}
726 
727 /* ARGSUSED */
728 static void
729 lpu_init(caddr_t csr_base, pxu_t *pxu_p)
730 {
731 	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
732 	int link_width, max_payload;
733 
734 	uint64_t val;
735 
736 	/*
737 	 * Get the Link Width.  See table above LINK_WIDTH_ARR_SIZE #define
738 	 * Only Link Widths of x1, x4, and x8 are supported.
739 	 * If any width is reported other than x8, set default to x8.
740 	 */
741 	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
742 	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);
743 
744 	/*
745 	 * Convert link_width to match timer array configuration.
746 	 */
747 	switch (link_width) {
748 	case 1:
749 		link_width = 0;
750 		break;
751 	case 4:
752 		link_width = 1;
753 		break;
754 	case 8:
755 		link_width = 2;
756 		break;
757 	case 16:
758 		link_width = 3;
759 		break;
760 	default:
761 		link_width = 0;
762 	}
763 
764 	/*
765 	 * Get the Max Payload Size.
766 	 * See table above LINK_MAX_PKT_ARR_SIZE #define
767 	 */
768 	max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
769 	    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);
770 
771 	DBG(DBG_LPU, NULL, "lpu_init - May Payload: %d\n",
772 	    (0x80 << max_payload));
773 
774 	/* Make sure the packet size is not greater than 4096 */
775 	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
776 	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
777 
778 	/*
779 	 * CSR_V LPU_ID Expect HW 0x0
780 	 */
781 
782 	/*
783 	 * This register has link id, phy id and gigablaze id.
784 	 * Should be set by HW.
785 	 */
786 	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
787 	    CSR_XR(csr_base, LPU_ID));
788 
789 	/*
790 	 * CSR_V LPU_RESET Expect Kernel 0x0
791 	 */
792 
793 	/*
794 	 * No reason to have any reset bits high until an error is
795 	 * detected on the link.
796 	 */
797 	val = 0ull;
798 	CSR_XS(csr_base, LPU_RESET, val);
799 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
800 	    CSR_XR(csr_base, LPU_RESET));
801 
802 	/*
803 	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
804 	 */
805 
806 	/*
807 	 * Bits [15:8] are Debug B, and bit [7:0] are Debug A.
808 	 * They are read-only.  What do the 8 bits mean, and
809 	 * how do they get set if they are read only?
810 	 */
811 	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
812 	    CSR_XR(csr_base, LPU_DEBUG_STATUS));
813 
814 	/*
815 	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
816 	 */
817 	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
818 	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));
819 
820 	/*
821 	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
822 	 */
823 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
824 	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));
825 
826 	/*
827 	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
828 	 */
829 
830 	/*
831 	 * This register has bits [9:4] for link width, and the
832 	 * default 0x10, means a width of x16.  The problem is
833 	 * this width is not supported according to the TLU
834 	 * link status register.
835 	 */
836 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
837 	    CSR_XR(csr_base, LPU_LINK_STATUS));
838 
839 	/*
840 	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
841 	 */
842 	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
843 	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));
844 
845 	/*
846 	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
847 	 */
848 	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
849 	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));
850 
851 	/*
852 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
853 	 */
854 	DBG(DBG_LPU, NULL,
855 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
856 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));
857 
858 	/*
859 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
860 	 */
861 	DBG(DBG_LPU, NULL,
862 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
863 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));
864 
865 	/*
866 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
867 	 */
868 	DBG(DBG_LPU, NULL,
869 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
870 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));
871 
872 	/*
873 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
874 	 */
875 	DBG(DBG_LPU, NULL,
876 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
877 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));
878 
879 	/*
880 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
881 	 */
882 	DBG(DBG_LPU, NULL,
883 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
884 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));
885 
886 	/*
887 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
888 	 */
889 	DBG(DBG_LPU, NULL,
890 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
891 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));
892 
893 	/*
894 	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
895 	 */
896 
897 	/*
898 	 * This is another place where Max Payload can be set,
899 	 * this time for the link layer.  It will be set to
900 	 * 128B, which is the default, but this will need to
901 	 * be revisited.
902 	 */
903 	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
904 	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
905 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
906 	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));
907 
908 	/*
909 	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
910 	 */
911 
912 	/*
913 	 * Another R/W status register.  Bit 3, DL up Status, will
914 	 * be set high.  The link state machine status bits [2:0]
915 	 * are set to 0x1, but the status bits are not defined in the
916 	 * PRM.  What does 0x1 mean, what others values are possible
917  * and what are their meanings?
918 	 *
919 	 * This register has been giving us problems in simulation.
920 	 * It has been mentioned that software should not program
921 	 * any registers with WE bits except during debug.  So
922 	 * this register will no longer be programmed.
923 	 */
924 
925 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
926 	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));
927 
928 	/*
929 	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
930 	 */
931 	DBG(DBG_LPU, NULL,
932 	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
933 	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));
934 
935 	/*
936 	 * CSR_V LPU Link Layer interrupt regs (mask, status)
937 	 */
938 	DBG(DBG_LPU, NULL,
939 	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
940 	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));
941 
942 	DBG(DBG_LPU, NULL,
943 	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
944 	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));
945 
946 	/*
947 	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
948 	 */
949 
950 	/*
951 	 * The PRM says that only the first two bits will be set
952 	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
954 	 * updates.
955 	 */
956 	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
957 	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
958 	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
959 	DBG(DBG_LPU, NULL,
960 	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
961 	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));
962 
963 	/*
964 	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
965 	 * Expect OBP 0x1D4C
966 	 */
967 
968 	/*
969 	 * This should be set by OBP.  We'll check to make sure.
970 	 */
971 	DBG(DBG_LPU, NULL, "lpu_init - "
972 	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
973 	    CSR_XR(csr_base,
974 	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));
975 
976 	/*
977 	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
978 	 */
979 
980 	/*
981 	 * This register has Flow Control Update Timer values for
982 	 * non-posted and posted requests, bits [30:16] and bits
983 	 * [14:0], respectively.  These are read-only to SW so
984 	 * either HW or OBP needs to set them.
985 	 */
986 	DBG(DBG_LPU, NULL, "lpu_init - "
987 	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
988 	    CSR_XR(csr_base,
989 	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));
990 
991 	/*
992 	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
993 	 */
994 
995 	/*
996 	 * Same as timer0 register above, except for bits [14:0]
	 * have the timer values for completions.  Read-only to
998 	 * SW; OBP or HW need to set it.
999 	 */
1000 	DBG(DBG_LPU, NULL, "lpu_init - "
1001 	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
1002 	    CSR_XR(csr_base,
1003 	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));
1004 
1005 	/*
1006 	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
1007 	 */
1008 	val = px_acknak_timer_table[max_payload][link_width];
1009 	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);
1010 
1011 	DBG(DBG_LPU, NULL, "lpu_init - "
1012 	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
1013 	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));
1014 
1015 	/*
1016 	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
1017 	 */
1018 	DBG(DBG_LPU, NULL,
1019 	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
1020 	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));
1021 
1022 	/*
1023 	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
1024 	 */
1025 	val = px_replay_timer_table[max_payload][link_width];
1026 	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
1027 
1028 	DBG(DBG_LPU, NULL,
1029 	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
1030 	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));
1031 
1032 	/*
1033 	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
1034 	 */
1035 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
1036 	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));
1037 
1038 	/*
1039 	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
1040 	 */
1041 	DBG(DBG_LPU, NULL,
1042 	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
1043 	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));
1044 
1045 	/*
1046 	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
1047 	 */
1048 	DBG(DBG_LPU, NULL,
1049 	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
1050 	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));
1051 
1052 	/*
1053 	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
1054 	 */
1055 	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
1056 	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
1057 	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
1058 	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));
1059 
1060 	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
1061 	DBG(DBG_LPU, NULL,
1062 	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
1063 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));
1064 
1065 	/*
1066 	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
1067 	 */
1068 	DBG(DBG_LPU, NULL,
1069 	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
1070 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));
1071 
1072 	/*
1073 	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
1074 	 */
1075 	DBG(DBG_LPU, NULL,
1076 	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
1077 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));
1078 
1079 	/*
1080 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
1081 	 */
1082 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
1083 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));
1084 
1085 	/*
1086 	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
1087 	 */
1088 	DBG(DBG_LPU, NULL,
1089 	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
1090 	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));
1091 
1092 	/*
1093 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
1094 	 */
1095 
1096 	/*
1097 	 * Test only register.  Will not be programmed.
1098 	 */
1099 	DBG(DBG_LPU, NULL,
1100 	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
1101 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));
1102 
1103 	/*
1104 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
1105 	 */
1106 
1107 	/*
1108 	 * Test only register.  Will not be programmed.
1109 	 */
1110 	DBG(DBG_LPU, NULL,
1111 	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
1112 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));
1113 
1114 	/*
1115 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
1116 	 */
1117 	DBG(DBG_LPU, NULL,
1118 	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
1119 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));
1120 
1121 	/*
1122 	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
1123 	 */
1124 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
1125 	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));
1126 
1127 	/*
1128 	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
1129 	 */
1130 
1131 	/*
1132 	 * Test only register.  Will not be programmed.
1133 	 */
1134 	DBG(DBG_LPU, NULL,
1135 	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
1136 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));
1137 
1138 	/*
1139 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
1140 	 */
1141 	DBG(DBG_LPU, NULL,
1142 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
1143 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));
1144 
1145 	/*
1146 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
1147 	 */
1148 	DBG(DBG_LPU, NULL,
1149 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
1150 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));
1151 
1152 	/*
1153 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
1154 	 */
1155 	DBG(DBG_LPU, NULL,
1156 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
1157 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));
1158 
1159 	/*
1160 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
1161 	 */
1162 	DBG(DBG_LPU, NULL,
1163 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
1164 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));
1165 
1166 	/*
1167 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
1168 	 */
1169 	DBG(DBG_LPU, NULL,
1170 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
1171 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));
1172 
1173 	/*
1174 	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
1175 	 */
1176 
1177 	/*
1178 	 * Test only register.  Will not be programmed.
1179 	 */
1180 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
1181 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));
1182 
1183 	/*
1184 	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
1185 	 */
1186 
1187 	/*
1188 	 * Test only register.  Will not be programmed.
1189 	 */
1190 	DBG(DBG_LPU, NULL,
1191 	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
1192 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));
1193 
1194 	/*
1195 	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
1196 	 */
1197 
1198 	/*
1199 	 * Test only register.
1200 	 */
1201 	DBG(DBG_LPU, NULL,
1202 	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
1203 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));
1204 
1205 	/*
1206 	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
1207 	 */
1208 	DBG(DBG_LPU, NULL, "lpu_init - "
1209 	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
1210 	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));
1211 
1212 	/*
1213 	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
1214 	 */
1215 
1216 	/*
1217 	 * test only register.
1218 	 */
1219 	DBG(DBG_LPU, NULL,
1220 	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
1221 	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));
1222 
1223 	/*
1224 	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
1225 	 */
1226 
1227 	/*
1228 	 * test only register.
1229 	 */
1230 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
1231 	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));
1232 
1233 	/*
1234 	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
1235 	 */
1236 	DBG(DBG_LPU, NULL,
1237 	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
1238 	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));
1239 
1240 	/*
1241 	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
1242 	 */
1243 	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
1244 	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));
1245 
1246 	/*
1247 	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1248 	 */
1249 	DBG(DBG_LPU, NULL,
1250 	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1251 	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));
1252 
1253 	/*
1254 	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
1255 	 */
1256 	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
1257 	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));
1258 
1259 	DBG(DBG_LPU, NULL,
1260 	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
1261 	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));
1262 
1263 	/*
1264 	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
1265 	 */
1266 
1267 	/*
1268 	 * This also needs some explanation.  What is the best value
1269 	 * for the water mark?  Test mode enables which test mode?
1270 	 * Programming model needed for the Receiver Reset Lane N
1271 	 * bits.
1272 	 */
1273 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
1274 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));
1275 
1276 	/*
1277 	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
1278 	 */
1279 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
1280 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));
1281 
1282 	/*
1283 	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
1284 	 */
1285 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
1286 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));
1287 
1288 	/*
1289 	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
1290 	 */
1291 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
1292 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));
1293 
1294 	/*
1295 	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1296 	 */
1297 	DBG(DBG_LPU, NULL,
1298 	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1299 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));
1300 
1301 	/*
1302 	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
1303 	 */
1304 	DBG(DBG_LPU, NULL,
1305 	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
1306 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));
1307 
1308 	DBG(DBG_LPU, NULL,
1309 	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
1310 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));
1311 
1312 	/*
1313 	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
1314 	 */
1315 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
1316 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));
1317 
1318 	/*
1319 	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
1320 	 */
1321 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
1322 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));
1323 
1324 	/*
1325 	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1326 	 */
1327 	DBG(DBG_LPU, NULL,
1328 	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1329 	    CSR_XR(csr_base,
1330 	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));
1331 
1332 	/*
1333 	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
1334 	 */
1335 	DBG(DBG_LPU, NULL,
1336 	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
1337 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));
1338 
1339 	DBG(DBG_LPU, NULL,
1340 	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
1341 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));
1342 
1343 	/*
1344 	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
1345 	 */
1346 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
1347 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));
1348 
1349 	/*
1350 	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
1351 	 */
1352 
1353 	/*
1354 	 * The new PRM has values for LTSSM 8 ns timeout value and
1355 	 * LTSSM 20 ns timeout value.  But what do these values mean?
1356 	 * Most of the other bits are questions as well.
1357 	 *
1358 	 * As such we will use the reset value.
1359 	 */
1360 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
1361 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));
1362 
1363 	/*
1364 	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
1365 	 */
1366 
1367 	/*
1368 	 * Again, what does '12 ms timeout value mean'?
1369 	 */
1370 	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
1371 	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
1372 	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
1373 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
1374 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));
1375 
1376 	/*
1377 	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
1378 	 */
1379 	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
1380 	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
1381 	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
1382 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
1383 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));
1384 
1385 	/*
1386 	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
1387 	 */
1388 	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
1389 	    LPU_LTSSM_CONFIG4_DATA_RATE) |
1390 	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
1391 	    LPU_LTSSM_CONFIG4_N_FTS));
1392 	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
1393 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
1394 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));
1395 
1396 	/*
1397 	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
1398 	 */
1399 	val = 0ull;
1400 	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
1401 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
1402 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));
1403 
1404 	/*
1405 	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
1406 	 */
1407 
1408 	/*
1409 	 * LTSSM Status registers are test only.
1410 	 */
1411 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
1412 	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));
1413 
1414 	/*
1415 	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
1416 	 */
1417 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
1418 	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));
1419 
1420 	/*
1421 	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1422 	 */
1423 	DBG(DBG_LPU, NULL,
1424 	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1425 	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));
1426 
1427 	/*
1428 	 * CSR_V LPU LTSSM  LAYER interrupt regs (mask, status)
1429 	 */
1430 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
1431 	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));
1432 
1433 	DBG(DBG_LPU, NULL,
1434 	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
1435 	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));
1436 
1437 	/*
1438 	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
1439 	 */
1440 	DBG(DBG_LPU, NULL,
1441 	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
1442 	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));
1443 
1444 	/*
1445 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
1446 	 */
1447 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
1448 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));
1449 
1450 	/*
1451 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
1452 	 */
1453 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
1454 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));
1455 
1456 	/*
1457 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
1458 	 */
1459 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
1460 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));
1461 
1462 	/*
1463 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
1464 	 */
1465 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
1466 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));
1467 
1468 	/*
1469 	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
1470 	 */
1471 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
1472 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));
1473 
1474 	/*
1475 	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
1476 	 */
1477 	DBG(DBG_LPU, NULL, "lpu_init - "
1478 	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1479 	    CSR_XR(csr_base,
1480 	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));
1481 
1482 	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
1484 	 */
1485 	DBG(DBG_LPU, NULL,
1486 	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
1487 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));
1488 
1489 	DBG(DBG_LPU, NULL,
1490 	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
1491 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));
1492 
1493 	/*
1494 	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
1495 	 */
1496 	DBG(DBG_LPU, NULL,
1497 	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
1498 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));
1499 
1500 	/*
1501 	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
1502 	 */
1503 	DBG(DBG_LPU, NULL,
1504 	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
1505 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));
1506 
1507 	/*
1508 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
1509 	 */
1510 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
1511 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
1512 }
1513 
1514 /* ARGSUSED */
1515 static void
1516 dlu_init(caddr_t csr_base, pxu_t *pxu_p)
1517 {
1518 uint64_t val;
1519 
1520 	CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
1521 	DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
1522 	    CSR_XR(csr_base, DLU_INTERRUPT_MASK));
1523 
1524 	val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
1525 	CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
1526 	DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
1527 	    CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));
1528 
1529 	val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
1530 	    (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
1531 
1532 	CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
1533 	DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
1534 	    "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));
1535 
1536 	val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
1537 	    DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);
1538 
1539 	CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
1540 
1541 	DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
1542 	    "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
1543 }
1544 
1545 /* ARGSUSED */
1546 static void
1547 dmc_init(caddr_t csr_base, pxu_t *pxu_p)
1548 {
1549 	uint64_t val;
1550 
1551 /*
1552  * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
1553  */
1554 
1555 	val = -1ull;
1556 	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
1557 	DBG(DBG_DMC, NULL,
1558 	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
1559 	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
1560 
1561 	/*
1562 	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
1563 	 */
1564 	DBG(DBG_DMC, NULL,
1565 	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
1566 	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));
1567 
1568 	/*
1569 	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
1570 	 */
1571 	val = 0x0ull;
1572 	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
1573 	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
1574 	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));
1575 
1576 	/*
1577 	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
1578 	 */
1579 	val = 0x0ull;
1580 	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
1581 	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
1582 	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
1583 }
1584 
1585 void
1586 hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
1587 {
1588 	uint64_t val;
1589 
1590 	ilu_init(csr_base, pxu_p);
1591 	tlu_init(csr_base, pxu_p);
1592 
1593 	switch (PX_CHIP_TYPE(pxu_p)) {
1594 	case PX_CHIP_OBERON:
1595 		dlu_init(csr_base, pxu_p);
1596 		break;
1597 	case PX_CHIP_FIRE:
1598 		lpu_init(csr_base, pxu_p);
1599 		break;
1600 	default:
1601 		DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
1602 		    PX_CHIP_TYPE(pxu_p));
1603 		break;
1604 	}
1605 
1606 	dmc_init(csr_base, pxu_p);
1607 
1608 /*
1609  * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
1610  */
1611 
1612 	val = -1ull;
1613 	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
1614 	DBG(DBG_PEC, NULL,
1615 	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
1616 	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
1617 
1618 	/*
1619 	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
1620 	 */
1621 	DBG(DBG_PEC, NULL,
1622 	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
1623 	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
1624 }
1625 
1626 /*
1627  * Convert a TTE to physical address
1628  */
1629 static r_addr_t
1630 mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
1631 {
1632 	uint64_t pa_mask;
1633 
1634 	switch (PX_CHIP_TYPE(pxu_p)) {
1635 	case PX_CHIP_OBERON:
1636 		pa_mask = MMU_OBERON_PADDR_MASK;
1637 		break;
1638 	case PX_CHIP_FIRE:
1639 		pa_mask = MMU_FIRE_PADDR_MASK;
1640 		break;
1641 	default:
1642 		DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
1643 		    PX_CHIP_TYPE(pxu_p));
1644 		pa_mask = 0;
1645 		break;
1646 	}
1647 	return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
1648 }
1649 
1650 /*
1651  * Return MMU bypass noncache bit for chip
1652  */
1653 static r_addr_t
1654 mmu_bypass_noncache(pxu_t *pxu_p)
1655 {
1656 	r_addr_t bypass_noncache_bit;
1657 
1658 	switch (PX_CHIP_TYPE(pxu_p)) {
1659 	case PX_CHIP_OBERON:
1660 		bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
1661 		break;
1662 	case PX_CHIP_FIRE:
1663 		bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
1664 		break;
1665 	default:
1666 		DBG(DBG_MMU, NULL,
1667 		    "mmu_bypass_nocache - unknown chip type: 0x%x\n",
1668 		    PX_CHIP_TYPE(pxu_p));
1669 		bypass_noncache_bit = 0;
1670 		break;
1671 	}
1672 	return (bypass_noncache_bit);
1673 }
1674 
1675 /*
1676  * Calculate number of TSB entries for the chip.
1677  */
1678 /* ARGSUSED */
1679 static uint_t
1680 mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
1681 {
1682 	uint64_t tsb_ctrl;
1683 	uint_t obp_tsb_entries, obp_tsb_size;
1684 
1685 	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
1686 
1687 	obp_tsb_size = tsb_ctrl & 0xF;
1688 
1689 	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
1690 
1691 	return (obp_tsb_entries);
1692 }
1693 
/*
 * Initialize the module, but do not enable interrupts.
 *
 * Sequence: zero our TSB, copy OBP's still-valid TTEs into its tail,
 * invalidate the TLB, program the TSB control register (PA + size
 * encoding), then enable the MMU.  The order of the register writes
 * is part of the hardware init sequence and must be preserved.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, obp_tsb_pa, *base_tte_addr;
	uint_t obp_tsb_entries;

	/* Start from a clean TSB; valid OBP entries are copied in below. */
	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB
	 */
	obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;

	obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);

	/*
	 * OBP's entries land at the tail of our TSB: tsb_size is in
	 * bytes, so (tsb_size >> 3) is the total number of 8-byte TTEs.
	 */
	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

	/* Copy only the valid TTEs from OBP's TSB (8 bytes per TTE). */
	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */

	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */

	/*
	 * Find the TSB size encoding: the largest i in [0, 8] such that
	 * tsb_size >= (0x2000 << i), falling through to 0 otherwise.
	 */
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--) {}

	/* PA bits [63:13] | page-size encoding (bit 8) | size encoding. */
	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    |  (MMU_CONTROL_AND_STATUS_ROE_BIT63_ENABLE <<
	    MMU_CONTROL_AND_STATUS_ROE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * CSR_V TLU's UE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */
	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}
1784 
/*
 * Generic IOMMU Services
 */
1788 
1789 /* ARGSUSED */
1790 uint64_t
1791 hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
1792     io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
1793 {
1794 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1795 	uint64_t	attr = MMU_TTE_V;
1796 	int		i;
1797 
1798 	if (io_attr & PCI_MAP_ATTR_WRITE)
1799 		attr |= MMU_TTE_W;
1800 
1801 	if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
1802 	    (io_attr & PCI_MAP_ATTR_RO))
1803 		attr |= MMU_TTE_RO;
1804 
1805 	if (attr & MMU_TTE_RO) {
1806 		DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
1807 		    "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
1808 	}
1809 
1810 	if (flags & MMU_MAP_PFN) {
1811 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;
1812 		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
1813 			px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
1814 			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1815 
1816 			/*
1817 			 * Oberon will need to flush the corresponding TTEs in
1818 			 * Cache. We only need to flush every cache line.
1819 			 * Extra PIO's are expensive.
1820 			 */
1821 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1822 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1823 					CSR_XS(dev_hdl,
1824 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1825 					    (pxu_p->tsb_paddr+
1826 					    (tsb_index*MMU_TTE_SIZE)));
1827 				}
1828 			}
1829 		}
1830 	} else {
1831 		caddr_t	a = (caddr_t)addr;
1832 		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
1833 			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
1834 			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1835 
1836 			/*
1837 			 * Oberon will need to flush the corresponding TTEs in
1838 			 * Cache. We only need to flush every cache line.
1839 			 * Extra PIO's are expensive.
1840 			 */
1841 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1842 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1843 					CSR_XS(dev_hdl,
1844 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1845 					    (pxu_p->tsb_paddr+
1846 					    (tsb_index*MMU_TTE_SIZE)));
1847 				}
1848 			}
1849 		}
1850 	}
1851 
1852 	return (H_EOK);
1853 }
1854 
1855 /* ARGSUSED */
1856 uint64_t
1857 hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1858     pages_t pages)
1859 {
1860 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1861 	int		i;
1862 
1863 	for (i = 0; i < pages; i++, tsb_index++) {
1864 		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
1865 
1866 			/*
1867 			 * Oberon will need to flush the corresponding TTEs in
1868 			 * Cache. We only need to flush every cache line.
1869 			 * Extra PIO's are expensive.
1870 			 */
1871 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1872 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1873 					CSR_XS(dev_hdl,
1874 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1875 					    (pxu_p->tsb_paddr+
1876 					    (tsb_index*MMU_TTE_SIZE)));
1877 				}
1878 			}
1879 	}
1880 
1881 	return (H_EOK);
1882 }
1883 
1884 /* ARGSUSED */
1885 uint64_t
1886 hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1887     io_attributes_t *attr_p, r_addr_t *r_addr_p)
1888 {
1889 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1890 	uint64_t	*tte_addr;
1891 	uint64_t	ret = H_EOK;
1892 
1893 	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
1894 
1895 	if (*tte_addr & MMU_TTE_V) {
1896 		*r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
1897 		*attr_p = (*tte_addr & MMU_TTE_W) ?
1898 		    PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
1899 	} else {
1900 		*r_addr_p = 0;
1901 		*attr_p = 0;
1902 		ret = H_ENOMAP;
1903 	}
1904 
1905 	return (ret);
1906 }
1907 
1908 /* ARGSUSED */
1909 uint64_t
1910 hvio_get_bypass_base(pxu_t *pxu_p)
1911 {
1912 	uint64_t base;
1913 
1914 	switch (PX_CHIP_TYPE(pxu_p)) {
1915 	case PX_CHIP_OBERON:
1916 		base = MMU_OBERON_BYPASS_BASE;
1917 		break;
1918 	case PX_CHIP_FIRE:
1919 		base = MMU_FIRE_BYPASS_BASE;
1920 		break;
1921 	default:
1922 		DBG(DBG_MMU, NULL,
1923 		    "hvio_get_bypass_base - unknown chip type: 0x%x\n",
1924 		    PX_CHIP_TYPE(pxu_p));
1925 		base = 0;
1926 		break;
1927 	}
1928 	return (base);
1929 }
1930 
1931 /* ARGSUSED */
1932 uint64_t
1933 hvio_get_bypass_end(pxu_t *pxu_p)
1934 {
1935 	uint64_t end;
1936 
1937 	switch (PX_CHIP_TYPE(pxu_p)) {
1938 	case PX_CHIP_OBERON:
1939 		end = MMU_OBERON_BYPASS_END;
1940 		break;
1941 	case PX_CHIP_FIRE:
1942 		end = MMU_FIRE_BYPASS_END;
1943 		break;
1944 	default:
1945 		DBG(DBG_MMU, NULL,
1946 		    "hvio_get_bypass_end - unknown chip type: 0x%x\n",
1947 		    PX_CHIP_TYPE(pxu_p));
1948 		end = 0;
1949 		break;
1950 	}
1951 	return (end);
1952 }
1953 
1954 /* ARGSUSED */
1955 uint64_t
1956 hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
1957     io_attributes_t attr, io_addr_t *io_addr_p)
1958 {
1959 	uint64_t	pfn = MMU_BTOP(ra);
1960 
1961 	*io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
1962 	    (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));
1963 
1964 	return (H_EOK);
1965 }
1966 
1967 /*
1968  * Generic IO Interrupt Servies
1969  */
1970 
1971 /*
1972  * Converts a device specific interrupt number given by the
1973  * arguments devhandle and devino into a system specific ino.
1974  */
1975 /* ARGSUSED */
1976 uint64_t
1977 hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
1978     sysino_t *sysino)
1979 {
1980 	if (devino > INTERRUPT_MAPPING_ENTRIES) {
1981 		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
1982 		return (H_ENOINTR);
1983 	}
1984 
1985 	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
1986 
1987 	return (H_EOK);
1988 }
1989 
1990 /*
1991  * Returns state in intr_valid_state if the interrupt defined by sysino
1992  * is valid (enabled) or not-valid (disabled).
1993  */
1994 uint64_t
1995 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1996     intr_valid_state_t *intr_valid_state)
1997 {
1998 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1999 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
2000 		*intr_valid_state = INTR_VALID;
2001 	} else {
2002 		*intr_valid_state = INTR_NOTVALID;
2003 	}
2004 
2005 	return (H_EOK);
2006 }
2007 
2008 /*
2009  * Sets the 'valid' state of the interrupt defined by
2010  * the argument sysino to the state defined by the
2011  * argument intr_valid_state.
2012  */
2013 uint64_t
2014 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
2015     intr_valid_state_t intr_valid_state)
2016 {
2017 	switch (intr_valid_state) {
2018 	case INTR_VALID:
2019 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2020 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2021 		break;
2022 	case INTR_NOTVALID:
2023 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2024 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2025 		break;
2026 	default:
2027 		return (EINVAL);
2028 	}
2029 
2030 	return (H_EOK);
2031 }
2032 
2033 /*
2034  * Returns the current state of the interrupt given by the sysino
2035  * argument.
2036  */
2037 uint64_t
2038 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
2039     intr_state_t *intr_state)
2040 {
2041 	intr_state_t state;
2042 
2043 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2044 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
2045 
2046 	switch (state) {
2047 	case INTERRUPT_IDLE_STATE:
2048 		*intr_state = INTR_IDLE_STATE;
2049 		break;
2050 	case INTERRUPT_RECEIVED_STATE:
2051 		*intr_state = INTR_RECEIVED_STATE;
2052 		break;
2053 	case INTERRUPT_PENDING_STATE:
2054 		*intr_state = INTR_DELIVERED_STATE;
2055 		break;
2056 	default:
2057 		return (EINVAL);
2058 	}
2059 
2060 	return (H_EOK);
2061 
2062 }
2063 
2064 /*
2065  * Sets the current state of the interrupt given by the sysino
2066  * argument to the value given in the argument intr_state.
2067  *
2068  * Note: Setting the state to INTR_IDLE clears any pending
2069  * interrupt for sysino.
2070  */
2071 uint64_t
2072 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
2073     intr_state_t intr_state)
2074 {
2075 	intr_state_t state;
2076 
2077 	switch (intr_state) {
2078 	case INTR_IDLE_STATE:
2079 		state = INTERRUPT_IDLE_STATE;
2080 		break;
2081 	case INTR_DELIVERED_STATE:
2082 		state = INTERRUPT_PENDING_STATE;
2083 		break;
2084 	default:
2085 		return (EINVAL);
2086 	}
2087 
2088 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2089 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
2090 
2091 	return (H_EOK);
2092 }
2093 
2094 /*
2095  * Returns the cpuid that is the current target of the
2096  * interrupt given by the sysino argument.
2097  *
2098  * The cpuid value returned is undefined if the target
2099  * has not been set via intr_settarget.
2100  */
2101 uint64_t
2102 hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2103     cpuid_t *cpuid)
2104 {
2105 	switch (PX_CHIP_TYPE(pxu_p)) {
2106 	case PX_CHIP_OBERON:
2107 		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2108 		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
2109 		break;
2110 	case PX_CHIP_FIRE:
2111 		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2112 		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
2113 		break;
2114 	default:
2115 		DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
2116 		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2117 		return (EINVAL);
2118 	}
2119 
2120 	return (H_EOK);
2121 }
2122 
2123 /*
2124  * Set the target cpu for the interrupt defined by the argument
2125  * sysino to the target cpu value defined by the argument cpuid.
2126  */
2127 uint64_t
2128 hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2129     cpuid_t cpuid)
2130 {
2131 
2132 	uint64_t	val, intr_controller;
2133 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
2134 
2135 	/*
2136 	 * For now, we assign interrupt controller in a round
2137 	 * robin fashion.  Later, we may need to come up with
2138 	 * a more efficient assignment algorithm.
2139 	 */
2140 	intr_controller = 0x1ull << (cpuid % 4);
2141 
2142 	switch (PX_CHIP_TYPE(pxu_p)) {
2143 	case PX_CHIP_OBERON:
2144 		val = (((cpuid &
2145 		    INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) <<
2146 		    INTERRUPT_MAPPING_ENTRIES_T_DESTID) |
2147 		    ((intr_controller &
2148 		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2149 		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2150 		break;
2151 	case PX_CHIP_FIRE:
2152 		val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
2153 		    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
2154 		    ((intr_controller &
2155 		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2156 		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2157 		break;
2158 	default:
2159 		DBG(DBG_CB, NULL, "hvio_intr_settarget - "
2160 		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2161 		return (EINVAL);
2162 	}
2163 
2164 	/* For EQ interrupts, set DATA MONDO bit */
2165 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
2166 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
2167 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
2168 
2169 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
2170 
2171 	return (H_EOK);
2172 }
2173 
2174 /*
2175  * MSIQ Functions:
2176  */
2177 uint64_t
2178 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
2179 {
2180 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
2181 	    (uint64_t)pxu_p->msiq_mapped_p);
2182 	DBG(DBG_IB, NULL,
2183 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
2184 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
2185 
2186 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
2187 	    (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p),
2188 	    pxu_p->portid) << INO_BITS);
2189 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
2190 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
2191 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
2192 
2193 	return (H_EOK);
2194 }
2195 
2196 uint64_t
2197 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2198     pci_msiq_valid_state_t *msiq_valid_state)
2199 {
2200 	uint32_t	eq_state;
2201 	uint64_t	ret = H_EOK;
2202 
2203 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2204 	    msiq_id, ENTRIES_STATE);
2205 
2206 	switch (eq_state) {
2207 	case EQ_IDLE_STATE:
2208 		*msiq_valid_state = PCI_MSIQ_INVALID;
2209 		break;
2210 	case EQ_ACTIVE_STATE:
2211 	case EQ_ERROR_STATE:
2212 		*msiq_valid_state = PCI_MSIQ_VALID;
2213 		break;
2214 	default:
2215 		ret = H_EIO;
2216 		break;
2217 	}
2218 
2219 	return (ret);
2220 }
2221 
2222 uint64_t
2223 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2224     pci_msiq_valid_state_t msiq_valid_state)
2225 {
2226 	uint64_t	ret = H_EOK;
2227 
2228 	switch (msiq_valid_state) {
2229 	case PCI_MSIQ_INVALID:
2230 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2231 		    msiq_id, ENTRIES_DIS);
2232 		break;
2233 	case PCI_MSIQ_VALID:
2234 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2235 		    msiq_id, ENTRIES_EN);
2236 		break;
2237 	default:
2238 		ret = H_EINVAL;
2239 		break;
2240 	}
2241 
2242 	return (ret);
2243 }
2244 
2245 uint64_t
2246 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2247     pci_msiq_state_t *msiq_state)
2248 {
2249 	uint32_t	eq_state;
2250 	uint64_t	ret = H_EOK;
2251 
2252 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2253 	    msiq_id, ENTRIES_STATE);
2254 
2255 	switch (eq_state) {
2256 	case EQ_IDLE_STATE:
2257 	case EQ_ACTIVE_STATE:
2258 		*msiq_state = PCI_MSIQ_STATE_IDLE;
2259 		break;
2260 	case EQ_ERROR_STATE:
2261 		*msiq_state = PCI_MSIQ_STATE_ERROR;
2262 		break;
2263 	default:
2264 		ret = H_EIO;
2265 	}
2266 
2267 	return (ret);
2268 }
2269 
2270 uint64_t
2271 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2272     pci_msiq_state_t msiq_state)
2273 {
2274 	uint32_t	eq_state;
2275 	uint64_t	ret = H_EOK;
2276 
2277 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2278 	    msiq_id, ENTRIES_STATE);
2279 
2280 	switch (eq_state) {
2281 	case EQ_IDLE_STATE:
2282 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2283 			ret = H_EIO;
2284 		break;
2285 	case EQ_ACTIVE_STATE:
2286 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2287 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2288 			    msiq_id, ENTRIES_ENOVERR);
2289 		else
2290 			ret = H_EIO;
2291 		break;
2292 	case EQ_ERROR_STATE:
2293 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
2294 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2295 			    msiq_id, ENTRIES_E2I);
2296 		else
2297 			ret = H_EIO;
2298 		break;
2299 	default:
2300 		ret = H_EIO;
2301 	}
2302 
2303 	return (ret);
2304 }
2305 
/*
 * Read the head index of the given event queue from the
 * EVENT_QUEUE_HEAD register array.  Always returns H_EOK.
 */
uint64_t
hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
	    msiq_id, ENTRIES_HEAD);

	return (H_EOK);
}
2315 
/*
 * Write a new head index for the given event queue, acknowledging
 * consumed EQ records.  Always returns H_EOK.
 */
uint64_t
hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
	    ENTRIES_HEAD, msiq_head);

	return (H_EOK);
}
2325 
/*
 * Read the hardware-maintained tail index of the given event queue
 * from the EVENT_QUEUE_TAIL register array.  Always returns H_EOK.
 */
uint64_t
hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
	    msiq_id, ENTRIES_TAIL);

	return (H_EOK);
}
2335 
2336 /*
2337  * MSI Functions:
2338  */
/*
 * Program the 32-bit and 64-bit MSI target address registers.  Each
 * register stores only the high-order bits, so the addresses are
 * shifted right by the register's ADDR field offset before writing.
 */
uint64_t
hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
{
	/* PCI MEM 32 resources to perform 32 bit MSI transactions */
	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));

	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));

	return (H_EOK);
}
2356 
/*
 * Return the event queue number the given MSI is mapped to, read
 * from the MSI mapping register's EQNUM field.
 */
uint64_t
hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_EQNUM);

	return (H_EOK);
}
2366 
/*
 * Map the given MSI to an event queue by writing the EQNUM field of
 * its MSI mapping register.  Always returns H_EOK.
 */
uint64_t
hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
    msiqid_t msiq_id)
{
	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
	    ENTRIES_EQNUM, msiq_id);

	return (H_EOK);
}
2376 
/*
 * Report whether the given MSI is enabled by reading the V bit of
 * its mapping register.  Always returns H_EOK.
 */
uint64_t
hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_V);

	return (H_EOK);
}
2386 
2387 uint64_t
2388 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2389     pci_msi_valid_state_t msi_valid_state)
2390 {
2391 	uint64_t	ret = H_EOK;
2392 
2393 	switch (msi_valid_state) {
2394 	case PCI_MSI_VALID:
2395 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2396 		    ENTRIES_V);
2397 		break;
2398 	case PCI_MSI_INVALID:
2399 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2400 		    ENTRIES_V);
2401 		break;
2402 	default:
2403 		ret = H_EINVAL;
2404 	}
2405 
2406 	return (ret);
2407 }
2408 
/*
 * Report the delivery state of the given MSI by reading the EQWR_N
 * bit of its mapping register.  Always returns H_EOK.
 */
uint64_t
hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_EQWR_N);

	return (H_EOK);
}
2418 
2419 uint64_t
2420 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2421     pci_msi_state_t msi_state)
2422 {
2423 	uint64_t	ret = H_EOK;
2424 
2425 	switch (msi_state) {
2426 	case PCI_MSI_STATE_IDLE:
2427 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2428 		    ENTRIES_EQWR_N);
2429 		break;
2430 	case PCI_MSI_STATE_DELIVERED:
2431 	default:
2432 		ret = H_EINVAL;
2433 		break;
2434 	}
2435 
2436 	return (ret);
2437 }
2438 
2439 /*
2440  * MSG Functions:
2441  */
2442 uint64_t
2443 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2444     msiqid_t *msiq_id)
2445 {
2446 	uint64_t	ret = H_EOK;
2447 
2448 	switch (msg_type) {
2449 	case PCIE_PME_MSG:
2450 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2451 		break;
2452 	case PCIE_PME_ACK_MSG:
2453 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2454 		    EQNUM);
2455 		break;
2456 	case PCIE_CORR_MSG:
2457 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2458 		break;
2459 	case PCIE_NONFATAL_MSG:
2460 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2461 		    EQNUM);
2462 		break;
2463 	case PCIE_FATAL_MSG:
2464 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2465 		break;
2466 	default:
2467 		ret = H_EINVAL;
2468 		break;
2469 	}
2470 
2471 	return (ret);
2472 }
2473 
2474 uint64_t
2475 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2476     msiqid_t msiq_id)
2477 {
2478 	uint64_t	ret = H_EOK;
2479 
2480 	switch (msg_type) {
2481 	case PCIE_PME_MSG:
2482 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2483 		break;
2484 	case PCIE_PME_ACK_MSG:
2485 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2486 		break;
2487 	case PCIE_CORR_MSG:
2488 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2489 		break;
2490 	case PCIE_NONFATAL_MSG:
2491 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2492 		break;
2493 	case PCIE_FATAL_MSG:
2494 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2495 		break;
2496 	default:
2497 		ret = H_EINVAL;
2498 		break;
2499 	}
2500 
2501 	return (ret);
2502 }
2503 
2504 uint64_t
2505 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2506     pcie_msg_valid_state_t *msg_valid_state)
2507 {
2508 	uint64_t	ret = H_EOK;
2509 
2510 	switch (msg_type) {
2511 	case PCIE_PME_MSG:
2512 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2513 		break;
2514 	case PCIE_PME_ACK_MSG:
2515 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2516 		    PME_TO_ACK_MAPPING, V);
2517 		break;
2518 	case PCIE_CORR_MSG:
2519 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2520 		break;
2521 	case PCIE_NONFATAL_MSG:
2522 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2523 		    ERR_NONFATAL_MAPPING, V);
2524 		break;
2525 	case PCIE_FATAL_MSG:
2526 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2527 		    V);
2528 		break;
2529 	default:
2530 		ret = H_EINVAL;
2531 		break;
2532 	}
2533 
2534 	return (ret);
2535 }
2536 
2537 uint64_t
2538 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2539     pcie_msg_valid_state_t msg_valid_state)
2540 {
2541 	uint64_t	ret = H_EOK;
2542 
2543 	switch (msg_valid_state) {
2544 	case PCIE_MSG_VALID:
2545 		switch (msg_type) {
2546 		case PCIE_PME_MSG:
2547 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2548 			break;
2549 		case PCIE_PME_ACK_MSG:
2550 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2551 			break;
2552 		case PCIE_CORR_MSG:
2553 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2554 			break;
2555 		case PCIE_NONFATAL_MSG:
2556 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2557 			break;
2558 		case PCIE_FATAL_MSG:
2559 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2560 			break;
2561 		default:
2562 			ret = H_EINVAL;
2563 			break;
2564 		}
2565 
2566 		break;
2567 	case PCIE_MSG_INVALID:
2568 		switch (msg_type) {
2569 		case PCIE_PME_MSG:
2570 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2571 			break;
2572 		case PCIE_PME_ACK_MSG:
2573 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2574 			break;
2575 		case PCIE_CORR_MSG:
2576 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2577 			break;
2578 		case PCIE_NONFATAL_MSG:
2579 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2580 			break;
2581 		case PCIE_FATAL_MSG:
2582 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2583 			break;
2584 		default:
2585 			ret = H_EINVAL;
2586 			break;
2587 		}
2588 		break;
2589 	default:
2590 		ret = H_EINVAL;
2591 	}
2592 
2593 	return (ret);
2594 }
2595 
2596 /*
2597  * Suspend/Resume Functions:
2598  *	(pec, mmu, ib)
2599  *	cb
2600  * Registers saved have all been touched in the XXX_init functions.
2601  */
/*
 * Save PEC, MMU and IB register state ahead of a suspend.  MSIQ
 * state is saved first via msiq_suspend().  All saved values live in
 * one kmem allocation carved into four consecutive sections whose
 * layout must match what hvio_resume() restores (and frees).
 * Returns H_EOK, or H_EIO if msiq_suspend or the allocation fails.
 */
uint64_t
hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	uint64_t	*config_state;
	int		total_size;
	int		i;

	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
		return (H_EIO);

	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
	config_state = kmem_zalloc(total_size, KM_NOSLEEP);

	if (config_state == NULL) {
		return (H_EIO);
	}

	/*
	 * Soft state for suspend/resume  from pxu_t
	 * uint64_t	*pec_config_state;
	 * uint64_t	*mmu_config_state;
	 * uint64_t	*ib_intr_map;
	 * uint64_t	*ib_config_state;
	 * uint64_t	*xcb_config_state;
	 */

	/* Save the PEC configuration states */
	pxu_p->pec_config_state = config_state;
	for (i = 0; i < PEC_KEYS; i++) {
		/* Skip registers that don't exist on this chip type. */
		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
			pxu_p->pec_config_state[i] =
			    CSR_XR((caddr_t)dev_hdl,
			    pec_config_state_regs[i].reg);
		}
	}

	/* Save the MMU configuration states */
	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
	for (i = 0; i < MMU_KEYS; i++) {
		pxu_p->mmu_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
	}

	/* Save the interrupt mapping registers */
	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
		pxu_p->ib_intr_map[i] =
		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
	}

	/* Save the IB configuration states */
	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
	for (i = 0; i < IB_KEYS; i++) {
		pxu_p->ib_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
	}

	return (H_EOK);
}
2662 
/*
 * Restore the register state captured by hvio_suspend(), in reverse
 * section order (IB, interrupt map, MMU, PEC), then re-enable the
 * PCI-E interrupt for devino, free the save buffer and restore the
 * MSIQ state.  A no-op if no suspend state was saved.
 */
void
hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
{
	int		total_size;
	sysino_t	sysino;
	int		i;
	uint64_t	ret;

	/* Make sure that suspend actually did occur */
	if (!pxu_p->pec_config_state) {
		return;
	}

	/* Restore IB configuration states */
	for (i = 0; i < IB_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
		    pxu_p->ib_config_state[i]);
	}

	/*
	 * Restore the interrupt mapping registers
	 * And make sure the intrs are idle.
	 */
	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
		    pxu_p->ib_intr_map[i]);
	}

	/* Restore MMU configuration states */
	/* Clear the cache. */
	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);

	for (i = 0; i < MMU_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
		    pxu_p->mmu_config_state[i]);
	}

	/* Restore PEC configuration states */
	/* Make sure all reset bits are low until error is detected */
	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);

	for (i = 0; i < PEC_KEYS; i++) {
		/* Skip registers that don't exist on this chip type. */
		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
			CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg,
			    pxu_p->pec_config_state[i]);
		}
	}

	/* Enable PCI-E interrupt */
	if ((ret = hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino,
	    &sysino)) != H_EOK) {
		cmn_err(CE_WARN,
		    "hvio_resume: hvio_intr_devino_to_sysino failed, "
		    "ret 0x%lx", ret);
	}

	if ((ret =  hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE))
	    != H_EOK) {
		cmn_err(CE_WARN,
		    "hvio_resume: hvio_intr_setstate failed, "
		    "ret 0x%lx", ret);
	}

	/* Release the single buffer allocated by hvio_suspend(). */
	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
	kmem_free(pxu_p->pec_config_state, total_size);

	pxu_p->pec_config_state = NULL;
	pxu_p->mmu_config_state = NULL;
	pxu_p->ib_config_state = NULL;
	pxu_p->ib_intr_map = NULL;

	msiq_resume(dev_hdl, pxu_p);
}
2739 
2740 uint64_t
2741 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2742 {
2743 	uint64_t *config_state, *cb_regs;
2744 	int i, cb_size, cb_keys;
2745 
2746 	switch (PX_CHIP_TYPE(pxu_p)) {
2747 	case PX_CHIP_OBERON:
2748 		cb_size = UBC_SIZE;
2749 		cb_keys = UBC_KEYS;
2750 		cb_regs = ubc_config_state_regs;
2751 		break;
2752 	case PX_CHIP_FIRE:
2753 		cb_size = JBC_SIZE;
2754 		cb_keys = JBC_KEYS;
2755 		cb_regs = jbc_config_state_regs;
2756 		break;
2757 	default:
2758 		DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n",
2759 		    PX_CHIP_TYPE(pxu_p));
2760 		break;
2761 	}
2762 
2763 	config_state = kmem_zalloc(cb_size, KM_NOSLEEP);
2764 
2765 	if (config_state == NULL) {
2766 		return (H_EIO);
2767 	}
2768 
2769 	/* Save the configuration states */
2770 	pxu_p->xcb_config_state = config_state;
2771 	for (i = 0; i < cb_keys; i++) {
2772 		pxu_p->xcb_config_state[i] =
2773 		    CSR_XR((caddr_t)dev_hdl, cb_regs[i]);
2774 	}
2775 
2776 	return (H_EOK);
2777 }
2778 
2779 void
2780 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2781     devino_t devino, pxu_t *pxu_p)
2782 {
2783 	sysino_t sysino;
2784 	uint64_t *cb_regs;
2785 	int i, cb_size, cb_keys;
2786 	uint64_t ret;
2787 
2788 	switch (PX_CHIP_TYPE(pxu_p)) {
2789 	case PX_CHIP_OBERON:
2790 		cb_size = UBC_SIZE;
2791 		cb_keys = UBC_KEYS;
2792 		cb_regs = ubc_config_state_regs;
2793 		/*
2794 		 * No reason to have any reset bits high until an error is
2795 		 * detected on the link.
2796 		 */
2797 		CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull);
2798 		break;
2799 	case PX_CHIP_FIRE:
2800 		cb_size = JBC_SIZE;
2801 		cb_keys = JBC_KEYS;
2802 		cb_regs = jbc_config_state_regs;
2803 		/*
2804 		 * No reason to have any reset bits high until an error is
2805 		 * detected on the link.
2806 		 */
2807 		CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2808 		break;
2809 	default:
2810 		DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n",
2811 		    PX_CHIP_TYPE(pxu_p));
2812 		break;
2813 	}
2814 
2815 	ASSERT(pxu_p->xcb_config_state);
2816 
2817 	/* Restore the configuration states */
2818 	for (i = 0; i < cb_keys; i++) {
2819 		CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i],
2820 		    pxu_p->xcb_config_state[i]);
2821 	}
2822 
2823 	/* Enable XBC interrupt */
2824 	if ((ret = hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino,
2825 	    &sysino)) != H_EOK) {
2826 		cmn_err(CE_WARN,
2827 		    "hvio_cb_resume: hvio_intr_devino_to_sysino failed, "
2828 		    "ret 0x%lx", ret);
2829 	}
2830 
2831 	if ((ret = hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE))
2832 	    != H_EOK) {
2833 		cmn_err(CE_WARN,
2834 		    "hvio_cb_resume: hvio_intr_setstate failed, "
2835 		    "ret 0x%lx", ret);
2836 	}
2837 
2838 	kmem_free(pxu_p->xcb_config_state, cb_size);
2839 
2840 	pxu_p->xcb_config_state = NULL;
2841 }
2842 
2843 static uint64_t
2844 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2845 {
2846 	size_t	bufsz;
2847 	volatile uint64_t *cur_p;
2848 	int i;
2849 
2850 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2851 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2852 	    NULL)
2853 		return (H_EIO);
2854 
2855 	cur_p = pxu_p->msiq_config_state;
2856 
2857 	/* Save each EQ state */
2858 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2859 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2860 
2861 	/* Save MSI mapping registers */
2862 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2863 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2864 
2865 	/* Save all other MSIQ registers */
2866 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2867 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2868 	return (H_EOK);
2869 }
2870 
/*
 * Restore the MSIQ state saved by msiq_suspend(): re-run the basic
 * MSIQ initialization, re-enable any EQ that was active or errored,
 * restore the MSI mapping registers, then the remaining MSIQ
 * registers, and free the save buffer.  cur_p walks the saved
 * sections in exactly the order msiq_suspend() wrote them.
 */
static void
msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	size_t	bufsz;
	uint64_t *cur_p, state;
	int i;
	uint64_t ret;

	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
	cur_p = pxu_p->msiq_config_state;
	/*
	 * Initialize EQ base address register and
	 * Interrupt Mondo Data 0 register.
	 */
	if ((ret = hvio_msiq_init(dev_hdl, pxu_p)) != H_EOK) {
		cmn_err(CE_WARN,
		    "msiq_resume: hvio_msiq_init failed, "
		    "ret 0x%lx", ret);
	}

	/* Restore EQ states */
	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
		state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
		if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
			    i, ENTRIES_EN);
	}

	/* Restore MSI mapping */
	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);

	/*
	 * Restore all other registers. MSI 32 bit address and
	 * MSI 64 bit address are restored as part of this.
	 */
	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);

	kmem_free(pxu_p->msiq_config_state, bufsz);
	pxu_p->msiq_config_state = NULL;
}
2913 
2914 /*
2915  * sends PME_Turn_Off message to put the link in L2/L3 ready state.
2916  * called by px_goto_l23ready.
2917  * returns DDI_SUCCESS or DDI_FAILURE
2918  */
2919 int
2920 px_send_pme_turnoff(caddr_t csr_base)
2921 {
2922 	volatile uint64_t reg;
2923 
2924 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2925 	/* If already pending, return failure */
2926 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2927 		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2928 		    "tlu_pme_turn_off_generate = %x\n", reg);
2929 		return (DDI_FAILURE);
2930 	}
2931 
2932 	/* write to PME_Turn_off reg to boradcast */
2933 	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2934 	CSR_XS(csr_base,  TLU_PME_TURN_OFF_GENERATE, reg);
2935 
2936 	return (DDI_SUCCESS);
2937 }
2938 
2939 /*
2940  * Checks for link being in L1idle state.
2941  * Returns
2942  * DDI_SUCCESS - if the link is in L1idle
2943  * DDI_FAILURE - if the link is not in L1idle
2944  */
2945 int
2946 px_link_wait4l1idle(caddr_t csr_base)
2947 {
2948 	uint8_t ltssm_state;
2949 	int ntries = px_max_l1_tries;
2950 
2951 	while (ntries > 0) {
2952 		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2953 		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2954 			break;
2955 		delay(1);
2956 	}
2957 	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2958 	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2959 }
2960 
2961 /*
2962  * Tranisition the link to L0, after it is down.
2963  */
2964 int
2965 px_link_retrain(caddr_t csr_base)
2966 {
2967 	volatile uint64_t reg;
2968 
2969 	reg = CSR_XR(csr_base, TLU_CONTROL);
2970 	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2971 		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2972 		return (DDI_FAILURE);
2973 	}
2974 
2975 	/* Clear link down bit in TLU Other Event Clear Status Register. */
2976 	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2977 
2978 	/* Clear Drain bit in TLU Status Register */
2979 	CSR_BS(csr_base, TLU_STATUS, DRAIN);
2980 
2981 	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2982 	reg = CSR_XR(csr_base, TLU_CONTROL);
2983 	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2984 	CSR_XS(csr_base, TLU_CONTROL, reg);
2985 
2986 	return (DDI_SUCCESS);
2987 }
2988 
/*
 * Set the Remain-in-Detect.Quiet bit in the TLU control register,
 * holding the link in Detect.Quiet until px_link_retrain() clears it.
 */
void
px_enable_detect_quiet(caddr_t csr_base)
{
	volatile uint64_t tlu_ctrl;

	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
}
2998 
/*
 * Power on an Oberon hotplug slot and bring the PCIe link up.
 *
 * Sequence: verify leaf-reset/HP-capable/slot-presence preconditions,
 * run the power-fault detection handshake, enable slot power and clock,
 * then retry link training up to link_retry_count times.  On success the
 * power LED is turned on and SCF is notified; on failure the sequence is
 * unwound through the fail2/fail1/fail labels.
 *
 * Returns DDI_SUCCESS, or (uint_t)DDI_FAILURE on any precondition or
 * link-up failure.  NOTE(review): delay() intervals and register order
 * follow the Oberon hardware bring-up procedure — do not reorder.
 */
static uint_t
oberon_hp_pwron(caddr_t csr_base)
{
	volatile uint64_t reg;
	boolean_t link_retry, link_up;
	int loop, i;

	DBG(DBG_HP, NULL, "oberon_hp_pwron the slot\n");

	/* Check Leaf Reset status */
	reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
	if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n");
		goto fail;
	}

	/* Check HP Capable */
	if (!CSR_BR(csr_base, TLU_SLOT_CAPABILITIES, HP)) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not "
		    "hotplugable\n");
		goto fail;
	}

	/* Check Slot status: card present (PSD) and MRL sensor closed */
	reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
	if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
	    (reg & (1ull << TLU_SLOT_STATUS_MRLS))) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n",
		    reg);
		goto fail;
	}

	/* Blink power LED, this is done from pciehpc already */

	/* Turn on slot power */
	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);

	/* power fault detection */
	delay(drv_usectohz(25000));
	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);

	/* wait to check power state */
	delay(drv_usectohz(25000));

	if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: power fault\n");
		goto fail1;
	}

	/* power is good */
	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);

	delay(drv_usectohz(25000));
	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
	CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN);

	/* Turn on slot clock */
	CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN);

	link_up = B_FALSE;
	link_retry = B_FALSE;

	/* Train the link, re-running the reset sequence on each retry */
	for (loop = 0; (loop < link_retry_count) && (link_up == B_FALSE);
	    loop++) {
		if (link_retry == B_TRUE) {
			DBG(DBG_HP, NULL, "oberon_hp_pwron : retry link loop "
			    "%d\n", loop);
			CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
			CSR_XS(csr_base, FLP_PORT_CONTROL, 0x1);
			delay(drv_usectohz(10000));
			CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
			CSR_BS(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
			CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
			delay(drv_usectohz(50000));
		}

		/* Release PCI-E Reset */
		delay(drv_usectohz(wait_perst));
		CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST);

		/*
		 * Open events' mask
		 * This should be done from pciehpc already
		 */

		/* Enable PCIE port */
		delay(drv_usectohz(wait_enable_port));
		CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
		CSR_XS(csr_base, FLP_PORT_CONTROL, 0x20);

		/*
		 * wait for the link up: flow-control init done, DL up,
		 * and the link state machine in DL_ACTIVE
		 */
		/* BEGIN CSTYLED */
		for (i = 0; (i < 2) && (link_up == B_FALSE); i++) {
			delay(drv_usectohz(link_status_check));
			reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS);

		    if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) &
			DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) ==
			DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) &&
			(reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) &&
			((reg & DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK)
			==
			DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) {
			DBG(DBG_HP, NULL, "oberon_hp_pwron : link is up\n");
				link_up = B_TRUE;
		    } else
			link_retry = B_TRUE;
		}
		/* END CSTYLED */
	}

	if (link_up == B_FALSE) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable "
		    "PCI-E port\n");
		goto fail2;
	}

	/* link is up */
	CSR_BC(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
	CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR);
	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P);
	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S);
	CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);

	/* Restore LUP/LDN logging per the px_tlu_oe_log_mask tunable */
	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P;
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P;
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S;
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S;
	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);

	/*
	 * Initialize Leaf
	 * SPLS = 00b, SPLV = 11001b, i.e. 25W
	 */
	reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
	reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
	    TLU_SLOT_CAPABILITIES_SPLS);
	reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
	    TLU_SLOT_CAPABILITIES_SPLV);
	reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
	CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);

	/* Turn on Power LED */
	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
	reg = pcie_slotctl_pwr_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_ON);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	/* Notify to SCF by toggling the SLOTPON bit */
	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
	else
		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);

	/* Wait for one second */
	delay(drv_usectohz(1000000));

	return (DDI_SUCCESS);

fail2:
	/* Link up is failed */
	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
	delay(drv_usectohz(150));

	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
	delay(drv_usectohz(100));

fail1:
	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);

	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);

	/* Power LED back off */
	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
	reg = pcie_slotctl_pwr_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);

fail:
	return ((uint_t)DDI_FAILURE);
}
3191 
/* Max time to wait for an Oberon leaf reset to complete (tunable). */
hrtime_t oberon_leaf_reset_timeout = 120ll * NANOSEC;	/* 120 seconds */
3193 
3194 static uint_t
3195 oberon_hp_pwroff(caddr_t csr_base)
3196 {
3197 	volatile uint64_t reg;
3198 	volatile uint64_t reg_tluue, reg_tluce;
3199 	hrtime_t start_time, end_time;
3200 
3201 	DBG(DBG_HP, NULL, "oberon_hp_pwroff the slot\n");
3202 
3203 	/* Blink power LED, this is done from pciehpc already */
3204 
3205 	/* Clear Slot Event */
3206 	CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
3207 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3208 
3209 	/* DRN_TR_DIS on */
3210 	CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3211 	delay(drv_usectohz(10000));
3212 
3213 	/* Disable LUP/LDN */
3214 	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3215 	reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3216 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3217 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3218 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3219 	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3220 
3221 	/* Save the TLU registers */
3222 	reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE);
3223 	reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE);
3224 	/* All clear */
3225 	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0);
3226 	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0);
3227 
3228 	/* Disable port */
3229 	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3230 
3231 	/* PCIE reset */
3232 	delay(drv_usectohz(10000));
3233 	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3234 
3235 	/* PCIE clock stop */
3236 	delay(drv_usectohz(150));
3237 	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3238 
3239 	/* Turn off slot power */
3240 	delay(drv_usectohz(100));
3241 	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3242 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3243 	delay(drv_usectohz(25000));
3244 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3245 
3246 	/* write 0 to bit 7 of ILU Error Log Enable Register */
3247 	CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);
3248 
3249 	/* Set back TLU registers */
3250 	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue);
3251 	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce);
3252 
3253 	/* Power LED off */
3254 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3255 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3256 	reg = pcie_slotctl_pwr_indicator_set(reg,
3257 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3258 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3259 
3260 	/* Indicator LED blink */
3261 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3262 	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3263 	reg = pcie_slotctl_attn_indicator_set(reg,
3264 	    PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
3265 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3266 
3267 	/* Notify to SCF */
3268 	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3269 		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3270 	else
3271 		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3272 
3273 	start_time = gethrtime();
3274 	/* Check Leaf Reset status */
3275 	while (!(CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))) {
3276 		if ((end_time = (gethrtime() - start_time)) >
3277 		    oberon_leaf_reset_timeout) {
3278 			cmn_err(CE_WARN, "Oberon leaf reset is not completed, "
3279 			    "even after waiting %llx ticks", end_time);
3280 
3281 			break;
3282 		}
3283 
3284 		/* Wait for one second */
3285 		delay(drv_usectohz(1000000));
3286 	}
3287 
3288 	/* Indicator LED off */
3289 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3290 	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3291 	reg = pcie_slotctl_attn_indicator_set(reg,
3292 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3293 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3294 
3295 	return (DDI_SUCCESS);
3296 }
3297 
3298 static uint_t
3299 oberon_hpreg_get(void *cookie, off_t off)
3300 {
3301 	caddr_t csr_base = *(caddr_t *)cookie;
3302 	volatile uint64_t val = -1ull;
3303 
3304 	switch (off) {
3305 	case PCIE_SLOTCAP:
3306 		val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3307 		break;
3308 	case PCIE_SLOTCTL:
3309 		val = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3310 
3311 		/* Get the power state */
3312 		val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
3313 		    (1ull << HOTPLUG_CONTROL_PWREN)) ?
3314 		    0 : PCIE_SLOTCTL_PWR_CONTROL;
3315 		break;
3316 	case PCIE_SLOTSTS:
3317 		val = CSR_XR(csr_base, TLU_SLOT_STATUS);
3318 		break;
3319 	case PCIE_LINKCAP:
3320 		val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES);
3321 		break;
3322 	case PCIE_LINKSTS:
3323 		val = CSR_XR(csr_base, TLU_LINK_STATUS);
3324 		break;
3325 	default:
3326 		DBG(DBG_HP, NULL, "oberon_hpreg_get(): "
3327 		    "unsupported offset 0x%lx\n", off);
3328 		break;
3329 	}
3330 
3331 	return ((uint_t)val);
3332 }
3333 
3334 static uint_t
3335 oberon_hpreg_put(void *cookie, off_t off, uint_t val)
3336 {
3337 	caddr_t csr_base = *(caddr_t *)cookie;
3338 	volatile uint64_t pwr_state_on, pwr_fault;
3339 	uint_t pwr_off, ret = DDI_SUCCESS;
3340 
3341 	DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
3342 	    off, oberon_hpreg_get(cookie, off), val);
3343 
3344 	switch (off) {
3345 	case PCIE_SLOTCTL:
3346 		/*
3347 		 * Depending on the current state, insertion or removal
3348 		 * will go through their respective sequences.
3349 		 */
3350 		pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
3351 		pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;
3352 
3353 		if (!pwr_off && !pwr_state_on)
3354 			ret = oberon_hp_pwron(csr_base);
3355 		else if (pwr_off && pwr_state_on) {
3356 			pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
3357 			    (1ull << TLU_SLOT_STATUS_PWFD);
3358 
3359 			if (pwr_fault) {
3360 				DBG(DBG_HP, NULL, "oberon_hpreg_put: power "
3361 				    "off because of power fault\n");
3362 				CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3363 			}
3364 			else
3365 				ret = oberon_hp_pwroff(csr_base);
3366 		} else
3367 			CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
3368 		break;
3369 	case PCIE_SLOTSTS:
3370 		CSR_XS(csr_base, TLU_SLOT_STATUS, val);
3371 		break;
3372 	default:
3373 		DBG(DBG_HP, NULL, "oberon_hpreg_put(): "
3374 		    "unsupported offset 0x%lx\n", off);
3375 		ret = (uint_t)DDI_FAILURE;
3376 		break;
3377 	}
3378 
3379 	return (ret);
3380 }
3381 
3382 int
3383 hvio_hotplug_init(dev_info_t *dip, void *arg)
3384 {
3385 	pciehpc_regops_t *regops = (pciehpc_regops_t *)arg;
3386 	px_t	*px_p = DIP_TO_STATE(dip);
3387 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3388 	volatile uint64_t reg;
3389 
3390 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
3391 		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3392 		    TLU_SLOT_CAPABILITIES, HP)) {
3393 			DBG(DBG_HP, NULL, "%s%d: hotplug capabale not set\n",
3394 			    ddi_driver_name(dip), ddi_get_instance(dip));
3395 			return (DDI_FAILURE);
3396 		}
3397 
3398 		/* For empty or disconnected slot, disable LUP/LDN */
3399 		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3400 		    TLU_SLOT_STATUS, PSD) ||
3401 		    !CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3402 		    HOTPLUG_CONTROL, PWREN)) {
3403 
3404 			reg = CSR_XR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3405 			    TLU_OTHER_EVENT_LOG_ENABLE);
3406 			reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3407 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3408 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3409 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3410 			CSR_XS((caddr_t)pxu_p->px_address[PX_REG_CSR],
3411 			    TLU_OTHER_EVENT_LOG_ENABLE, reg);
3412 		}
3413 
3414 		regops->get = oberon_hpreg_get;
3415 		regops->put = oberon_hpreg_put;
3416 
3417 		/* cookie is the csr_base */
3418 		regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];
3419 
3420 		return (DDI_SUCCESS);
3421 	}
3422 
3423 	return (DDI_ENOTSUP);
3424 }
3425 
3426 int
3427 hvio_hotplug_uninit(dev_info_t *dip)
3428 {
3429 	px_t	*px_p = DIP_TO_STATE(dip);
3430 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3431 
3432 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
3433 		return (DDI_SUCCESS);
3434 
3435 	return (DDI_FAILURE);
3436 }
3437