/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "oberon_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"
#include "px_err.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 *
 * This array is in reg,chip form. PX_CHIP_UNIDENTIFIED is for all chips
 * or PX_CHIP_FIRE for Fire only, or PX_CHIP_OBERON for Oberon only.
 */
static struct px_pec_regs {
	uint64_t reg;
	uint64_t chip;
} pec_config_state_regs[] = {
	{PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
	{DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
	{DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
	{LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RESET, PX_CHIP_FIRE},
	{LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
	{LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
	{LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
	{LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
	{LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
	{DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};

#define	PEC_KEYS	\
	((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))

#define	PEC_SIZE	(PEC_KEYS * sizeof (uint64_t))
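/*
 * PEC_KEYS is the number of entries in pec_config_state_regs; PEC_SIZE is
 * the size in bytes of the buffer needed to save one value per entry.
 */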

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the JBC module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	jbc_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	JBC_SIZE (sizeof (jbc_config_state_regs))
#define	JBC_KEYS (JBC_SIZE / sizeof (uint64_t))

/*
 * Registers for the UBC module.
 * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	ubc_config_state_regs[] = {
	UBC_ERROR_LOG_ENABLE,
	UBC_INTERRUPT_ENABLE
};
#define	UBC_SIZE (sizeof (ubc_config_state_regs))
#define	UBC_KEYS (UBC_SIZE / sizeof (uint64_t))

static uint64_t	msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

/* OPL tuning variables for link unstable issue */
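/*
 * These are plain kernel variables, so a platform can adjust them
 * without a rebuild (e.g. via /etc/system).
 */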
int wait_perst = 5000000; 	/* step 9, default: 5s */
int wait_enable_port = 30000;	/* step 11, default: 30ms */
int link_retry_count = 2; 	/* step 11, default: 2 */
int link_status_check = 400000;	/* step 11, default: 400ms */

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);

/*
 * Initialize the bus, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		ubc_init(xbc_csr_base, pxu_p);
		break;
	case PX_CHIP_FIRE:
		jbc_init(xbc_csr_base, pxu_p);
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}
}

/*
 * Initialize the JBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V JBC's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the UBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	/*
	 * Enable Uranus bus error log bits.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	/*
	 * Clear Uranus bus errors.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V UBC's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms and
	 * OBP will set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * NOTE: Do not set the NPWR_EN bit.  The desired value of this bit
	 * will be set by OBP.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);
	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
	    TLU_CONTROL_CONFIG_DEFAULT;

	/*
	 * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
	 * behind non-posted PIO writes. This blocking could cause a master or
	 * slave timeout on the host bus if multiple serialized PIOs were to
	 * suffer Completion Timeouts because the CTO delays for each PIO ahead
	 * of the read would accumulate. Since the Olympus processor can have
	 * only 1 PIO outstanding, there is no possibility of PIO accesses from
	 * a given CPU to a given device being re-ordered by the PCIe fabric;
	 * therefore turning off serialization should be safe from a PCIe
	 * ordering perspective.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		val &= ~(1ull << TLU_CONTROL_NPWR_EN);

	/*
	 * Set Detect.Quiet. This will disable automatic link
	 * re-training, if the link goes down e.g. power management
	 * turns off power to the downstream device. This will enable
	 * Fire to go to Drain state, after link down. The drain state
	 * forces a reset to the FC state machine, which is required for
	 * proper link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */

	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes (000b).  Bits [7:5] are Max Payload Size, which
	 * starts at 128 bytes (000b).  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used since then L0s exit
	 * latency should be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));


	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

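/*
 * Initialize the Fire LPU module.
 */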
/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x49,  0x43,  0x30},
		{0x1A0,  0x76,  0x6B,  0x48},
		{0x22F,  0x9A,  0x56,  0x56},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 2.0 sections 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379,  0x112, 0xFC,  0xB4},
		{0x618,  0x1BA, 0x192, 0x10E},
		{0x831,  0x242, 0x143, 0x143},
		{0xFB1,  0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};

	/*
	 * Get the Link Width.  See table above LINK_WIDTH_ARR_SIZE #define
	 * Only Link Widths of x1, x4, x8, and x16 are supported.
	 * If any other width is reported, default to x1.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}
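	/* link_width now indexes the x1/x4/x8/x16 columns of the tables. */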

	/*
	 * Get the Max Payload Size.
	 * See table above LINK_MAX_PKT_ARR_SIZE #define
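	 * (The field is encoded: payload bytes = 128 << max_payload.)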
	 */
	max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
	    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;

	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default 0x10, means a width of x16.  The problem is
	 * this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */

	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU Link Layer interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 * Expect OBP 0x1D4C
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as timer0 register above, except for bits [14:0]
	 * have the timer values for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	val = acknak_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	val = replay_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Test mode enables which test mode?
	 * Programming model needed for the Receiver Reset Lane N
	 * bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for LTSSM 8 ns timeout value and
	 * LTSSM 20 ns timeout value.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does the '12 ms timeout value' mean?
	 */
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));
	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

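/*
 * Initialize the Oberon DLU module.
 */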
/* ARGSUSED */
static void
dlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
	DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, DLU_INTERRUPT_MASK));

	val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
	DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));

	val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);

	CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
	    "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));

	val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
	    DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);

	CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
	    "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

/*
 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
 */

	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

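/*
 * Initialize the PEC: the ILU and TLU, the chip-specific link unit
 * (DLU for Oberon, LPU for Fire), and finally the DMC.
 */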
void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		dlu_init(csr_base, pxu_p);
		break;
	case PX_CHIP_FIRE:
		lpu_init(csr_base, pxu_p);
		break;
	default:
		DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}

	dmc_init(csr_base, pxu_p);

/*
 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
 */

	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Convert a TTE to physical address
 */
static r_addr_t
mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
{
	uint64_t pa_mask;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		pa_mask = MMU_OBERON_PADDR_MASK;
		break;
	case PX_CHIP_FIRE:
		pa_mask = MMU_FIRE_PADDR_MASK;
		break;
	default:
		DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		pa_mask = 0;
		break;
	}
1673 	return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
1674 }
1675 
1676 /*
1677  * Return MMU bypass noncache bit for chip
1678  */
1679 static r_addr_t
1680 mmu_bypass_noncache(pxu_t *pxu_p)
1681 {
1682 	r_addr_t bypass_noncache_bit;
1683 
1684 	switch (PX_CHIP_TYPE(pxu_p)) {
1685 	case PX_CHIP_OBERON:
1686 		bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
1687 		break;
1688 	case PX_CHIP_FIRE:
1689 		bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
1690 		break;
1691 	default:
1692 		DBG(DBG_MMU, NULL,
1693 		    "mmu_bypass_noncache - unknown chip type: 0x%x\n",
1694 		    PX_CHIP_TYPE(pxu_p));
1695 		bypass_noncache_bit = 0;
1696 		break;
1697 	}
1698 	return (bypass_noncache_bit);
1699 }
1700 
1701 /*
1702  * Calculate number of TSB entries for the chip.
1703  */
1704 /* ARGSUSED */
1705 static uint_t
1706 mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
1707 {
1708 	uint64_t tsb_ctrl;
1709 	uint_t obp_tsb_entries, obp_tsb_size;
1710 
1711 	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
1712 
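	/* The TSB size encoding is the low four bits of MMU_TSB_CONTROL. */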
1713 	obp_tsb_size = tsb_ctrl & 0xF;
1714 
1715 	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
1716 
1717 	return (obp_tsb_entries);
1718 }
1719 
1720 /*
1721  * Initialize the module, but do not enable interrupts.
1722  */
1723 void
1724 hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
1725 {
1726 	uint64_t	val, i, obp_tsb_pa, *base_tte_addr;
1727 	uint_t obp_tsb_entries;
1728 
1729 	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);
1730 
1731 	/*
1732 	 * Preserve OBP's TSB
1733 	 */
1734 	obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;
1735 
1736 	obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);
1737 
1738 	base_tte_addr = pxu_p->tsb_vaddr +
1739 	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);
1740 
1741 	for (i = 0; i < obp_tsb_entries; i++) {
1742 		uint64_t tte = lddphys(obp_tsb_pa + i * 8);
1743 
1744 		if (!MMU_TTE_VALID(tte))
1745 			continue;
1746 
1747 		base_tte_addr[i] = tte;
1748 	}
1749 
1750 	/*
1751 	 * Invalidate the TLB through the diagnostic register.
1752 	 */
1753 
1754 	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);
1755 
1756 	/*
1757 	 * Configure the Fire MMU TSB Control Register.  Determine
1758 	 * the encoding for either 8KB pages (0) or 64KB pages (1).
1759 	 *
1760 	 * Write the most significant 30 bits of the TSB physical address
1761 	 * and the encoded TSB table size.
1762 	 */
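	/* Largest encoding i (0..8) with tsb_size >= (0x2000 << i). */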
1763 	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);
1764 
1765 	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
1766 	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);
1767 
1768 	CSR_XS(csr_base, MMU_TSB_CONTROL, val);
1769 
1770 	/*
1771 	 * Enable the MMU, set the "TSB Cache Snoop Enable",
1772 	 * the "Cache Mode", the "Bypass Enable" and
1773 	 * the "Translation Enable" bits.
1774 	 */
1775 	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
1776 	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
1777 	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
1778 	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
1779 	    | (1ull << MMU_CONTROL_AND_STATUS_TE));
1780 
1781 	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);
1782 
1783 	/*
1784 	 * Read the register here to ensure that the previous writes to
1785 	 * the Fire MMU registers have been flushed.  (Technically, this
1786 	 * is not entirely necessary here as we will likely do later reads
1787 	 * during Fire initialization, but it is a small price to pay for
1788 	 * more modular code.)
1789 	 */
1790 	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
1791 
1792 	/*
1793 	 * CSR_V TLU's UE interrupt regs (log, enable, status, clear)
1794 	 * Plus header logs
1795 	 */
1796 	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
1797 	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));
1798 
1799 	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
1800 	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));
1801 
1802 	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
1803 	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));
1804 
1805 	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
1806 	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
1807 }
1808 
1809 /*
1810  * Generic IOMMU Services
1811  */
1812 
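/*
 * Install valid TTEs for 'pages' pages starting at the TSB index carried
 * in 'tsbid'.  With MMU_MAP_PFN the PFNs come from the DMA request
 * structure; otherwise they are looked up from the kernel address 'addr'.
 */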
1813 /* ARGSUSED */
1814 uint64_t
1815 hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
1816     io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
1817 {
1818 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1819 	uint64_t	attr = MMU_TTE_V;
1820 	int		i;
1821 
1822 	if (io_attr & PCI_MAP_ATTR_WRITE)
1823 		attr |= MMU_TTE_W;
1824 
1825 	if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
1826 	    (io_attr & PCI_MAP_ATTR_RO))
1827 		attr |= MMU_TTE_RO;
1828 
1829 	if (attr & MMU_TTE_RO) {
1830 		DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
1831 		    "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
1832 	}
1833 
1834 	if (flags & MMU_MAP_PFN) {
1835 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;
1836 		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
1837 			px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
1838 			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1839 
1840 			/*
1841 			 * Oberon will need to flush the corresponding TTEs in
1842 			 * the cache.  A single flush covers a cache line of
1843 			 * eight TTEs; extra PIOs are expensive.
1844 			 */
1845 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1846 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1847 					CSR_XS(dev_hdl,
1848 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1849 					    (pxu_p->tsb_paddr +
1850 					    (tsb_index * MMU_TTE_SIZE)));
1851 				}
1852 			}
1853 		}
1854 	} else {
1855 		caddr_t	a = (caddr_t)addr;
1856 		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
1857 			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
1858 			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1859 
1860 			/*
1861 			 * Oberon will need to flush the corresponding TTEs in
1862 			 * the cache.  A single flush covers a cache line of
1863 			 * eight TTEs; extra PIOs are expensive.
1864 			 */
1865 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1866 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1867 					CSR_XS(dev_hdl,
1868 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1869 					    (pxu_p->tsb_paddr +
1870 					    (tsb_index * MMU_TTE_SIZE)));
1871 				}
1872 			}
1873 		}
1874 	}
1875 
1876 	return (H_EOK);
1877 }
1878 
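/*
 * Invalidate the TTEs for 'pages' pages starting at the TSB index
 * carried in 'tsbid'.
 */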
1879 /* ARGSUSED */
1880 uint64_t
1881 hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1882     pages_t pages)
1883 {
1884 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1885 	int		i;
1886 
1887 	for (i = 0; i < pages; i++, tsb_index++) {
1888 		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
1889 
1890 		/*
1891 		 * Oberon will need to flush the corresponding TTEs in
1892 		 * the cache.  A single flush covers a cache line of eight
1893 		 * TTEs; extra PIOs are expensive.
1894 		 */
1895 		if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1896 			if ((i == (pages - 1)) || !((tsb_index + 1) & 0x7)) {
1897 				CSR_XS(dev_hdl,
1898 				    MMU_TTE_CACHE_FLUSH_ADDRESS,
1899 				    (pxu_p->tsb_paddr +
1900 				    (tsb_index * MMU_TTE_SIZE)));
1901 			}
1902 		}
1903 	}
1904 
1905 	return (H_EOK);
1906 }
1907 
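/*
 * Look up the TTE at the TSB index carried in 'tsbid'; return its
 * physical address and attributes, or H_ENOMAP if the TTE is invalid.
 */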
1908 /* ARGSUSED */
1909 uint64_t
1910 hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1911     io_attributes_t *attr_p, r_addr_t *r_addr_p)
1912 {
1913 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1914 	uint64_t	*tte_addr;
1915 	uint64_t	ret = H_EOK;
1916 
1917 	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
1918 
1919 	if (*tte_addr & MMU_TTE_V) {
1920 		*r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
1921 		*attr_p = (*tte_addr & MMU_TTE_W) ?
1922 		    PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
1923 	} else {
1924 		*r_addr_p = 0;
1925 		*attr_p = 0;
1926 		ret = H_ENOMAP;
1927 	}
1928 
1929 	return (ret);
1930 }
1931 
1932 /* ARGSUSED */
1933 uint64_t
1934 hvio_get_bypass_base(pxu_t *pxu_p)
1935 {
1936 	uint64_t base;
1937 
1938 	switch (PX_CHIP_TYPE(pxu_p)) {
1939 	case PX_CHIP_OBERON:
1940 		base = MMU_OBERON_BYPASS_BASE;
1941 		break;
1942 	case PX_CHIP_FIRE:
1943 		base = MMU_FIRE_BYPASS_BASE;
1944 		break;
1945 	default:
1946 		DBG(DBG_MMU, NULL,
1947 		    "hvio_get_bypass_base - unknown chip type: 0x%x\n",
1948 		    PX_CHIP_TYPE(pxu_p));
1949 		base = 0;
1950 		break;
1951 	}
1952 	return (base);
1953 }
1954 
1955 /* ARGSUSED */
1956 uint64_t
1957 hvio_get_bypass_end(pxu_t *pxu_p)
1958 {
1959 	uint64_t end;
1960 
1961 	switch (PX_CHIP_TYPE(pxu_p)) {
1962 	case PX_CHIP_OBERON:
1963 		end = MMU_OBERON_BYPASS_END;
1964 		break;
1965 	case PX_CHIP_FIRE:
1966 		end = MMU_FIRE_BYPASS_END;
1967 		break;
1968 	default:
1969 		DBG(DBG_MMU, NULL,
1970 		    "hvio_get_bypass_end - unknown chip type: 0x%x\n",
1971 		    PX_CHIP_TYPE(pxu_p));
1972 		end = 0;
1973 		break;
1974 	}
1975 	return (end);
1976 }
1977 
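/*
 * Compose a bypass-mode IO address: the chip's bypass base ORed with the
 * real address, plus the noncache bit for pages that are not memory.
 */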
1978 /* ARGSUSED */
1979 uint64_t
1980 hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
1981     io_attributes_t attr, io_addr_t *io_addr_p)
1982 {
1983 	uint64_t	pfn = MMU_BTOP(ra);
1984 
1985 	*io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
1986 	    (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));
1987 
1988 	return (H_EOK);
1989 }
1990 
1991 /*
1992  * Generic IO Interrupt Services
1993  */
1994 
1995 /*
1996  * Converts a device specific interrupt number given by the
1997  * arguments devhandle and devino into a system specific ino.
1998  */
1999 /* ARGSUSED */
2000 uint64_t
2001 hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
2002     sysino_t *sysino)
2003 {
2004 	if (devino > INTERRUPT_MAPPING_ENTRIES) {
2005 		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
2006 		return (H_ENOINTR);
2007 	}
2008 
2009 	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
2010 
2011 	return (H_EOK);
2012 }
2013 
2014 /*
2015  * Returns state in intr_valid_state if the interrupt defined by sysino
2016  * is valid (enabled) or not-valid (disabled).
2017  */
2018 uint64_t
2019 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
2020     intr_valid_state_t *intr_valid_state)
2021 {
2022 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2023 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
2024 		*intr_valid_state = INTR_VALID;
2025 	} else {
2026 		*intr_valid_state = INTR_NOTVALID;
2027 	}
2028 
2029 	return (H_EOK);
2030 }
2031 
2032 /*
2033  * Sets the 'valid' state of the interrupt defined by
2034  * the argument sysino to the state defined by the
2035  * argument intr_valid_state.
2036  */
2037 uint64_t
2038 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
2039     intr_valid_state_t intr_valid_state)
2040 {
2041 	switch (intr_valid_state) {
2042 	case INTR_VALID:
2043 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2044 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2045 		break;
2046 	case INTR_NOTVALID:
2047 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2048 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2049 		break;
2050 	default:
2051 		return (EINVAL);
2052 	}
2053 
2054 	return (H_EOK);
2055 }
2056 
2057 /*
2058  * Returns the current state of the interrupt given by the sysino
2059  * argument.
2060  */
2061 uint64_t
2062 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
2063     intr_state_t *intr_state)
2064 {
2065 	intr_state_t state;
2066 
2067 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2068 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
2069 
2070 	switch (state) {
2071 	case INTERRUPT_IDLE_STATE:
2072 		*intr_state = INTR_IDLE_STATE;
2073 		break;
2074 	case INTERRUPT_RECEIVED_STATE:
2075 		*intr_state = INTR_RECEIVED_STATE;
2076 		break;
2077 	case INTERRUPT_PENDING_STATE:
2078 		*intr_state = INTR_DELIVERED_STATE;
2079 		break;
2080 	default:
2081 		return (EINVAL);
2082 	}
2083 
2084 	return (H_EOK);
2086 }
2087 
2088 /*
2089  * Sets the current state of the interrupt given by the sysino
2090  * argument to the value given in the argument intr_state.
2091  *
2092  * Note: Setting the state to INTR_IDLE clears any pending
2093  * interrupt for sysino.
2094  */
2095 uint64_t
2096 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
2097     intr_state_t intr_state)
2098 {
2099 	intr_state_t state;
2100 
2101 	switch (intr_state) {
2102 	case INTR_IDLE_STATE:
2103 		state = INTERRUPT_IDLE_STATE;
2104 		break;
2105 	case INTR_DELIVERED_STATE:
2106 		state = INTERRUPT_PENDING_STATE;
2107 		break;
2108 	default:
2109 		return (EINVAL);
2110 	}
2111 
2112 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2113 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
2114 
2115 	return (H_EOK);
2116 }
2117 
2118 /*
2119  * Returns the cpuid that is the current target of the
2120  * interrupt given by the sysino argument.
2121  *
2122  * The cpuid value returned is undefined if the target
2123  * has not been set via intr_settarget.
2124  */
2125 uint64_t
2126 hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2127     cpuid_t *cpuid)
2128 {
2129 	switch (PX_CHIP_TYPE(pxu_p)) {
2130 	case PX_CHIP_OBERON:
2131 		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2132 		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
2133 		break;
2134 	case PX_CHIP_FIRE:
2135 		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2136 		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
2137 		break;
2138 	default:
2139 		DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
2140 		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2141 		return (EINVAL);
2142 	}
2143 
2144 	return (H_EOK);
2145 }
2146 
2147 /*
2148  * Set the target cpu for the interrupt defined by the argument
2149  * sysino to the target cpu value defined by the argument cpuid.
2150  */
2151 uint64_t
2152 hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2153     cpuid_t cpuid)
2154 {
2156 	uint64_t	val, intr_controller;
2157 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
2158 
2159 	/*
2160 	 * For now, we assign interrupt controller in a round
2161 	 * robin fashion.  Later, we may need to come up with
2162 	 * a more efficient assignment algorithm.
2163 	 */
2164 	intr_controller = 0x1ull << (cpuid % 4);
2165 
2166 	switch (PX_CHIP_TYPE(pxu_p)) {
2167 	case PX_CHIP_OBERON:
2168 		val = (((cpuid &
2169 		    INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) <<
2170 		    INTERRUPT_MAPPING_ENTRIES_T_DESTID) |
2171 		    ((intr_controller &
2172 		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2173 		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2174 		break;
2175 	case PX_CHIP_FIRE:
2176 		val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
2177 		    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
2178 		    ((intr_controller &
2179 		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2180 		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2181 		break;
2182 	default:
2183 		DBG(DBG_CB, NULL, "hvio_intr_settarget - "
2184 		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2185 		return (EINVAL);
2186 	}
2187 
2188 	/* For EQ interrupts, set DATA MONDO bit */
2189 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
2190 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
2191 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
2192 
2193 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
2194 
2195 	return (H_EOK);
2196 }
2197 
2198 /*
2199  * MSIQ Functions:
2200  */
2201 uint64_t
2202 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
2203 {
2204 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
2205 	    (uint64_t)pxu_p->msiq_mapped_p);
2206 	DBG(DBG_IB, NULL,
2207 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
2208 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
2209 
2210 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
2211 	    (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p),
2212 	    pxu_p->portid) << INO_BITS);
2213 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
2214 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
2215 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
2216 
2217 	return (H_EOK);
2218 }
2219 
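/* An EQ in the ACTIVE or ERROR state is valid; an IDLE EQ is invalid. */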
2220 uint64_t
2221 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2222     pci_msiq_valid_state_t *msiq_valid_state)
2223 {
2224 	uint32_t	eq_state;
2225 	uint64_t	ret = H_EOK;
2226 
2227 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2228 	    msiq_id, ENTRIES_STATE);
2229 
2230 	switch (eq_state) {
2231 	case EQ_IDLE_STATE:
2232 		*msiq_valid_state = PCI_MSIQ_INVALID;
2233 		break;
2234 	case EQ_ACTIVE_STATE:
2235 	case EQ_ERROR_STATE:
2236 		*msiq_valid_state = PCI_MSIQ_VALID;
2237 		break;
2238 	default:
2239 		ret = H_EIO;
2240 		break;
2241 	}
2242 
2243 	return (ret);
2244 }
2245 
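/* Enable or disable an EQ through the EQ control set/clear registers. */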
2246 uint64_t
2247 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2248     pci_msiq_valid_state_t msiq_valid_state)
2249 {
2250 	uint64_t	ret = H_EOK;
2251 
2252 	switch (msiq_valid_state) {
2253 	case PCI_MSIQ_INVALID:
2254 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2255 		    msiq_id, ENTRIES_DIS);
2256 		break;
2257 	case PCI_MSIQ_VALID:
2258 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2259 		    msiq_id, ENTRIES_EN);
2260 		break;
2261 	default:
2262 		ret = H_EINVAL;
2263 		break;
2264 	}
2265 
2266 	return (ret);
2267 }
2268 
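/* Collapse the hardware EQ state into the generic IDLE/ERROR states. */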
2269 uint64_t
2270 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2271     pci_msiq_state_t *msiq_state)
2272 {
2273 	uint32_t	eq_state;
2274 	uint64_t	ret = H_EOK;
2275 
2276 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2277 	    msiq_id, ENTRIES_STATE);
2278 
2279 	switch (eq_state) {
2280 	case EQ_IDLE_STATE:
2281 	case EQ_ACTIVE_STATE:
2282 		*msiq_state = PCI_MSIQ_STATE_IDLE;
2283 		break;
2284 	case EQ_ERROR_STATE:
2285 		*msiq_state = PCI_MSIQ_STATE_ERROR;
2286 		break;
2287 	default:
2288 		ret = H_EIO;
2289 	}
2290 
2291 	return (ret);
2292 }
2293 
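/*
 * Only two transitions touch hardware: ACTIVE -> ERROR (ENOVERR) and
 * ERROR -> IDLE (E2I).  Everything else fails or is a no-op.
 */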
2294 uint64_t
2295 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2296     pci_msiq_state_t msiq_state)
2297 {
2298 	uint32_t	eq_state;
2299 	uint64_t	ret = H_EOK;
2300 
2301 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2302 	    msiq_id, ENTRIES_STATE);
2303 
2304 	switch (eq_state) {
2305 	case EQ_IDLE_STATE:
2306 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2307 			ret = H_EIO;
2308 		break;
2309 	case EQ_ACTIVE_STATE:
2310 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2311 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2312 			    msiq_id, ENTRIES_ENOVERR);
2313 		else
2314 			ret = H_EIO;
2315 		break;
2316 	case EQ_ERROR_STATE:
2317 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
2318 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2319 			    msiq_id, ENTRIES_E2I);
2320 		else
2321 			ret = H_EIO;
2322 		break;
2323 	default:
2324 		ret = H_EIO;
2325 	}
2326 
2327 	return (ret);
2328 }
2329 
2330 uint64_t
2331 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2332     msiqhead_t *msiq_head)
2333 {
2334 	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
2335 	    msiq_id, ENTRIES_HEAD);
2336 
2337 	return (H_EOK);
2338 }
2339 
2340 uint64_t
2341 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2342     msiqhead_t msiq_head)
2343 {
2344 	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2345 	    ENTRIES_HEAD, msiq_head);
2346 
2347 	return (H_EOK);
2348 }
2349 
2350 uint64_t
2351 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2352     msiqtail_t *msiq_tail)
2353 {
2354 	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2355 	    msiq_id, ENTRIES_TAIL);
2356 
2357 	return (H_EOK);
2358 }
2359 
2360 /*
2361  * MSI Functions:
2362  */
2363 uint64_t
2364 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2365 {
2366 	/* PCI MEM 32 resources to perform 32 bit MSI transactions */
2367 	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2368 	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
2369 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2370 	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2371 
2372 	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2373 	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2374 	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
2375 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2376 	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2377 
2378 	return (H_EOK);
2379 }
2380 
2381 uint64_t
2382 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2383     msiqid_t *msiq_id)
2384 {
2385 	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2386 	    msi_num, ENTRIES_EQNUM);
2387 
2388 	return (H_EOK);
2389 }
2390 
2391 uint64_t
2392 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2393     msiqid_t msiq_id)
2394 {
2395 	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2396 	    ENTRIES_EQNUM, msiq_id);
2397 
2398 	return (H_EOK);
2399 }
2400 
2401 uint64_t
2402 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2403     pci_msi_valid_state_t *msi_valid_state)
2404 {
2405 	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2406 	    msi_num, ENTRIES_V);
2407 
2408 	return (H_EOK);
2409 }
2410 
2411 uint64_t
2412 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2413     pci_msi_valid_state_t msi_valid_state)
2414 {
2415 	uint64_t	ret = H_EOK;
2416 
2417 	switch (msi_valid_state) {
2418 	case PCI_MSI_VALID:
2419 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2420 		    ENTRIES_V);
2421 		break;
2422 	case PCI_MSI_INVALID:
2423 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2424 		    ENTRIES_V);
2425 		break;
2426 	default:
2427 		ret = H_EINVAL;
2428 	}
2429 
2430 	return (ret);
2431 }
2432 
2433 uint64_t
2434 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2435     pci_msi_state_t *msi_state)
2436 {
2437 	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2438 	    msi_num, ENTRIES_EQWR_N);
2439 
2440 	return (H_EOK);
2441 }
2442 
2443 uint64_t
2444 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2445     pci_msi_state_t msi_state)
2446 {
2447 	uint64_t	ret = H_EOK;
2448 
2449 	switch (msi_state) {
2450 	case PCI_MSI_STATE_IDLE:
2451 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2452 		    ENTRIES_EQWR_N);
2453 		break;
2454 	case PCI_MSI_STATE_DELIVERED:
2455 	default:
2456 		ret = H_EINVAL;
2457 		break;
2458 	}
2459 
2460 	return (ret);
2461 }
2462 
2463 /*
2464  * MSG Functions:
2465  */
2466 uint64_t
2467 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2468     msiqid_t *msiq_id)
2469 {
2470 	uint64_t	ret = H_EOK;
2471 
2472 	switch (msg_type) {
2473 	case PCIE_PME_MSG:
2474 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2475 		break;
2476 	case PCIE_PME_ACK_MSG:
2477 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2478 		    EQNUM);
2479 		break;
2480 	case PCIE_CORR_MSG:
2481 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2482 		break;
2483 	case PCIE_NONFATAL_MSG:
2484 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2485 		    EQNUM);
2486 		break;
2487 	case PCIE_FATAL_MSG:
2488 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2489 		break;
2490 	default:
2491 		ret = H_EINVAL;
2492 		break;
2493 	}
2494 
2495 	return (ret);
2496 }
2497 
2498 uint64_t
2499 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2500     msiqid_t msiq_id)
2501 {
2502 	uint64_t	ret = H_EOK;
2503 
2504 	switch (msg_type) {
2505 	case PCIE_PME_MSG:
2506 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2507 		break;
2508 	case PCIE_PME_ACK_MSG:
2509 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2510 		break;
2511 	case PCIE_CORR_MSG:
2512 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2513 		break;
2514 	case PCIE_NONFATAL_MSG:
2515 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2516 		break;
2517 	case PCIE_FATAL_MSG:
2518 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2519 		break;
2520 	default:
2521 		ret = H_EINVAL;
2522 		break;
2523 	}
2524 
2525 	return (ret);
2526 }
2527 
2528 uint64_t
2529 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2530     pcie_msg_valid_state_t *msg_valid_state)
2531 {
2532 	uint64_t	ret = H_EOK;
2533 
2534 	switch (msg_type) {
2535 	case PCIE_PME_MSG:
2536 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2537 		break;
2538 	case PCIE_PME_ACK_MSG:
2539 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2540 		    PME_TO_ACK_MAPPING, V);
2541 		break;
2542 	case PCIE_CORR_MSG:
2543 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2544 		break;
2545 	case PCIE_NONFATAL_MSG:
2546 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2547 		    ERR_NONFATAL_MAPPING, V);
2548 		break;
2549 	case PCIE_FATAL_MSG:
2550 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2551 		    V);
2552 		break;
2553 	default:
2554 		ret = H_EINVAL;
2555 		break;
2556 	}
2557 
2558 	return (ret);
2559 }
2560 
2561 uint64_t
2562 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2563     pcie_msg_valid_state_t msg_valid_state)
2564 {
2565 	uint64_t	ret = H_EOK;
2566 
2567 	switch (msg_valid_state) {
2568 	case PCIE_MSG_VALID:
2569 		switch (msg_type) {
2570 		case PCIE_PME_MSG:
2571 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2572 			break;
2573 		case PCIE_PME_ACK_MSG:
2574 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2575 			break;
2576 		case PCIE_CORR_MSG:
2577 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2578 			break;
2579 		case PCIE_NONFATAL_MSG:
2580 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2581 			break;
2582 		case PCIE_FATAL_MSG:
2583 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2584 			break;
2585 		default:
2586 			ret = H_EINVAL;
2587 			break;
2588 		}
2589 
2590 		break;
2591 	case PCIE_MSG_INVALID:
2592 		switch (msg_type) {
2593 		case PCIE_PME_MSG:
2594 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2595 			break;
2596 		case PCIE_PME_ACK_MSG:
2597 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2598 			break;
2599 		case PCIE_CORR_MSG:
2600 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2601 			break;
2602 		case PCIE_NONFATAL_MSG:
2603 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2604 			break;
2605 		case PCIE_FATAL_MSG:
2606 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2607 			break;
2608 		default:
2609 			ret = H_EINVAL;
2610 			break;
2611 		}
2612 		break;
2613 	default:
2614 		ret = H_EINVAL;
2615 	}
2616 
2617 	return (ret);
2618 }
2619 
2620 /*
2621  * Suspend/Resume Functions:
2622  *	(pec, mmu, ib)
2623  *	cb
2624  * Registers saved have all been touched in the XXX_init functions.
2625  */
2626 uint64_t
2627 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2628 {
2629 	uint64_t	*config_state;
2630 	int		total_size;
2631 	int		i;
2632 
2633 	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2634 		return (H_EIO);
2635 
2636 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2637 	config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2638 
2639 	if (config_state == NULL) {
2640 		return (H_EIO);
2641 	}
2642 
2643 	/*
2644 	 * Soft state for suspend/resume from pxu_t:
2645 	 * uint64_t	*pec_config_state;
2646 	 * uint64_t	*mmu_config_state;
2647 	 * uint64_t	*ib_intr_map;
2648 	 * uint64_t	*ib_config_state;
2649 	 * uint64_t	*xcb_config_state;
2650 	 */
2651 
2652 	/* Save the PEC configuration states */
2653 	pxu_p->pec_config_state = config_state;
2654 	for (i = 0; i < PEC_KEYS; i++) {
2655 		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
2656 		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
2657 			pxu_p->pec_config_state[i] =
2658 			    CSR_XR((caddr_t)dev_hdl,
2659 			    pec_config_state_regs[i].reg);
2660 		}
2661 	}
2662 
2663 	/* Save the MMU configuration states */
2664 	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2665 	for (i = 0; i < MMU_KEYS; i++) {
2666 		pxu_p->mmu_config_state[i] =
2667 		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2668 	}
2669 
2670 	/* Save the interrupt mapping registers */
2671 	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2672 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2673 		pxu_p->ib_intr_map[i] =
2674 		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2675 	}
2676 
2677 	/* Save the IB configuration states */
2678 	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2679 	for (i = 0; i < IB_KEYS; i++) {
2680 		pxu_p->ib_config_state[i] =
2681 		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2682 	}
2683 
2684 	return (H_EOK);
2685 }
2686 
2687 void
2688 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2689 {
2690 	int		total_size;
2691 	sysino_t	sysino;
2692 	int		i;
2693 
2694 	/* Make sure that suspend actually did occur */
2695 	if (!pxu_p->pec_config_state) {
2696 		return;
2697 	}
2698 
2699 	/* Restore IB configuration states */
2700 	for (i = 0; i < IB_KEYS; i++) {
2701 		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2702 		    pxu_p->ib_config_state[i]);
2703 	}
2704 
2705 	/*
2706 	 * Restore the interrupt mapping registers
2707 	 * and make sure the interrupts are idle.
2708 	 */
2709 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2710 		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2711 		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2712 		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2713 		    pxu_p->ib_intr_map[i]);
2714 	}
2715 
2716 	/* Restore MMU configuration states */
2717 	/* Clear the cache. */
2718 	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2719 
2720 	for (i = 0; i < MMU_KEYS; i++) {
2721 		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2722 		    pxu_p->mmu_config_state[i]);
2723 	}
2724 
2725 	/* Restore PEC configuration states */
2726 	/* Make sure all reset bits are low until error is detected */
2727 	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2728 
2729 	for (i = 0; i < PEC_KEYS; i++) {
2730 		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
2731 		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
2732 			CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg,
2733 			    pxu_p->pec_config_state[i]);
2734 		}
2735 	}
2736 
2737 	/* Enable PCI-E interrupt */
2738 	(void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);
2739 
2740 	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
2741 
2742 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2743 	kmem_free(pxu_p->pec_config_state, total_size);
2744 
2745 	pxu_p->pec_config_state = NULL;
2746 	pxu_p->mmu_config_state = NULL;
2747 	pxu_p->ib_config_state = NULL;
2748 	pxu_p->ib_intr_map = NULL;
2749 
2750 	msiq_resume(dev_hdl, pxu_p);
2751 }
2752 
2753 uint64_t
2754 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2755 {
2756 	uint64_t *config_state, *cb_regs;
2757 	int i, cb_size, cb_keys;
2758 
2759 	switch (PX_CHIP_TYPE(pxu_p)) {
2760 	case PX_CHIP_OBERON:
2761 		cb_size = UBC_SIZE;
2762 		cb_keys = UBC_KEYS;
2763 		cb_regs = ubc_config_state_regs;
2764 		break;
2765 	case PX_CHIP_FIRE:
2766 		cb_size = JBC_SIZE;
2767 		cb_keys = JBC_KEYS;
2768 		cb_regs = jbc_config_state_regs;
2769 		break;
2770 	default:
2771 		DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n",
2772 		    PX_CHIP_TYPE(pxu_p));
2773 		return (H_EIO);
2774 	}
2775 
2776 	config_state = kmem_zalloc(cb_size, KM_NOSLEEP);
2777 
2778 	if (config_state == NULL) {
2779 		return (H_EIO);
2780 	}
2781 
2782 	/* Save the configuration states */
2783 	pxu_p->xcb_config_state = config_state;
2784 	for (i = 0; i < cb_keys; i++) {
2785 		pxu_p->xcb_config_state[i] =
2786 		    CSR_XR((caddr_t)dev_hdl, cb_regs[i]);
2787 	}
2788 
2789 	return (H_EOK);
2790 }
2791 
2792 void
2793 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2794     devino_t devino, pxu_t *pxu_p)
2795 {
2796 	sysino_t sysino;
2797 	uint64_t *cb_regs;
2798 	int i, cb_size, cb_keys;
2799 
2800 	switch (PX_CHIP_TYPE(pxu_p)) {
2801 	case PX_CHIP_OBERON:
2802 		cb_size = UBC_SIZE;
2803 		cb_keys = UBC_KEYS;
2804 		cb_regs = ubc_config_state_regs;
2805 		/*
2806 		 * No reason to have any reset bits high until an error is
2807 		 * detected on the link.
2808 		 */
2809 		CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull);
2810 		break;
2811 	case PX_CHIP_FIRE:
2812 		cb_size = JBC_SIZE;
2813 		cb_keys = JBC_KEYS;
2814 		cb_regs = jbc_config_state_regs;
2815 		/*
2816 		 * No reason to have any reset bits high until an error is
2817 		 * detected on the link.
2818 		 */
2819 		CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2820 		break;
2821 	default:
2822 		DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n",
2823 		    PX_CHIP_TYPE(pxu_p));
2824 		return;
2825 	}
2826 
2827 	ASSERT(pxu_p->xcb_config_state);
2828 
2829 	/* Restore the configuration states */
2830 	for (i = 0; i < cb_keys; i++) {
2831 		CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i],
2832 		    pxu_p->xcb_config_state[i]);
2833 	}
2834 
2835 	/* Enable XBC interrupt */
2836 	(void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);
2837 
2838 	(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
2839 
2840 	kmem_free(pxu_p->xcb_config_state, cb_size);
2841 
2842 	pxu_p->xcb_config_state = NULL;
2843 }
2844 
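/*
 * Save the EQ states, the MSI mapping entries and the remaining MSIQ
 * registers into one buffer hung off the pxu soft state; msiq_resume()
 * consumes the buffer in the same order.
 */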
2845 static uint64_t
2846 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2847 {
2848 	size_t	bufsz;
2849 	volatile uint64_t *cur_p;
2850 	int i;
2851 
2852 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2853 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2854 	    NULL)
2855 		return (H_EIO);
2856 
2857 	cur_p = pxu_p->msiq_config_state;
2858 
2859 	/* Save each EQ state */
2860 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2861 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2862 
2863 	/* Save MSI mapping registers */
2864 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2865 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2866 
2867 	/* Save all other MSIQ registers */
2868 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2869 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2870 	return (H_EOK);
2871 }
2872 
2873 static void
2874 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2875 {
2876 	size_t	bufsz;
2877 	uint64_t *cur_p, state;
2878 	int i;
2879 
2880 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2881 	cur_p = pxu_p->msiq_config_state;
2882 	/*
2883 	 * Initialize EQ base address register and
2884 	 * Interrupt Mondo Data 0 register.
2885 	 */
2886 	(void) hvio_msiq_init(dev_hdl, pxu_p);
2887 
2888 	/* Restore EQ states */
2889 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2890 		state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
2891 		if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
2892 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2893 			    i, ENTRIES_EN);
2894 	}
2895 
2896 	/* Restore MSI mapping */
2897 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2898 		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2899 
2900 	/*
2901 	 * Restore all other registers. MSI 32 bit address and
2902 	 * MSI 64 bit address are restored as part of this.
2903 	 */
2904 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2905 		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2906 
2907 	kmem_free(pxu_p->msiq_config_state, bufsz);
2908 	pxu_p->msiq_config_state = NULL;
2909 }
2910 
2911 /*
2912  * Sends a PME_Turn_Off message to put the link into the L2/L3 Ready state.
2913  * Called by px_goto_l23ready.
2914  * Returns DDI_SUCCESS or DDI_FAILURE.
2915  */
2916 int
2917 px_send_pme_turnoff(caddr_t csr_base)
2918 {
2919 	volatile uint64_t reg;
2920 
2921 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2922 	/* If already pending, return failure */
2923 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2924 		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2925 		    "tlu_pme_turn_off_generate = %llx\n", reg);
2926 		return (DDI_FAILURE);
2927 	}
2928 
2929 	/* Write to the PME_Turn_Off register to broadcast the message. */
2930 	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2931 	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
2932 
2933 	return (DDI_SUCCESS);
2934 }
2935 
2936 /*
2937  * Checks for link being in L1idle state.
2938  * Returns
2939  * DDI_SUCCESS - if the link is in L1idle
2940  * DDI_FAILURE - if the link is not in L1idle
2941  */
2942 int
2943 px_link_wait4l1idle(caddr_t csr_base)
2944 {
2945 	uint8_t ltssm_state;
2946 	int ntries = px_max_l1_tries;
2947 
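	/* Poll the LTSSM once per clock tick until L1 idle or retries run out. */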
2948 	while (ntries > 0) {
2949 		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2950 		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2951 			break;
2952 		delay(1);
2953 	}
2954 	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2955 	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2956 }
2957 
2958 /*
2959  * Transition the link to L0 after it has gone down.
2960  */
2961 int
2962 px_link_retrain(caddr_t csr_base)
2963 {
2964 	volatile uint64_t reg;
2965 
2966 	reg = CSR_XR(csr_base, TLU_CONTROL);
2967 	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2968 		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2969 		return (DDI_FAILURE);
2970 	}
2971 
2972 	/* Clear link down bit in TLU Other Event Clear Status Register. */
2973 	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2974 
2975 	/* Clear Drain bit in TLU Status Register */
2976 	CSR_BS(csr_base, TLU_STATUS, DRAIN);
2977 
2978 	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2979 	reg = CSR_XR(csr_base, TLU_CONTROL);
2980 	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2981 	CSR_XS(csr_base, TLU_CONTROL, reg);
2982 
2983 	return (DDI_SUCCESS);
2984 }
2985 
2986 void
2987 px_enable_detect_quiet(caddr_t csr_base)
2988 {
2989 	volatile uint64_t tlu_ctrl;
2990 
2991 	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
2992 	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
2993 	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
2994 }
2995 
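/*
 * Power on an Oberon hotplug slot: verify leaf reset and slot status,
 * apply slot power and clock, release PERST#, then retry link training
 * until the DLU reports FC init done and DL_Active.
 */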
2996 static uint_t
2997 oberon_hp_pwron(caddr_t csr_base)
2998 {
2999 	volatile uint64_t reg;
3000 	boolean_t link_retry, link_up;
3001 	int loop, i;
3002 
3003 	DBG(DBG_HP, NULL, "oberon_hp_pwron the slot\n");
3004 
3005 	/* Check Leaf Reset status */
3006 	reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
3007 	if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
3008 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n");
3009 		goto fail;
3010 	}
3011 
3012 	/* Check HP Capable */
3013 	if (!CSR_BR(csr_base, TLU_SLOT_CAPABILITIES, HP)) {
3014 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not "
3015 		    "hotpluggable\n");
3016 		goto fail;
3017 	}
3018 
3019 	/* Check Slot status */
3020 	reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
3021 	if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
3022 	    (reg & (1ull << TLU_SLOT_STATUS_MRLS))) {
3023 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n",
3024 		    reg);
3025 		goto fail;
3026 	}
3027 
3028 	/* Blink power LED, this is done from pciehpc already */
3029 
3030 	/* Turn on slot power */
3031 	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3032 
3033 	/* power fault detection */
3034 	delay(drv_usectohz(25000));
3035 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3036 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3037 
3038 	/* wait to check power state */
3039 	delay(drv_usectohz(25000));
3040 
3041 	if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) {
3042 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: power fault\n");
3043 		goto fail1;
3044 	}
3045 
3046 	/* power is good */
3047 	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3048 
3049 	delay(drv_usectohz(25000));
3050 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3051 	CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3052 
3053 	/* Turn on slot clock */
3054 	CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN);
3055 
3056 	link_up = B_FALSE;
3057 	link_retry = B_FALSE;
3058 
3059 	for (loop = 0; (loop < link_retry_count) && (link_up == B_FALSE);
3060 	    loop++) {
3061 		if (link_retry == B_TRUE) {
3062 			DBG(DBG_HP, NULL, "oberon_hp_pwron : retry link loop "
3063 			    "%d\n", loop);
3064 			CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3065 			CSR_XS(csr_base, FLP_PORT_CONTROL, 0x1);
3066 			delay(drv_usectohz(10000));
3067 			CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
3068 			CSR_BS(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
3069 			CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3070 			delay(drv_usectohz(50000));
3071 		}
3072 
3073 		/* Release PCI-E Reset */
3074 		delay(drv_usectohz(wait_perst));
3075 		CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST);
3076 
3077 		/*
3078 		 * Open events' mask
3079 		 * This should be done from pciehpc already
3080 		 */
3081 
3082 		/* Enable PCIE port */
3083 		delay(drv_usectohz(wait_enable_port));
3084 		CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3085 		CSR_XS(csr_base, FLP_PORT_CONTROL, 0x20);
3086 
3087 		/* wait for the link up */
3088 		for (i = 0; (i < 2) && (link_up == B_FALSE); i++) {
3089 			delay(drv_usectohz(link_status_check));
3090 			reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS);
3091 
3092 			if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) &
3093 			    DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) ==
3094 			    DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) &&
3095 			    (reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) &&
3096 			    ((reg & DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK)
3097 			    == DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) {
3098 				DBG(DBG_HP, NULL,
3099 				    "oberon_hp_pwron : link is up\n");
3100 				link_up = B_TRUE;
3101 			} else
3102 				link_retry = B_TRUE;
3103 		}
3104 	}
3105 
3106 	if (link_up == B_FALSE) {
3107 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable "
3108 		    "PCI-E port\n");
3109 		goto fail2;
3110 	}
3111 
3112 	/* link is up */
3113 	CSR_BC(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
3114 	CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR);
3115 	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P);
3116 	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S);
3117 	CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
3118 
3119 	/* Restore LUP/LDN */
3120 	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3121 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P))
3122 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P;
3123 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P))
3124 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P;
3125 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S))
3126 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S;
3127 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S))
3128 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S;
3129 	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3130 
3131 	/*
3132 	 * Initialize Leaf
3133 	 * SPLS = 00b, SPLV = 11001b, i.e. 25W
3134 	 */
3135 	reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3136 	reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
3137 	    TLU_SLOT_CAPABILITIES_SPLS);
3138 	reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
3139 	    TLU_SLOT_CAPABILITIES_SPLV);
3140 	reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
3141 	CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);
3142 
3143 	/* Turn on Power LED */
3144 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3145 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3146 	reg = pcie_slotctl_pwr_indicator_set(reg,
3147 	    PCIE_SLOTCTL_INDICATOR_STATE_ON);
3148 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3149 
3150 	/* Notify to SCF */
3151 	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3152 		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3153 	else
3154 		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3155 
3156 	/* Wait for one second */
3157 	delay(drv_usectohz(1000000));
3158 
3159 	return (DDI_SUCCESS);
3160 
3161 fail2:
3162 	/* Link up is failed */
3163 	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3164 	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3165 	delay(drv_usectohz(150));
3166 
3167 	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3168 	delay(drv_usectohz(100));
3169 
3170 fail1:
3171 	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3172 
3173 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3174 
3175 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3176 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3177 	reg = pcie_slotctl_pwr_indicator_set(reg,
3178 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3179 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3180 
3181 	CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);
3182 
3183 fail:
3184 	return ((uint_t)DDI_FAILURE);
3185 }
3186 
3187 hrtime_t oberon_leaf_reset_timeout = 120ll * NANOSEC;	/* 120 seconds */
3188 
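/*
 * Power off an Oberon hotplug slot: quiesce the link, disable the port,
 * assert PERST#, stop the clock and slot power, then wait for the leaf
 * reset to complete (signalled through ILU SPARE3).
 */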
3189 static uint_t
3190 oberon_hp_pwroff(caddr_t csr_base)
3191 {
3192 	volatile uint64_t reg;
3193 	volatile uint64_t reg_tluue, reg_tluce;
3194 	hrtime_t start_time, end_time;
3195 
3196 	DBG(DBG_HP, NULL, "oberon_hp_pwroff the slot\n");
3197 
3198 	/* Blink power LED, this is done from pciehpc already */
3199 
3200 	/* Clear Slot Event */
3201 	CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
3202 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3203 
3204 	/* DRN_TR_DIS on */
3205 	CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3206 	delay(drv_usectohz(10000));
3207 
3208 	/* Disable LUP/LDN */
3209 	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3210 	reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3211 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3212 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3213 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3214 	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3215 
3216 	/* Save the TLU registers */
3217 	reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE);
3218 	reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE);
3219 	/* All clear */
3220 	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0);
3221 	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0);
3222 
3223 	/* Disable port */
3224 	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3225 
3226 	/* PCIE reset */
3227 	delay(drv_usectohz(10000));
3228 	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3229 
3230 	/* PCIE clock stop */
3231 	delay(drv_usectohz(150));
3232 	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3233 
3234 	/* Turn off slot power */
3235 	delay(drv_usectohz(100));
3236 	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3237 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3238 	delay(drv_usectohz(25000));
3239 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3240 
3241 	/* write 0 to bit 7 of ILU Error Log Enable Register */
3242 	CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);
3243 
3244 	/* Set back TLU registers */
3245 	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue);
3246 	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce);
3247 
3248 	/* Power LED off */
3249 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3250 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3251 	reg = pcie_slotctl_pwr_indicator_set(reg,
3252 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3253 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3254 
3255 	/* Indicator LED blink */
3256 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3257 	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3258 	reg = pcie_slotctl_attn_indicator_set(reg,
3259 	    PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
3260 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3261 
3262 	/* Notify to SCF */
3263 	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3264 		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3265 	else
3266 		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3267 
3268 	start_time = gethrtime();
3269 	/* Check Leaf Reset status */
3270 	while (!(CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))) {
3271 		if ((end_time = (gethrtime() - start_time)) >
3272 		    oberon_leaf_reset_timeout) {
3273 			cmn_err(CE_WARN, "Oberon leaf reset has not completed "
3274 			    "after waiting %llx ns", end_time);
3275 
3276 			break;
3277 		}
3278 
3279 		/* Wait for one second */
3280 		delay(drv_usectohz(1000000));
3281 	}
3282 
3283 	/* Indicator LED off */
3284 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3285 	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3286 	reg = pcie_slotctl_attn_indicator_set(reg,
3287 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3288 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3289 
3290 	return (DDI_SUCCESS);
3291 }
3292 
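/*
 * pciehpc register ops for Oberon: present the TLU slot and link CSRs
 * as the standard PCIe hotplug capability registers.
 */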
3293 static uint_t
3294 oberon_hpreg_get(void *cookie, off_t off)
3295 {
3296 	caddr_t csr_base = *(caddr_t *)cookie;
3297 	volatile uint64_t val = -1ull;
3298 
3299 	switch (off) {
3300 	case PCIE_SLOTCAP:
3301 		val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3302 		break;
3303 	case PCIE_SLOTCTL:
3304 		val = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3305 
3306 		/* Get the power state */
3307 		val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
3308 		    (1ull << HOTPLUG_CONTROL_PWREN)) ?
3309 		    0 : PCIE_SLOTCTL_PWR_CONTROL;
3310 		break;
3311 	case PCIE_SLOTSTS:
3312 		val = CSR_XR(csr_base, TLU_SLOT_STATUS);
3313 		break;
3314 	case PCIE_LINKCAP:
3315 		val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES);
3316 		break;
3317 	case PCIE_LINKSTS:
3318 		val = CSR_XR(csr_base, TLU_LINK_STATUS);
3319 		break;
3320 	default:
3321 		DBG(DBG_HP, NULL, "oberon_hpreg_get(): "
3322 		    "unsupported offset 0x%lx\n", off);
3323 		break;
3324 	}
3325 
3326 	return ((uint_t)val);
3327 }
3328 
3329 static uint_t
3330 oberon_hpreg_put(void *cookie, off_t off, uint_t val)
3331 {
3332 	caddr_t csr_base = *(caddr_t *)cookie;
3333 	volatile uint64_t pwr_state_on, pwr_fault;
3334 	uint_t pwr_off, ret = DDI_SUCCESS;
3335 
3336 	DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
3337 	    off, oberon_hpreg_get(cookie, off), val);
3338 
3339 	switch (off) {
3340 	case PCIE_SLOTCTL:
3341 		/*
3342 		 * Depending on the current state, insertion or removal
3343 		 * will go through their respective sequences.
3344 		 */
3345 		pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
3346 		pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;
3347 
3348 		if (!pwr_off && !pwr_state_on)
3349 			ret = oberon_hp_pwron(csr_base);
3350 		else if (pwr_off && pwr_state_on) {
3351 			pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
3352 			    (1ull << TLU_SLOT_STATUS_PWFD);
3353 
3354 			if (pwr_fault) {
3355 				DBG(DBG_HP, NULL, "oberon_hpreg_put: power "
3356 				    "off because of power fault\n");
3357 				CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3358 			} else
3359 				ret = oberon_hp_pwroff(csr_base);
3361 		} else
3362 			CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
3363 		break;
3364 	case PCIE_SLOTSTS:
3365 		CSR_XS(csr_base, TLU_SLOT_STATUS, val);
3366 		break;
3367 	default:
3368 		DBG(DBG_HP, NULL, "oberon_hpreg_put(): "
3369 		    "unsupported offset 0x%lx\n", off);
3370 		ret = (uint_t)DDI_FAILURE;
3371 		break;
3372 	}
3373 
3374 	return (ret);
3375 }
3376 
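/* Hook the Oberon slot CSRs into the common pciehpc framework. */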
3377 int
3378 hvio_hotplug_init(dev_info_t *dip, void *arg)
3379 {
3380 	pciehpc_regops_t *regops = (pciehpc_regops_t *)arg;
3381 	px_t	*px_p = DIP_TO_STATE(dip);
3382 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3383 	volatile uint64_t reg;
3384 
3385 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
3386 		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3387 		    TLU_SLOT_CAPABILITIES, HP)) {
3388 			DBG(DBG_HP, NULL, "%s%d: hotplug capable not set\n",
3389 			    ddi_driver_name(dip), ddi_get_instance(dip));
3390 			return (DDI_FAILURE);
3391 		}
3392 
3393 		/* For empty or disconnected slot, disable LUP/LDN */
3394 		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3395 		    TLU_SLOT_STATUS, PSD) ||
3396 		    !CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3397 		    HOTPLUG_CONTROL, PWREN)) {
3398 
3399 			reg = CSR_XR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3400 			    TLU_OTHER_EVENT_LOG_ENABLE);
3401 			reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3402 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3403 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3404 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3405 			CSR_XS((caddr_t)pxu_p->px_address[PX_REG_CSR],
3406 			    TLU_OTHER_EVENT_LOG_ENABLE, reg);
3407 		}
3408 
3409 		regops->get = oberon_hpreg_get;
3410 		regops->put = oberon_hpreg_put;
3411 
3412 		/* cookie points at the saved CSR base address */
3413 		regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];
3414 
3415 		return (DDI_SUCCESS);
3416 	}
3417 
3418 	return (DDI_ENOTSUP);
3419 }
3420 
3421 int
3422 hvio_hotplug_uninit(dev_info_t *dip)
3423 {
3424 	px_t	*px_p = DIP_TO_STATE(dip);
3425 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3426 
3427 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
3428 		return (DDI_SUCCESS);
3429 
3430 	return (DDI_FAILURE);
3431 }
3432