xref: /illumos-gate/usr/src/uts/sun4u/io/px/px_hlib.c (revision 71269a2275bf5a143dad6461eee2710a344e7261)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/cmn_err.h>
28 #include <sys/vmsystm.h>
29 #include <sys/vmem.h>
30 #include <sys/machsystm.h>	/* lddphys() */
31 #include <sys/iommutsb.h>
32 #include <sys/pci.h>
33 #include <sys/hotplug/pci/pciehpc.h>
34 #include <pcie_pwr.h>
35 #include <px_obj.h>
36 #include "px_regs.h"
37 #include "oberon_regs.h"
38 #include "px_csr.h"
39 #include "px_lib4u.h"
40 #include "px_err.h"
41 
42 /*
43  * Registers that need to be saved and restored during suspend/resume.
44  */
45 
46 /*
47  * Registers in the PEC Module.
48  * LPU_RESET should be set to 0ull during resume
49  *
50  * This array is in (reg, chip) form.  PX_CHIP_UNIDENTIFIED applies to
51  * all chips, PX_CHIP_FIRE to Fire only, and PX_CHIP_OBERON to Oberon only.
52  */
53 static struct px_pec_regs {
54 	uint64_t reg;
55 	uint64_t chip;
56 } pec_config_state_regs[] = {
57 	{PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
58 	{ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
59 	{ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
60 	{TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
61 	{TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
62 	{TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
63 	{TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
64 	{TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
65 	{TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
66 	{TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
67 	{TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
68 	{TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
69 	{DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
70 	{DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
71 	{DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
72 	{LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
73 	{LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
74 	{LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
75 	{LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
76 	{LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
77 	{LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
78 	{LPU_RESET, PX_CHIP_FIRE},
79 	{LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
80 	{LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
81 	{LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
82 	{LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
83 	{LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
84 	{LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
85 	{LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
86 	{LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
87 	{LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
88 	{LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
89 	{LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
90 	{LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
91 	{DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
92 	{DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
93 	{DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
94 };
95 
96 #define	PEC_KEYS	\
97 	((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))
98 
99 #define	PEC_SIZE	(PEC_KEYS * sizeof (uint64_t))
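/*
 * Illustrative note: PEC_KEYS is the number of entries in
 * pec_config_state_regs[] and PEC_SIZE is the size, in bytes, of a save
 * buffer holding one uint64_t per entry.  A suspend routine would
 * typically walk the table and save only the registers whose chip field
 * matches the running chip or is PX_CHIP_UNIDENTIFIED, roughly:
 *
 *	for (i = 0; i < PEC_KEYS; i++) {
 *		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
 *		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED))
 *			save_buf[i] = CSR_XR(csr_base,
 *			    pec_config_state_regs[i].reg);
 *	}
 *
 * (save_buf is a hypothetical buffer here; see the actual suspend/resume
 * code for the real buffer and handle management.)
 */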
100 
101 /*
102  * Registers for the MMU module.
103  * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
104  */
105 static uint64_t mmu_config_state_regs[] = {
106 	MMU_TSB_CONTROL,
107 	MMU_CONTROL_AND_STATUS,
108 	MMU_ERROR_LOG_ENABLE,
109 	MMU_INTERRUPT_ENABLE
110 };
111 #define	MMU_SIZE (sizeof (mmu_config_state_regs))
112 #define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))
113 
114 /*
115  * Registers for the IB Module
116  */
117 static uint64_t ib_config_state_regs[] = {
118 	IMU_ERROR_LOG_ENABLE,
119 	IMU_INTERRUPT_ENABLE
120 };
121 #define	IB_SIZE (sizeof (ib_config_state_regs))
122 #define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
123 #define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))
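/*
 * IB_MAP_SIZE is the size, in bytes, of a save area large enough for all
 * interrupt mapping registers (one uint64_t per mapping entry), used for
 * suspend/resume as described above.
 */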
124 
125 /*
126  * Registers for the JBC module.
127  * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
128  */
129 static uint64_t	jbc_config_state_regs[] = {
130 	JBUS_PARITY_CONTROL,
131 	JBC_FATAL_RESET_ENABLE,
132 	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
133 	JBC_ERROR_LOG_ENABLE,
134 	JBC_INTERRUPT_ENABLE
135 };
136 #define	JBC_SIZE (sizeof (jbc_config_state_regs))
137 #define	JBC_KEYS (JBC_SIZE / sizeof (uint64_t))
138 
139 /*
140  * Registers for the UBC module.
141  * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
142  */
143 static uint64_t	ubc_config_state_regs[] = {
144 	UBC_ERROR_LOG_ENABLE,
145 	UBC_INTERRUPT_ENABLE
146 };
147 #define	UBC_SIZE (sizeof (ubc_config_state_regs))
148 #define	UBC_KEYS (UBC_SIZE / sizeof (uint64_t))
149 
150 static uint64_t	msiq_config_other_regs[] = {
151 	ERR_COR_MAPPING,
152 	ERR_NONFATAL_MAPPING,
153 	ERR_FATAL_MAPPING,
154 	PM_PME_MAPPING,
155 	PME_TO_ACK_MAPPING,
156 	MSI_32_BIT_ADDRESS,
157 	MSI_64_BIT_ADDRESS
158 };
159 #define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
160 #define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))
161 
162 #define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
163 #define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))
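/*
 * MSIQ_STATE_SIZE and MSIQ_MAPPING_SIZE are the byte sizes of the save
 * areas for the event queue state and MSI mapping registers, one
 * uint64_t per entry, again for use during suspend/resume.
 */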
164 
165 /* OPL tuning variables for link unstable issue */
166 int wait_perst = 5000000; 	/* step 9, default: 5s */
167 int wait_enable_port = 30000;	/* step 11, default: 30ms */
168 int link_retry_count = 2; 	/* step 11, default: 2 */
169 int link_status_check = 400000;	/* step 11, default: 400ms */
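/*
 * Note: judging from the defaults above, wait_perst, wait_enable_port and
 * link_status_check are expressed in microseconds (5000000 == 5 s,
 * 30000 == 30 ms, 400000 == 400 ms); link_retry_count is a plain count.
 */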
170 
171 static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
172 static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
173 static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
174 static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
175 
176 extern int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
177 extern int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
178 
179 /*
180  * Initialize the bus, but do not enable interrupts.
181  */
182 /* ARGSUSED */
183 void
184 hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
185 {
186 	switch (PX_CHIP_TYPE(pxu_p)) {
187 	case PX_CHIP_OBERON:
188 		ubc_init(xbc_csr_base, pxu_p);
189 		break;
190 	case PX_CHIP_FIRE:
191 		jbc_init(xbc_csr_base, pxu_p);
192 		break;
193 	default:
194 		DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
195 		    PX_CHIP_TYPE(pxu_p));
196 		break;
197 	}
198 }
199 
200 /*
201  * Initialize the JBC module, but do not enable interrupts.
202  */
203 /* ARGSUSED */
204 static void
205 jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
206 {
207 	uint64_t val;
208 
209 	/* Check if we need to enable inverted parity */
210 	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
211 	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
212 	DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
213 	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));
214 
215 	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
216 	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
217 	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
218 	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
219 	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
220 	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
221 	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
222 	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
223 	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
224 	DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
225 	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));
226 
227 	/*
228 	 * Enable merge, jbc and dmc interrupts.
229 	 */
230 	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
231 	DBG(DBG_CB, NULL,
232 	    "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
233 	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
234 
235 	/*
236 	 * CSR_V JBC's interrupt regs (log, enable, status, clear)
237 	 */
238 	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
239 	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));
240 
241 	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
242 	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));
243 
244 	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
245 	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));
246 
247 	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
248 	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
249 }
250 
251 /*
252  * Initialize the UBC module, but do not enable interrupts.
253  */
254 /* ARGSUSED */
255 static void
256 ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
257 {
258 	/*
259 	 * Enable Uranus bus error log bits.
260 	 */
261 	CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
262 	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
263 	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));
264 
265 	/*
266 	 * Clear Uranus bus errors.
267 	 */
268 	CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
269 	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
270 	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
271 
272 	/*
273 	 * CSR_V UBC's interrupt regs (log, enable, status, clear)
274 	 */
275 	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
276 	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));
277 
278 	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
279 	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));
280 
281 	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
282 	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));
283 
284 	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
285 	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
286 }
287 
288 /*
289  * Initialize the module, but do not enable interrupts.
290  */
291 /* ARGSUSED */
292 void
293 hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
294 {
295 	/*
296 	 * CSR_V IB's interrupt regs (log, enable, status, clear)
297 	 */
298 	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
299 	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));
300 
301 	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
302 	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));
303 
304 	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
305 	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));
306 
307 	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
308 	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
309 }
310 
311 /*
312  * Initialize the module, but do not enable interrupts.
313  */
314 /* ARGSUSED */
315 static void
316 ilu_init(caddr_t csr_base, pxu_t *pxu_p)
317 {
318 	/*
319 	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
320 	 */
321 	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
322 	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));
323 
324 	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
325 	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));
326 
327 	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
328 	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));
329 
330 	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
331 	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
332 }
333 
334 /*
335  * Initialize the module, but do not enable interrupts.
336  */
337 /* ARGSUSED */
338 static void
339 tlu_init(caddr_t csr_base, pxu_t *pxu_p)
340 {
341 	uint64_t val;
342 
343 	/*
344 	 * CSR_V TLU_CONTROL Expect OBP ???
345 	 */
346 
347 	/*
348 	 * L0s entry default timer value - 7.0 us
 349 	 * Completion timeout select default value - 67.1 ms;
 350 	 * OBP will set this value.
351 	 *
352 	 * Configuration - Bit 0 should always be 0 for upstream port.
353 	 * Bit 1 is clock - how is this related to the clock bit in TLU
354 	 * Link Control register?  Both are hardware dependent and likely
355 	 * set by OBP.
356 	 *
357 	 * NOTE: Do not set the NPWR_EN bit.  The desired value of this bit
358 	 * will be set by OBP.
359 	 */
360 	val = CSR_XR(csr_base, TLU_CONTROL);
361 	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
362 	    TLU_CONTROL_CONFIG_DEFAULT;
363 
364 	/*
365 	 * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
366 	 * behind non-posted PIO writes. This blocking could cause a master or
367 	 * slave timeout on the host bus if multiple serialized PIOs were to
368 	 * suffer Completion Timeouts because the CTO delays for each PIO ahead
369 	 * of the read would accumulate. Since the Olympus processor can have
370 	 * only 1 PIO outstanding, there is no possibility of PIO accesses from
371 	 * a given CPU to a given device being re-ordered by the PCIe fabric;
372 	 * therefore turning off serialization should be safe from a PCIe
373 	 * ordering perspective.
374 	 */
375 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
376 		val &= ~(1ull << TLU_CONTROL_NPWR_EN);
377 
378 	/*
 379 	 * Set Detect.Quiet.  This disables automatic link re-training
 380 	 * if the link goes down, e.g. when power management turns off
 381 	 * power to the downstream device.  It allows Fire to enter the
 382 	 * Drain state after a link-down event.  The Drain state forces
 383 	 * a reset of the FC state machine, which is required for proper
 384 	 * link re-training.
385 	 */
386 	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
387 	CSR_XS(csr_base, TLU_CONTROL, val);
388 	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
389 	    CSR_XR(csr_base, TLU_CONTROL));
390 
391 	/*
392 	 * CSR_V TLU_STATUS Expect HW 0x4
393 	 */
394 
395 	/*
 396 	 * Only bits [7:0] are currently defined.  Bits [2:0]
397 	 * are the state, which should likely be in state active,
398 	 * 100b.  Bit three is 'recovery', which is not understood.
399 	 * All other bits are reserved.
400 	 */
401 	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
402 	    CSR_XR(csr_base, TLU_STATUS));
403 
404 	/*
405 	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
406 	 */
407 	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
408 	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));
409 
410 	/*
411 	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
412 	 */
413 
414 	/*
415 	 * Ingress credits initial register.  Bits [39:32] should be
416 	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
417 	 * be 0xC0.  These are the reset values, and should be set by
418 	 * HW.
419 	 */
420 	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
421 	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));
422 
423 	/*
424 	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
425 	 */
426 
427 	/*
428 	 * Diagnostic register - always zero unless we are debugging.
429 	 */
430 	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
431 	    CSR_XR(csr_base, TLU_DIAGNOSTIC));
432 
433 	/*
434 	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
435 	 */
436 	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
437 	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));
438 
439 	/*
440 	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
441 	 */
442 	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
443 	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));
444 
445 	/*
446 	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
447 	 */
448 	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
449 	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));
450 
451 	/*
452 	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
453 	 */
454 	DBG(DBG_TLU, NULL,
455 	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
456 	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));
457 
458 	/*
459 	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
460 	 */
461 	DBG(DBG_TLU, NULL,
462 	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
463 	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));
464 
465 	/*
466 	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
467 	 */
468 	DBG(DBG_TLU, NULL,
469 	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
470 	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));
471 
472 	DBG(DBG_TLU, NULL,
473 	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
474 	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));
475 
476 	DBG(DBG_TLU, NULL,
477 	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
478 	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));
479 
480 	DBG(DBG_TLU, NULL,
481 	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
482 	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));
483 
484 	/*
485 	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
486 	 */
487 	DBG(DBG_TLU, NULL,
488 	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
489 	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));
490 
491 	/*
492 	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
493 	 */
494 	DBG(DBG_TLU, NULL,
495 	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
496 	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));
497 
498 	/*
499 	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
500 	 */
501 	DBG(DBG_TLU, NULL,
502 	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
503 	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));
504 
505 	/*
506 	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
507 	 */
508 	DBG(DBG_TLU, NULL,
509 	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
510 	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));
511 
512 	/*
513 	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
514 	 */
515 	DBG(DBG_TLU, NULL,
516 	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
517 	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));
518 
519 	/*
520 	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
521 	 */
522 	DBG(DBG_TLU, NULL,
523 	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
524 	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));
525 
526 	/*
527 	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
528 	 */
529 	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
530 	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));
531 
532 	/*
533 	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
534 	 */
535 	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
536 	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));
537 
538 	/*
539 	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
540 	 */
541 
542 	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
543 	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));
544 
545 	/*
546 	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
547 	 */
548 	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
549 	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));
550 
551 	/*
552 	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
553 	 */
554 	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
555 	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));
556 
557 	/*
558 	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
559 	 */
560 
561 	/*
 562 	 * Bits [14:12] are the Max Read Request Size, which is always
 563 	 * 64 bytes (000b).  Bits [7:5] are the Max Payload Size, which
 564 	 * starts at 128 bytes (000b).  This may be revisited if
 565 	 * init_child finds greater values.
566 	 */
567 	val = 0x0ull;
568 	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
569 	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
570 	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));
571 
572 	/*
573 	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
574 	 */
575 	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
576 	    CSR_XR(csr_base, TLU_DEVICE_STATUS));
577 
578 	/*
579 	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
580 	 */
581 	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
582 	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));
583 
584 	/*
585 	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
586 	 */
587 
588 	/*
 589 	 * The CLOCK bit should be set by OBP if the hardware dictates,
 590 	 * and if it is set then ASPM should be used, since the L0s exit
 591 	 * latency should then be lower than the L1 exit latency.
 592 	 *
 593 	 * Note that we will not enable power management during bringup,
 594 	 * since it has not been tested and has been causing problems in
 595 	 * simulation.
596 	 */
597 	val = (1ull << TLU_LINK_CONTROL_CLOCK);
598 
599 	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
600 	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
601 	    CSR_XR(csr_base, TLU_LINK_CONTROL));
602 
603 	/*
604 	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
605 	 */
606 
607 	/*
608 	 * Not sure if HW or OBP will be setting this read only
609 	 * register.  Bit 12 is Clock, and it should always be 1
610 	 * signifying that the component uses the same physical
611 	 * clock as the platform.  Bits [9:4] are for the width,
612 	 * with the expected value above signifying a x1 width.
613 	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
614 	 * the only speed as yet supported by the PCI-E spec.
615 	 */
616 	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
617 	    CSR_XR(csr_base, TLU_LINK_STATUS));
618 
619 	/*
620 	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
621 	 */
622 
623 	/*
 624 	 * Power Limits for the slots.  These are platform
 625 	 * dependent, and OBP will need to set them after consulting
 626 	 * with the HW guys.
 627 	 *
 628 	 * Bits [16:15] are the power limit scale, which most likely
 629 	 * will be 0b, signifying 1x.  Bits [14:7] are the Set
 630 	 * Power Limit Value, a number that is multiplied
 631 	 * by the power limit scale to get the actual power limit.
632 	 */
633 	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
634 	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));
635 
636 	/*
637 	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
638 	 */
639 	DBG(DBG_TLU, NULL,
640 	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
641 	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));
642 
643 	/*
644 	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
645 	 * Kernel 0x17F0110017F011
646 	 */
647 	DBG(DBG_TLU, NULL,
648 	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
649 	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));
650 
651 	/*
652 	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
653 	 */
654 	DBG(DBG_TLU, NULL,
655 	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
656 	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));
657 
658 	/*
659 	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
660 	 */
661 	DBG(DBG_TLU, NULL,
662 	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
663 	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));
664 
665 	/*
666 	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
667 	 */
668 	DBG(DBG_TLU, NULL,
669 	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
670 	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));
671 
672 	/*
673 	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
674 	 */
675 	DBG(DBG_TLU, NULL,
676 	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
677 	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));
678 
679 	/*
680 	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
681 	 */
682 	DBG(DBG_TLU, NULL,
683 	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
684 	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));
685 
686 	/*
687 	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
688 	 */
689 	DBG(DBG_TLU, NULL,
690 	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
691 	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));
692 
693 
694 	/*
695 	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
696 	 * Plus header logs
697 	 */
698 
699 	/*
700 	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
701 	 */
702 	DBG(DBG_TLU, NULL,
703 	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
704 	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));
705 
706 	/*
707 	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
708 	 */
709 	DBG(DBG_TLU, NULL,
710 	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
711 	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));
712 
713 	/*
714 	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
715 	 */
716 	DBG(DBG_TLU, NULL,
717 	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
718 	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));
719 
720 	/*
721 	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
722 	 */
723 	DBG(DBG_TLU, NULL,
724 	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
725 	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
726 }
727 
728 /* ARGSUSED */
729 static void
730 lpu_init(caddr_t csr_base, pxu_t *pxu_p)
731 {
732 	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
733 	int link_width, max_payload;
734 
735 	uint64_t val;
736 
737 	/*
738 	 * Get the Link Width.  See table above LINK_WIDTH_ARR_SIZE #define
 739 	 * Get the Link Width.  See the table above the LINK_WIDTH_ARR_SIZE
 740 	 * #define.  Only Link Widths of x1, x4, and x8 are supported.
 741 	 * An unrecognized width falls back to the x1 (index 0) entry.
742 	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
743 	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);
744 
745 	/*
746 	 * Convert link_width to match timer array configuration.
747 	 */
748 	switch (link_width) {
749 	case 1:
750 		link_width = 0;
751 		break;
752 	case 4:
753 		link_width = 1;
754 		break;
755 	case 8:
756 		link_width = 2;
757 		break;
758 	case 16:
759 		link_width = 3;
760 		break;
761 	default:
762 		link_width = 0;
763 	}
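	/*
	 * link_width is now an index into the px_acknak_timer_table and
	 * px_replay_timer_table columns used below: 0 = x1, 1 = x4,
	 * 2 = x8, 3 = x16.
	 */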
764 
765 	/*
766 	 * Get the Max Payload Size.
767 	 * See table above LINK_MAX_PKT_ARR_SIZE #define
768 	 */
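	/*
	 * The field extracted below uses the standard PCIe Max Payload
	 * Size encoding (0 = 128B, 1 = 256B, ... 5 = 4096B), which is why
	 * the debug message prints (0x80 << max_payload).
	 */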
769 	max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
770 	    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);
771 
 772 	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
773 	    (0x80 << max_payload));
774 
775 	/* Make sure the packet size is not greater than 4096 */
776 	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
777 	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
778 
779 	/*
780 	 * CSR_V LPU_ID Expect HW 0x0
781 	 */
782 
783 	/*
784 	 * This register has link id, phy id and gigablaze id.
785 	 * Should be set by HW.
786 	 */
787 	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
788 	    CSR_XR(csr_base, LPU_ID));
789 
790 	/*
791 	 * CSR_V LPU_RESET Expect Kernel 0x0
792 	 */
793 
794 	/*
795 	 * No reason to have any reset bits high until an error is
796 	 * detected on the link.
797 	 */
798 	val = 0ull;
799 	CSR_XS(csr_base, LPU_RESET, val);
800 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
801 	    CSR_XR(csr_base, LPU_RESET));
802 
803 	/*
804 	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
805 	 */
806 
807 	/*
808 	 * Bits [15:8] are Debug B, and bit [7:0] are Debug A.
809 	 * They are read-only.  What do the 8 bits mean, and
810 	 * how do they get set if they are read only?
811 	 */
812 	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
813 	    CSR_XR(csr_base, LPU_DEBUG_STATUS));
814 
815 	/*
816 	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
817 	 */
818 	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
819 	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));
820 
821 	/*
822 	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
823 	 */
824 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
825 	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));
826 
827 	/*
828 	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
829 	 */
830 
831 	/*
 832 	 * This register has bits [9:4] for link width, and the
 833 	 * default, 0x10, means a width of x16.  The problem is
 834 	 * that this width is not supported according to the TLU
 835 	 * link status register.
836 	 */
837 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
838 	    CSR_XR(csr_base, LPU_LINK_STATUS));
839 
840 	/*
841 	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
842 	 */
843 	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
844 	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));
845 
846 	/*
847 	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
848 	 */
849 	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
850 	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));
851 
852 	/*
853 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
854 	 */
855 	DBG(DBG_LPU, NULL,
856 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
857 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));
858 
859 	/*
860 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
861 	 */
862 	DBG(DBG_LPU, NULL,
863 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
864 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));
865 
866 	/*
867 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
868 	 */
869 	DBG(DBG_LPU, NULL,
870 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
871 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));
872 
873 	/*
874 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
875 	 */
876 	DBG(DBG_LPU, NULL,
877 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
878 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));
879 
880 	/*
881 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
882 	 */
883 	DBG(DBG_LPU, NULL,
884 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
885 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));
886 
887 	/*
888 	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
889 	 */
890 	DBG(DBG_LPU, NULL,
891 	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
892 	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));
893 
894 	/*
895 	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
896 	 */
897 
898 	/*
899 	 * This is another place where Max Payload can be set,
900 	 * this time for the link layer.  It will be set to
901 	 * 128B, which is the default, but this will need to
902 	 * be revisited.
903 	 */
904 	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
905 	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
906 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
907 	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));
908 
909 	/*
910 	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
911 	 */
912 
913 	/*
914 	 * Another R/W status register.  Bit 3, DL up Status, will
915 	 * be set high.  The link state machine status bits [2:0]
916 	 * are set to 0x1, but the status bits are not defined in the
 917 	 * PRM.  What does 0x1 mean, what other values are possible
 918 	 * and what are their meanings?
919 	 *
920 	 * This register has been giving us problems in simulation.
921 	 * It has been mentioned that software should not program
922 	 * any registers with WE bits except during debug.  So
923 	 * this register will no longer be programmed.
924 	 */
925 
926 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
927 	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));
928 
929 	/*
930 	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
931 	 */
932 	DBG(DBG_LPU, NULL,
933 	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
934 	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));
935 
936 	/*
937 	 * CSR_V LPU Link Layer interrupt regs (mask, status)
938 	 */
939 	DBG(DBG_LPU, NULL,
940 	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
941 	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));
942 
943 	DBG(DBG_LPU, NULL,
944 	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
945 	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));
946 
947 	/*
948 	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
949 	 */
950 
951 	/*
952 	 * The PRM says that only the first two bits will be set
953 	 * high by default, which will enable flow control for
 954 	 * posted and non-posted updates, but NOT completion
955 	 * updates.
956 	 */
957 	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
958 	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
959 	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
960 	DBG(DBG_LPU, NULL,
961 	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
962 	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));
963 
964 	/*
965 	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
966 	 * Expect OBP 0x1D4C
967 	 */
968 
969 	/*
970 	 * This should be set by OBP.  We'll check to make sure.
971 	 */
972 	DBG(DBG_LPU, NULL, "lpu_init - "
973 	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
974 	    CSR_XR(csr_base,
975 	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));
976 
977 	/*
978 	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
979 	 */
980 
981 	/*
982 	 * This register has Flow Control Update Timer values for
983 	 * non-posted and posted requests, bits [30:16] and bits
984 	 * [14:0], respectively.  These are read-only to SW so
985 	 * either HW or OBP needs to set them.
986 	 */
987 	DBG(DBG_LPU, NULL, "lpu_init - "
988 	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
989 	    CSR_XR(csr_base,
990 	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));
991 
992 	/*
993 	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
994 	 */
995 
996 	/*
 997 	 * Same as the timer0 register above, except that bits [14:0]
 998 	 * hold the timer values for completions.  Read-only to
 999 	 * SW; OBP or HW needs to set it.
1000 	 */
1001 	DBG(DBG_LPU, NULL, "lpu_init - "
1002 	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
1003 	    CSR_XR(csr_base,
1004 	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));
1005 
1006 	/*
1007 	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
1008 	 */
1009 	val = px_acknak_timer_table[max_payload][link_width];
1010 	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);
1011 
1012 	DBG(DBG_LPU, NULL, "lpu_init - "
1013 	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
1014 	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));
1015 
1016 	/*
1017 	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
1018 	 */
1019 	DBG(DBG_LPU, NULL,
1020 	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
1021 	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));
1022 
1023 	/*
1024 	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
1025 	 */
1026 	val = px_replay_timer_table[max_payload][link_width];
1027 	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
1028 
1029 	DBG(DBG_LPU, NULL,
1030 	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
1031 	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));
1032 
1033 	/*
1034 	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
1035 	 */
1036 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
1037 	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));
1038 
1039 	/*
1040 	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
1041 	 */
1042 	DBG(DBG_LPU, NULL,
1043 	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
1044 	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));
1045 
1046 	/*
1047 	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
1048 	 */
1049 	DBG(DBG_LPU, NULL,
1050 	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
1051 	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));
1052 
1053 	/*
1054 	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
1055 	 */
1056 	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
1057 	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
1058 	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
1059 	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));
1060 
1061 	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
1062 	DBG(DBG_LPU, NULL,
1063 	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
1064 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));
1065 
1066 	/*
1067 	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
1068 	 */
1069 	DBG(DBG_LPU, NULL,
1070 	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
1071 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));
1072 
1073 	/*
1074 	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
1075 	 */
1076 	DBG(DBG_LPU, NULL,
1077 	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
1078 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));
1079 
1080 	/*
1081 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
1082 	 */
1083 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
1084 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));
1085 
1086 	/*
1087 	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
1088 	 */
1089 	DBG(DBG_LPU, NULL,
1090 	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
1091 	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));
1092 
1093 	/*
1094 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
1095 	 */
1096 
1097 	/*
1098 	 * Test only register.  Will not be programmed.
1099 	 */
1100 	DBG(DBG_LPU, NULL,
1101 	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
1102 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));
1103 
1104 	/*
1105 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
1106 	 */
1107 
1108 	/*
1109 	 * Test only register.  Will not be programmed.
1110 	 */
1111 	DBG(DBG_LPU, NULL,
1112 	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
1113 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));
1114 
1115 	/*
1116 	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
1117 	 */
1118 	DBG(DBG_LPU, NULL,
1119 	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
1120 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));
1121 
1122 	/*
1123 	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
1124 	 */
1125 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
1126 	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));
1127 
1128 	/*
1129 	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
1130 	 */
1131 
1132 	/*
1133 	 * Test only register.  Will not be programmed.
1134 	 */
1135 	DBG(DBG_LPU, NULL,
1136 	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
1137 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));
1138 
1139 	/*
1140 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
1141 	 */
1142 	DBG(DBG_LPU, NULL,
1143 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
1144 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));
1145 
1146 	/*
1147 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
1148 	 */
1149 	DBG(DBG_LPU, NULL,
1150 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
1151 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));
1152 
1153 	/*
1154 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
1155 	 */
1156 	DBG(DBG_LPU, NULL,
1157 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
1158 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));
1159 
1160 	/*
1161 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
1162 	 */
1163 	DBG(DBG_LPU, NULL,
1164 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
1165 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));
1166 
1167 	/*
1168 	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
1169 	 */
1170 	DBG(DBG_LPU, NULL,
1171 	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
1172 	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));
1173 
1174 	/*
1175 	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
1176 	 */
1177 
1178 	/*
1179 	 * Test only register.  Will not be programmed.
1180 	 */
1181 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
1182 	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));
1183 
1184 	/*
1185 	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
1186 	 */
1187 
1188 	/*
1189 	 * Test only register.  Will not be programmed.
1190 	 */
1191 	DBG(DBG_LPU, NULL,
1192 	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
1193 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));
1194 
1195 	/*
1196 	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
1197 	 */
1198 
1199 	/*
1200 	 * Test only register.
1201 	 */
1202 	DBG(DBG_LPU, NULL,
1203 	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
1204 	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));
1205 
1206 	/*
1207 	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
1208 	 */
1209 	DBG(DBG_LPU, NULL, "lpu_init - "
1210 	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
1211 	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));
1212 
1213 	/*
1214 	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
1215 	 */
1216 
1217 	/*
 1218 	 * Test only register.
1219 	 */
1220 	DBG(DBG_LPU, NULL,
1221 	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
1222 	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));
1223 
1224 	/*
1225 	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
1226 	 */
1227 
1228 	/*
 1229 	 * Test only register.
1230 	 */
1231 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
1232 	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));
1233 
1234 	/*
1235 	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
1236 	 */
1237 	DBG(DBG_LPU, NULL,
1238 	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
1239 	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));
1240 
1241 	/*
1242 	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
1243 	 */
1244 	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
1245 	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));
1246 
1247 	/*
1248 	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1249 	 */
1250 	DBG(DBG_LPU, NULL,
1251 	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1252 	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));
1253 
1254 	/*
1255 	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
1256 	 */
1257 	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
1258 	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));
1259 
1260 	DBG(DBG_LPU, NULL,
1261 	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
1262 	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));
1263 
1264 	/*
1265 	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
1266 	 */
1267 
1268 	/*
1269 	 * This also needs some explanation.  What is the best value
1270 	 * for the water mark?  Test mode enables which test mode?
1271 	 * Programming model needed for the Receiver Reset Lane N
1272 	 * bits.
1273 	 */
1274 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
1275 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));
1276 
1277 	/*
1278 	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
1279 	 */
1280 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
1281 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));
1282 
1283 	/*
1284 	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
1285 	 */
1286 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
1287 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));
1288 
1289 	/*
1290 	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
1291 	 */
1292 	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
1293 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));
1294 
1295 	/*
1296 	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1297 	 */
1298 	DBG(DBG_LPU, NULL,
1299 	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1300 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));
1301 
1302 	/*
1303 	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
1304 	 */
1305 	DBG(DBG_LPU, NULL,
1306 	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
1307 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));
1308 
1309 	DBG(DBG_LPU, NULL,
1310 	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
1311 	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));
1312 
1313 	/*
1314 	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
1315 	 */
1316 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
1317 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));
1318 
1319 	/*
1320 	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
1321 	 */
1322 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
1323 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));
1324 
1325 	/*
1326 	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1327 	 */
1328 	DBG(DBG_LPU, NULL,
1329 	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1330 	    CSR_XR(csr_base,
1331 	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));
1332 
1333 	/*
1334 	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
1335 	 */
1336 	DBG(DBG_LPU, NULL,
1337 	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
1338 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));
1339 
1340 	DBG(DBG_LPU, NULL,
1341 	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
1342 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));
1343 
1344 	/*
1345 	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
1346 	 */
1347 	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
1348 	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));
1349 
1350 	/*
1351 	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
1352 	 */
1353 
1354 	/*
 1355 	 * The new PRM has values for the LTSSM 8 ns timeout and the
 1356 	 * LTSSM 20 ns timeout.  But what do these values mean?
 1357 	 * Most of the other bits are open questions as well.
1358 	 *
1359 	 * As such we will use the reset value.
1360 	 */
1361 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
1362 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));
1363 
1364 	/*
1365 	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
1366 	 */
1367 
1368 	/*
 1369 	 * Again, what does the '12 ms timeout value' mean?
1370 	 */
1371 	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
1372 	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
1373 	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
1374 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
1375 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));
1376 
1377 	/*
1378 	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
1379 	 */
1380 	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
1381 	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
1382 	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
1383 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
1384 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));
1385 
1386 	/*
1387 	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
1388 	 */
1389 	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
1390 	    LPU_LTSSM_CONFIG4_DATA_RATE) |
1391 	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
1392 	    LPU_LTSSM_CONFIG4_N_FTS));
1393 	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
1394 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
1395 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));
1396 
1397 	/*
1398 	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
1399 	 */
1400 	val = 0ull;
1401 	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
1402 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
1403 	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));
1404 
1405 	/*
1406 	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
1407 	 */
1408 
1409 	/*
1410 	 * LTSSM Status registers are test only.
1411 	 */
1412 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
1413 	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));
1414 
1415 	/*
1416 	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
1417 	 */
1418 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
1419 	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));
1420 
1421 	/*
1422 	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1423 	 */
1424 	DBG(DBG_LPU, NULL,
1425 	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1426 	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));
1427 
1428 	/*
1429 	 * CSR_V LPU LTSSM  LAYER interrupt regs (mask, status)
1430 	 */
1431 	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
1432 	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));
1433 
1434 	DBG(DBG_LPU, NULL,
1435 	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
1436 	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));
1437 
1438 	/*
1439 	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
1440 	 */
1441 	DBG(DBG_LPU, NULL,
1442 	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
1443 	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));
1444 
1445 	/*
1446 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
1447 	 */
1448 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
1449 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));
1450 
1451 	/*
1452 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
1453 	 */
1454 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
1455 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));
1456 
1457 	/*
1458 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
1459 	 */
1460 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
1461 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));
1462 
1463 	/*
1464 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
1465 	 */
1466 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
1467 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));
1468 
1469 	/*
1470 	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
1471 	 */
1472 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
1473 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));
1474 
1475 	/*
1476 	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
1477 	 */
1478 	DBG(DBG_LPU, NULL, "lpu_init - "
1479 	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1480 	    CSR_XR(csr_base,
1481 	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));
1482 
1483 	/*
 1484 	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
1485 	 */
1486 	DBG(DBG_LPU, NULL,
1487 	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
1488 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));
1489 
1490 	DBG(DBG_LPU, NULL,
1491 	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
1492 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));
1493 
1494 	/*
1495 	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
1496 	 */
1497 	DBG(DBG_LPU, NULL,
1498 	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
1499 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));
1500 
1501 	/*
1502 	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
1503 	 */
1504 	DBG(DBG_LPU, NULL,
1505 	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
1506 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));
1507 
1508 	/*
1509 	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
1510 	 */
1511 	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
1512 	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
1513 }
1514 
1515 /* ARGSUSED */
1516 static void
1517 dlu_init(caddr_t csr_base, pxu_t *pxu_p)
1518 {
 1519 	uint64_t val;
1520 
1521 	CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
1522 	DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
1523 	    CSR_XR(csr_base, DLU_INTERRUPT_MASK));
1524 
1525 	val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
1526 	CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
1527 	DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
1528 	    CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));
1529 
1530 	val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
1531 	    (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
1532 
1533 	CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
1534 	DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
1535 	    "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));
1536 
1537 	val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
1538 	    DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);
1539 
1540 	CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
1541 
1542 	DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
1543 	    "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
1544 }
1545 
1546 /* ARGSUSED */
1547 static void
1548 dmc_init(caddr_t csr_base, pxu_t *pxu_p)
1549 {
1550 	uint64_t val;
1551 
1552 /*
1553  * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
1554  */
1555 
1556 	val = -1ull;
1557 	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
1558 	DBG(DBG_DMC, NULL,
1559 	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
1560 	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
1561 
1562 	/*
1563 	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
1564 	 */
1565 	DBG(DBG_DMC, NULL,
1566 	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
1567 	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));
1568 
1569 	/*
1570 	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
1571 	 */
1572 	val = 0x0ull;
1573 	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
1574 	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
1575 	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));
1576 
1577 	/*
1578 	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
1579 	 */
1580 	val = 0x0ull;
1581 	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
1582 	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
1583 	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
1584 }
1585 
1586 void
1587 hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
1588 {
1589 	uint64_t val;
1590 
1591 	ilu_init(csr_base, pxu_p);
1592 	tlu_init(csr_base, pxu_p);
1593 
1594 	switch (PX_CHIP_TYPE(pxu_p)) {
1595 	case PX_CHIP_OBERON:
1596 		dlu_init(csr_base, pxu_p);
1597 		break;
1598 	case PX_CHIP_FIRE:
1599 		lpu_init(csr_base, pxu_p);
1600 		break;
1601 	default:
1602 		DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
1603 		    PX_CHIP_TYPE(pxu_p));
1604 		break;
1605 	}
1606 
1607 	dmc_init(csr_base, pxu_p);
1608 
1609 /*
1610  * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
1611  */
1612 
1613 	val = -1ull;
1614 	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
1615 	DBG(DBG_PEC, NULL,
1616 	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
1617 	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
1618 
1619 	/*
1620 	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
1621 	 */
1622 	DBG(DBG_PEC, NULL,
1623 	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
1624 	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
1625 }
1626 
1627 /*
1628  * Convert a TTE to physical address
1629  */
1630 static r_addr_t
1631 mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
1632 {
1633 	uint64_t pa_mask;
1634 
1635 	switch (PX_CHIP_TYPE(pxu_p)) {
1636 	case PX_CHIP_OBERON:
1637 		pa_mask = MMU_OBERON_PADDR_MASK;
1638 		break;
1639 	case PX_CHIP_FIRE:
1640 		pa_mask = MMU_FIRE_PADDR_MASK;
1641 		break;
1642 	default:
1643 		DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
1644 		    PX_CHIP_TYPE(pxu_p));
1645 		pa_mask = 0;
1646 		break;
1647 	}
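	/*
	 * Note that the masked address is returned shifted right by
	 * MMU_PAGE_SHIFT, i.e. as a page frame number rather than a
	 * byte address.
	 */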
1648 	return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
1649 }
1650 
1651 /*
1652  * Return MMU bypass noncache bit for chip
1653  */
1654 static r_addr_t
1655 mmu_bypass_noncache(pxu_t *pxu_p)
1656 {
1657 	r_addr_t bypass_noncache_bit;
1658 
1659 	switch (PX_CHIP_TYPE(pxu_p)) {
1660 	case PX_CHIP_OBERON:
1661 		bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
1662 		break;
1663 	case PX_CHIP_FIRE:
1664 		bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
1665 		break;
1666 	default:
1667 		DBG(DBG_MMU, NULL,
1668 		    "mmu_bypass_nocache - unknown chip type: 0x%x\n",
1669 		    PX_CHIP_TYPE(pxu_p));
1670 		bypass_noncache_bit = 0;
1671 		break;
1672 	}
1673 	return (bypass_noncache_bit);
1674 }
1675 
1676 /*
1677  * Calculate the number of TSB entries currently programmed (by OBP).
1678  */
1679 /* ARGSUSED */
1680 static uint_t
1681 mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
1682 {
1683 	uint64_t tsb_ctrl;
1684 	uint_t obp_tsb_entries, obp_tsb_size;
1685 
1686 	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
1687 
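	/*
	 * The low four bits of the TSB Control Register hold the table size
	 * encoding programmed by OBP; MMU_TSBSIZE_TO_TSBENTRIES() converts
	 * that encoding to an entry count.
	 */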
1688 	obp_tsb_size = tsb_ctrl & 0xF;
1689 
1690 	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
1691 
1692 	return (obp_tsb_entries);
1693 }
1694 
1695 /*
1696  * Initialize the module, but do not enable interrupts.
1697  */
1698 void
1699 hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
1700 {
1701 	uint64_t	val, i, obp_tsb_pa, *base_tte_addr;
1702 	uint_t obp_tsb_entries;
1703 
1704 	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);
1705 
1706 	/*
1707 	 * Preserve OBP's TSB
1708 	 */
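	/*
	 * Assuming the OBP and kernel DVMA windows end at the same top
	 * address (the usual arrangement), copying OBP's valid TTEs into
	 * the tail of the larger kernel TSB below keeps OBP's existing
	 * DVMA mappings valid at their original addresses.
	 */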
1709 	obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;
1710 
1711 	obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);
1712 
1713 	base_tte_addr = pxu_p->tsb_vaddr +
1714 	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);
1715 
1716 	for (i = 0; i < obp_tsb_entries; i++) {
1717 		uint64_t tte = lddphys(obp_tsb_pa + i * 8);
1718 
1719 		if (!MMU_TTE_VALID(tte))
1720 			continue;
1721 
1722 		base_tte_addr[i] = tte;
1723 	}
1724 
1725 	/*
1726 	 * Invalidate the TLB through the diagnostic register.
1727 	 */
1728 
1729 	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);
1730 
1731 	/*
1732 	 * Configure the Fire MMU TSB Control Register.  Determine
1733 	 * the encoding for either 8KB pages (0) or 64KB pages (1).
1734 	 *
1735 	 * Write the most significant 30 bits of the TSB physical address
1736 	 * and the encoded TSB table size.
1737 	 */
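	/*
	 * For example, a 512KB TSB (64K eight-byte TTEs) stops the loop
	 * below at i == 6 (0x2000 << 6 == 0x80000), so 6 is written as
	 * the table size encoding.
	 */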
1738 	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--) {}
1739 
1740 	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
1741 	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);
1742 
1743 	CSR_XS(csr_base, MMU_TSB_CONTROL, val);
1744 
1745 	/*
1746 	 * Enable the MMU, set the "TSB Cache Snoop Enable",
1747 	 * the "Cache Mode", the "Bypass Enable" and
1748 	 * the "Translation Enable" bits.
1749 	 */
1750 	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
1751 	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
1752 	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
1753 	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
1754 	    | (1ull << MMU_CONTROL_AND_STATUS_TE));
1755 
1756 	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);
1757 
1758 	/*
1759 	 * Read the register here to ensure that the previous writes to
1760 	 * the Fire MMU registers have been flushed.  (Technically, this
1761 	 * is not entirely necessary here as we will likely do later reads
1762 	 * during Fire initialization, but it is a small price to pay for
1763 	 * more modular code.)
1764 	 */
1765 	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
1766 
1767 	/*
1768 	 * CSR_V MMU's error interrupt regs (log enable, interrupt enable,
1769 	 * interrupt status, error status clear)
1770 	 */
1771 	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
1772 	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));
1773 
1774 	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
1775 	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));
1776 
1777 	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
1778 	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));
1779 
1780 	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
1781 	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
1782 }
1783 
1784 /*
1785  * Generic IOMMU Services
1786  */
1787 
1788 /* ARGSUSED */
1789 uint64_t
1790 hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
1791     io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
1792 {
1793 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1794 	uint64_t	attr = MMU_TTE_V;
1795 	int		i;
1796 
1797 	if (io_attr & PCI_MAP_ATTR_WRITE)
1798 		attr |= MMU_TTE_W;
1799 
1800 	if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
1801 	    (io_attr & PCI_MAP_ATTR_RO))
1802 		attr |= MMU_TTE_RO;
1803 
1804 	if (attr & MMU_TTE_RO) {
1805 		DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
1806 		    "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
1807 	}
1808 
1809 	if (flags & MMU_MAP_PFN) {
1810 		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;
1811 		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
1812 			px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
1813 			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1814 
1815 			/*
1816 			 * Oberon needs the corresponding TTEs flushed from its
1817 			 * TTE cache.  We only flush once per cache line (every
1818 			 * 8th TTE) or at the end; extra PIOs are expensive.
1819 			 */
1820 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1821 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1822 					CSR_XS(dev_hdl,
1823 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1824 					    (pxu_p->tsb_paddr+
1825 					    (tsb_index*MMU_TTE_SIZE)));
1826 				}
1827 			}
1828 		}
1829 	} else {
1830 		caddr_t	a = (caddr_t)addr;
1831 		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
1832 			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
1833 			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1834 
1835 			/*
1836 			 * Oberon needs the corresponding TTEs flushed from its
1837 			 * TTE cache.  We only flush once per cache line (every
1838 			 * 8th TTE) or at the end; extra PIOs are expensive.
1839 			 */
1840 			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1841 				if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1842 					CSR_XS(dev_hdl,
1843 					    MMU_TTE_CACHE_FLUSH_ADDRESS,
1844 					    (pxu_p->tsb_paddr+
1845 					    (tsb_index*MMU_TTE_SIZE)));
1846 				}
1847 			}
1848 		}
1849 	}
1850 
1851 	return (H_EOK);
1852 }
1853 
1854 /* ARGSUSED */
1855 uint64_t
1856 hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1857     pages_t pages)
1858 {
1859 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1860 	int		i;
1861 
1862 	for (i = 0; i < pages; i++, tsb_index++) {
1863 		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
1864 
1865 		/*
1866 		 * Oberon needs the corresponding TTEs flushed from its
1867 		 * TTE cache.  We only flush once per cache line (every
1868 		 * 8th TTE) or at the end; extra PIOs are expensive.
1869 		 */
1870 		if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1871 			if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1872 				CSR_XS(dev_hdl,
1873 				    MMU_TTE_CACHE_FLUSH_ADDRESS,
1874 				    (pxu_p->tsb_paddr+
1875 				    (tsb_index*MMU_TTE_SIZE)));
1876 			}
1877 		}
1878 	}
1879 
1880 	return (H_EOK);
1881 }
1882 
1883 /* ARGSUSED */
1884 uint64_t
1885 hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1886     io_attributes_t *attr_p, r_addr_t *r_addr_p)
1887 {
1888 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1889 	uint64_t	*tte_addr;
1890 	uint64_t	ret = H_EOK;
1891 
1892 	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
1893 
1894 	if (*tte_addr & MMU_TTE_V) {
1895 		*r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
1896 		*attr_p = (*tte_addr & MMU_TTE_W) ?
1897 		    PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
1898 	} else {
1899 		*r_addr_p = 0;
1900 		*attr_p = 0;
1901 		ret = H_ENOMAP;
1902 	}
1903 
1904 	return (ret);
1905 }
1906 
1907 /* ARGSUSED */
1908 uint64_t
1909 hvio_get_bypass_base(pxu_t *pxu_p)
1910 {
1911 	uint64_t base;
1912 
1913 	switch (PX_CHIP_TYPE(pxu_p)) {
1914 	case PX_CHIP_OBERON:
1915 		base = MMU_OBERON_BYPASS_BASE;
1916 		break;
1917 	case PX_CHIP_FIRE:
1918 		base = MMU_FIRE_BYPASS_BASE;
1919 		break;
1920 	default:
1921 		DBG(DBG_MMU, NULL,
1922 		    "hvio_get_bypass_base - unknown chip type: 0x%x\n",
1923 		    PX_CHIP_TYPE(pxu_p));
1924 		base = 0;
1925 		break;
1926 	}
1927 	return (base);
1928 }
1929 
1930 /* ARGSUSED */
1931 uint64_t
1932 hvio_get_bypass_end(pxu_t *pxu_p)
1933 {
1934 	uint64_t end;
1935 
1936 	switch (PX_CHIP_TYPE(pxu_p)) {
1937 	case PX_CHIP_OBERON:
1938 		end = MMU_OBERON_BYPASS_END;
1939 		break;
1940 	case PX_CHIP_FIRE:
1941 		end = MMU_FIRE_BYPASS_END;
1942 		break;
1943 	default:
1944 		DBG(DBG_MMU, NULL,
1945 		    "hvio_get_bypass_end - unknown chip type: 0x%x\n",
1946 		    PX_CHIP_TYPE(pxu_p));
1947 		end = 0;
1948 		break;
1949 	}
1950 	return (end);
1951 }
1952 
1953 /* ARGSUSED */
1954 uint64_t
1955 hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
1956     io_attributes_t attr, io_addr_t *io_addr_p)
1957 {
1958 	uint64_t	pfn = MMU_BTOP(ra);
1959 
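	/*
	 * A bypass mode DVMA address is simply the real address OR'd into
	 * the chip's bypass window; the noncache attribute bit is added
	 * when the target page is not system memory.
	 */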
1960 	*io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
1961 	    (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));
1962 
1963 	return (H_EOK);
1964 }
1965 
1966 /*
1967  * Generic IO Interrupt Services
1968  */
1969 
1970 /*
1971  * Converts a device specific interrupt number given by the
1972  * arguments devhandle and devino into a system specific ino.
1973  */
1974 /* ARGSUSED */
1975 uint64_t
1976 hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
1977     sysino_t *sysino)
1978 {
1979 	if (devino > INTERRUPT_MAPPING_ENTRIES) {
1980 		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
1981 		return (H_ENOINTR);
1982 	}
1983 
1984 	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
1985 
1986 	return (H_EOK);
1987 }
1988 
1989 /*
1990  * Returns, in intr_valid_state, whether the interrupt defined by sysino
1991  * is valid (enabled) or not valid (disabled).
1992  */
1993 uint64_t
1994 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1995     intr_valid_state_t *intr_valid_state)
1996 {
1997 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1998 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
1999 		*intr_valid_state = INTR_VALID;
2000 	} else {
2001 		*intr_valid_state = INTR_NOTVALID;
2002 	}
2003 
2004 	return (H_EOK);
2005 }
2006 
2007 /*
2008  * Sets the 'valid' state of the interrupt defined by
2009  * the argument sysino to the state defined by the
2010  * argument intr_valid_state.
2011  */
2012 uint64_t
2013 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
2014     intr_valid_state_t intr_valid_state)
2015 {
2016 	switch (intr_valid_state) {
2017 	case INTR_VALID:
2018 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2019 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2020 		break;
2021 	case INTR_NOTVALID:
2022 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2023 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2024 		break;
2025 	default:
2026 		return (EINVAL);
2027 	}
2028 
2029 	return (H_EOK);
2030 }
2031 
2032 /*
2033  * Returns the current state of the interrupt given by the sysino
2034  * argument.
2035  */
2036 uint64_t
2037 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
2038     intr_state_t *intr_state)
2039 {
2040 	intr_state_t state;
2041 
2042 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2043 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
2044 
2045 	switch (state) {
2046 	case INTERRUPT_IDLE_STATE:
2047 		*intr_state = INTR_IDLE_STATE;
2048 		break;
2049 	case INTERRUPT_RECEIVED_STATE:
2050 		*intr_state = INTR_RECEIVED_STATE;
2051 		break;
2052 	case INTERRUPT_PENDING_STATE:
2053 		*intr_state = INTR_DELIVERED_STATE;
2054 		break;
2055 	default:
2056 		return (EINVAL);
2057 	}
2058 
2059 	return (H_EOK);
2061 }
2062 
2063 /*
2064  * Sets the current state of the interrupt given by the sysino
2065  * argument to the value given in the argument intr_state.
2066  *
2067  * Note: Setting the state to INTR_IDLE clears any pending
2068  * interrupt for sysino.
2069  */
2070 uint64_t
2071 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
2072     intr_state_t intr_state)
2073 {
2074 	intr_state_t state;
2075 
2076 	switch (intr_state) {
2077 	case INTR_IDLE_STATE:
2078 		state = INTERRUPT_IDLE_STATE;
2079 		break;
2080 	case INTR_DELIVERED_STATE:
2081 		state = INTERRUPT_PENDING_STATE;
2082 		break;
2083 	default:
2084 		return (EINVAL);
2085 	}
2086 
2087 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2088 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
2089 
2090 	return (H_EOK);
2091 }
2092 
2093 /*
2094  * Returns the cpuid that is the current target of the
2095  * interrupt given by the sysino argument.
2096  *
2097  * The cpuid value returned is undefined if the target
2098  * has not been set via intr_settarget.
2099  */
2100 uint64_t
2101 hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2102     cpuid_t *cpuid)
2103 {
2104 	switch (PX_CHIP_TYPE(pxu_p)) {
2105 	case PX_CHIP_OBERON:
2106 		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2107 		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
2108 		break;
2109 	case PX_CHIP_FIRE:
2110 		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2111 		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
2112 		break;
2113 	default:
2114 		DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
2115 		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2116 		return (EINVAL);
2117 	}
2118 
2119 	return (H_EOK);
2120 }
2121 
2122 /*
2123  * Set the target cpu for the interrupt defined by the argument
2124  * sysino to the target cpu value defined by the argument cpuid.
2125  */
2126 uint64_t
2127 hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2128     cpuid_t cpuid)
2129 {
2131 	uint64_t	val, intr_controller;
2132 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
2133 
2134 	/*
2135 	 * For now, we assign an interrupt controller in a round-robin
2136 	 * fashion.  Later, we may need to come up with a more efficient
2137 	 * assignment algorithm.
2138 	 */
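	/*
	 * For example, cpuid 5 yields intr_controller == (0x1ull << (5 % 4))
	 * == 0x2, i.e. controller 1 of the four.
	 */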
2139 	intr_controller = 0x1ull << (cpuid % 4);
2140 
2141 	switch (PX_CHIP_TYPE(pxu_p)) {
2142 	case PX_CHIP_OBERON:
2143 		val = (((cpuid &
2144 		    INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) <<
2145 		    INTERRUPT_MAPPING_ENTRIES_T_DESTID) |
2146 		    ((intr_controller &
2147 		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2148 		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2149 		break;
2150 	case PX_CHIP_FIRE:
2151 		val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
2152 		    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
2153 		    ((intr_controller &
2154 		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2155 		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2156 		break;
2157 	default:
2158 		DBG(DBG_CB, NULL, "hvio_intr_settarget - "
2159 		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2160 		return (EINVAL);
2161 	}
2162 
2163 	/* For EQ interrupts, set DATA MONDO bit */
2164 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
2165 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
2166 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
2167 
2168 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
2169 
2170 	return (H_EOK);
2171 }
2172 
2173 /*
2174  * MSIQ Functions:
2175  */
2176 uint64_t
2177 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
2178 {
2179 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
2180 	    (uint64_t)pxu_p->msiq_mapped_p);
2181 	DBG(DBG_IB, NULL,
2182 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
2183 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
2184 
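	/*
	 * The IGN is written above the INO field so that the mondo data
	 * the kernel receives lines up with the DEVINO_TO_SYSINO()
	 * encoding (IGN | ino) used elsewhere in this file.
	 */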
2185 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
2186 	    (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p),
2187 	    pxu_p->portid) << INO_BITS);
2188 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
2189 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
2190 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
2191 
2192 	return (H_EOK);
2193 }
2194 
2195 uint64_t
2196 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2197     pci_msiq_valid_state_t *msiq_valid_state)
2198 {
2199 	uint32_t	eq_state;
2200 	uint64_t	ret = H_EOK;
2201 
2202 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2203 	    msiq_id, ENTRIES_STATE);
2204 
2205 	switch (eq_state) {
2206 	case EQ_IDLE_STATE:
2207 		*msiq_valid_state = PCI_MSIQ_INVALID;
2208 		break;
2209 	case EQ_ACTIVE_STATE:
2210 	case EQ_ERROR_STATE:
2211 		*msiq_valid_state = PCI_MSIQ_VALID;
2212 		break;
2213 	default:
2214 		ret = H_EIO;
2215 		break;
2216 	}
2217 
2218 	return (ret);
2219 }
2220 
2221 uint64_t
2222 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2223     pci_msiq_valid_state_t msiq_valid_state)
2224 {
2225 	uint64_t	ret = H_EOK;
2226 
2227 	switch (msiq_valid_state) {
2228 	case PCI_MSIQ_INVALID:
2229 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2230 		    msiq_id, ENTRIES_DIS);
2231 		break;
2232 	case PCI_MSIQ_VALID:
2233 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2234 		    msiq_id, ENTRIES_EN);
2235 		break;
2236 	default:
2237 		ret = H_EINVAL;
2238 		break;
2239 	}
2240 
2241 	return (ret);
2242 }
2243 
2244 uint64_t
2245 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2246     pci_msiq_state_t *msiq_state)
2247 {
2248 	uint32_t	eq_state;
2249 	uint64_t	ret = H_EOK;
2250 
2251 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2252 	    msiq_id, ENTRIES_STATE);
2253 
2254 	switch (eq_state) {
2255 	case EQ_IDLE_STATE:
2256 	case EQ_ACTIVE_STATE:
2257 		*msiq_state = PCI_MSIQ_STATE_IDLE;
2258 		break;
2259 	case EQ_ERROR_STATE:
2260 		*msiq_state = PCI_MSIQ_STATE_ERROR;
2261 		break;
2262 	default:
2263 		ret = H_EIO;
2264 	}
2265 
2266 	return (ret);
2267 }
2268 
2269 uint64_t
2270 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2271     pci_msiq_state_t msiq_state)
2272 {
2273 	uint32_t	eq_state;
2274 	uint64_t	ret = H_EOK;
2275 
2276 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2277 	    msiq_id, ENTRIES_STATE);
2278 
2279 	switch (eq_state) {
2280 	case EQ_IDLE_STATE:
2281 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2282 			ret = H_EIO;
2283 		break;
2284 	case EQ_ACTIVE_STATE:
2285 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2286 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2287 			    msiq_id, ENTRIES_ENOVERR);
2288 		else
2289 			ret = H_EIO;
2290 		break;
2291 	case EQ_ERROR_STATE:
2292 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
2293 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2294 			    msiq_id, ENTRIES_E2I);
2295 		else
2296 			ret = H_EIO;
2297 		break;
2298 	default:
2299 		ret = H_EIO;
2300 	}
2301 
2302 	return (ret);
2303 }
2304 
2305 uint64_t
2306 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2307     msiqhead_t *msiq_head)
2308 {
2309 	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
2310 	    msiq_id, ENTRIES_HEAD);
2311 
2312 	return (H_EOK);
2313 }
2314 
2315 uint64_t
2316 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2317     msiqhead_t msiq_head)
2318 {
2319 	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2320 	    ENTRIES_HEAD, msiq_head);
2321 
2322 	return (H_EOK);
2323 }
2324 
2325 uint64_t
2326 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2327     msiqtail_t *msiq_tail)
2328 {
2329 	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2330 	    msiq_id, ENTRIES_TAIL);
2331 
2332 	return (H_EOK);
2333 }
2334 
2335 /*
2336  * MSI Functions:
2337  */
2338 uint64_t
2339 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2340 {
2341 	/* Reserve PCI MEM 32 resources to perform 32 bit MSI transactions */
2342 	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2343 	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
2344 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2345 	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2346 
2347 	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2348 	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2349 	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
2350 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2351 	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2352 
2353 	return (H_EOK);
2354 }
2355 
2356 uint64_t
2357 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2358     msiqid_t *msiq_id)
2359 {
2360 	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2361 	    msi_num, ENTRIES_EQNUM);
2362 
2363 	return (H_EOK);
2364 }
2365 
2366 uint64_t
2367 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2368     msiqid_t msiq_id)
2369 {
2370 	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2371 	    ENTRIES_EQNUM, msiq_id);
2372 
2373 	return (H_EOK);
2374 }
2375 
2376 uint64_t
2377 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2378     pci_msi_valid_state_t *msi_valid_state)
2379 {
2380 	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2381 	    msi_num, ENTRIES_V);
2382 
2383 	return (H_EOK);
2384 }
2385 
2386 uint64_t
2387 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2388     pci_msi_valid_state_t msi_valid_state)
2389 {
2390 	uint64_t	ret = H_EOK;
2391 
2392 	switch (msi_valid_state) {
2393 	case PCI_MSI_VALID:
2394 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2395 		    ENTRIES_V);
2396 		break;
2397 	case PCI_MSI_INVALID:
2398 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2399 		    ENTRIES_V);
2400 		break;
2401 	default:
2402 		ret = H_EINVAL;
2403 	}
2404 
2405 	return (ret);
2406 }
2407 
2408 uint64_t
2409 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2410     pci_msi_state_t *msi_state)
2411 {
2412 	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2413 	    msi_num, ENTRIES_EQWR_N);
2414 
2415 	return (H_EOK);
2416 }
2417 
2418 uint64_t
2419 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2420     pci_msi_state_t msi_state)
2421 {
2422 	uint64_t	ret = H_EOK;
2423 
2424 	switch (msi_state) {
2425 	case PCI_MSI_STATE_IDLE:
2426 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2427 		    ENTRIES_EQWR_N);
2428 		break;
2429 	case PCI_MSI_STATE_DELIVERED:
2430 	default:
2431 		ret = H_EINVAL;
2432 		break;
2433 	}
2434 
2435 	return (ret);
2436 }
2437 
2438 /*
2439  * MSG Functions:
2440  */
2441 uint64_t
2442 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2443     msiqid_t *msiq_id)
2444 {
2445 	uint64_t	ret = H_EOK;
2446 
2447 	switch (msg_type) {
2448 	case PCIE_PME_MSG:
2449 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2450 		break;
2451 	case PCIE_PME_ACK_MSG:
2452 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2453 		    EQNUM);
2454 		break;
2455 	case PCIE_CORR_MSG:
2456 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2457 		break;
2458 	case PCIE_NONFATAL_MSG:
2459 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2460 		    EQNUM);
2461 		break;
2462 	case PCIE_FATAL_MSG:
2463 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2464 		break;
2465 	default:
2466 		ret = H_EINVAL;
2467 		break;
2468 	}
2469 
2470 	return (ret);
2471 }
2472 
2473 uint64_t
2474 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2475     msiqid_t msiq_id)
2476 {
2477 	uint64_t	ret = H_EOK;
2478 
2479 	switch (msg_type) {
2480 	case PCIE_PME_MSG:
2481 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2482 		break;
2483 	case PCIE_PME_ACK_MSG:
2484 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2485 		break;
2486 	case PCIE_CORR_MSG:
2487 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2488 		break;
2489 	case PCIE_NONFATAL_MSG:
2490 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2491 		break;
2492 	case PCIE_FATAL_MSG:
2493 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2494 		break;
2495 	default:
2496 		ret = H_EINVAL;
2497 		break;
2498 	}
2499 
2500 	return (ret);
2501 }
2502 
2503 uint64_t
2504 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2505     pcie_msg_valid_state_t *msg_valid_state)
2506 {
2507 	uint64_t	ret = H_EOK;
2508 
2509 	switch (msg_type) {
2510 	case PCIE_PME_MSG:
2511 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2512 		break;
2513 	case PCIE_PME_ACK_MSG:
2514 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2515 		    PME_TO_ACK_MAPPING, V);
2516 		break;
2517 	case PCIE_CORR_MSG:
2518 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2519 		break;
2520 	case PCIE_NONFATAL_MSG:
2521 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2522 		    ERR_NONFATAL_MAPPING, V);
2523 		break;
2524 	case PCIE_FATAL_MSG:
2525 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2526 		    V);
2527 		break;
2528 	default:
2529 		ret = H_EINVAL;
2530 		break;
2531 	}
2532 
2533 	return (ret);
2534 }
2535 
2536 uint64_t
2537 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2538     pcie_msg_valid_state_t msg_valid_state)
2539 {
2540 	uint64_t	ret = H_EOK;
2541 
2542 	switch (msg_valid_state) {
2543 	case PCIE_MSG_VALID:
2544 		switch (msg_type) {
2545 		case PCIE_PME_MSG:
2546 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2547 			break;
2548 		case PCIE_PME_ACK_MSG:
2549 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2550 			break;
2551 		case PCIE_CORR_MSG:
2552 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2553 			break;
2554 		case PCIE_NONFATAL_MSG:
2555 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2556 			break;
2557 		case PCIE_FATAL_MSG:
2558 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2559 			break;
2560 		default:
2561 			ret = H_EINVAL;
2562 			break;
2563 		}
2564 
2565 		break;
2566 	case PCIE_MSG_INVALID:
2567 		switch (msg_type) {
2568 		case PCIE_PME_MSG:
2569 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2570 			break;
2571 		case PCIE_PME_ACK_MSG:
2572 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2573 			break;
2574 		case PCIE_CORR_MSG:
2575 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2576 			break;
2577 		case PCIE_NONFATAL_MSG:
2578 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2579 			break;
2580 		case PCIE_FATAL_MSG:
2581 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2582 			break;
2583 		default:
2584 			ret = H_EINVAL;
2585 			break;
2586 		}
2587 		break;
2588 	default:
2589 		ret = H_EINVAL;
2590 	}
2591 
2592 	return (ret);
2593 }
2594 
2595 /*
2596  * Suspend/Resume Functions:
2597  *	(pec, mmu, ib)
2598  *	cb
2599  * All registers saved here are touched in the matching XXX_init functions.
2600  */
2601 uint64_t
2602 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2603 {
2604 	uint64_t	*config_state;
2605 	int		total_size;
2606 	int		i;
2607 
2608 	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2609 		return (H_EIO);
2610 
2611 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2612 	config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2613 
2614 	if (config_state == NULL) {
2615 		return (H_EIO);
2616 	}
2617 
2618 	/*
2619 	 * Soft state for suspend/resume, from pxu_t:
2620 	 * uint64_t	*pec_config_state;
2621 	 * uint64_t	*mmu_config_state;
2622 	 * uint64_t	*ib_intr_map;
2623 	 * uint64_t	*ib_config_state;
2624 	 * uint64_t	*xcb_config_state;
2625 	 */
2626 
2627 	/* Save the PEC configuration states */
2628 	pxu_p->pec_config_state = config_state;
2629 	for (i = 0; i < PEC_KEYS; i++) {
2630 		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
2631 		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
2632 			pxu_p->pec_config_state[i] =
2633 			    CSR_XR((caddr_t)dev_hdl,
2634 			    pec_config_state_regs[i].reg);
2635 		}
2636 	}
2637 
2638 	/* Save the MMU configuration states */
2639 	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2640 	for (i = 0; i < MMU_KEYS; i++) {
2641 		pxu_p->mmu_config_state[i] =
2642 		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2643 	}
2644 
2645 	/* Save the interrupt mapping registers */
2646 	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2647 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2648 		pxu_p->ib_intr_map[i] =
2649 		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2650 	}
2651 
2652 	/* Save the IB configuration states */
2653 	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2654 	for (i = 0; i < IB_KEYS; i++) {
2655 		pxu_p->ib_config_state[i] =
2656 		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2657 	}
2658 
2659 	return (H_EOK);
2660 }
2661 
2662 void
2663 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2664 {
2665 	int		total_size;
2666 	sysino_t	sysino;
2667 	int		i;
2668 	uint64_t	ret;
2669 
2670 	/* Make sure that suspend actually did occur */
2671 	if (!pxu_p->pec_config_state) {
2672 		return;
2673 	}
2674 
2675 	/* Restore IB configuration states */
2676 	for (i = 0; i < IB_KEYS; i++) {
2677 		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2678 		    pxu_p->ib_config_state[i]);
2679 	}
2680 
2681 	/*
2682 	 * Restore the interrupt mapping registers
2683 	 * and make sure the interrupts are idle.
2684 	 */
2685 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2686 		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2687 		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2688 		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2689 		    pxu_p->ib_intr_map[i]);
2690 	}
2691 
2692 	/* Restore MMU configuration states */
2693 	/* Clear the cache. */
2694 	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2695 
2696 	for (i = 0; i < MMU_KEYS; i++) {
2697 		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2698 		    pxu_p->mmu_config_state[i]);
2699 	}
2700 
2701 	/* Restore PEC configuration states */
2702 	/* Make sure all reset bits are low until an error is detected */
2703 	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2704 
2705 	for (i = 0; i < PEC_KEYS; i++) {
2706 		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
2707 		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
2708 			CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg,
2709 			    pxu_p->pec_config_state[i]);
2710 		}
2711 	}
2712 
2713 	/* Enable PCI-E interrupt */
2714 	if ((ret = hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino,
2715 	    &sysino)) != H_EOK) {
2716 		cmn_err(CE_WARN,
2717 		    "hvio_resume: hvio_intr_devino_to_sysino failed, "
2718 		    "ret 0x%lx", ret);
2719 	}
2720 
2721 	if ((ret = hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE))
2722 	    != H_EOK) {
2723 		cmn_err(CE_WARN,
2724 		    "hvio_resume: hvio_intr_setstate failed, "
2725 		    "ret 0x%lx", ret);
2726 	}
2727 
2728 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2729 	kmem_free(pxu_p->pec_config_state, total_size);
2730 
2731 	pxu_p->pec_config_state = NULL;
2732 	pxu_p->mmu_config_state = NULL;
2733 	pxu_p->ib_config_state = NULL;
2734 	pxu_p->ib_intr_map = NULL;
2735 
2736 	msiq_resume(dev_hdl, pxu_p);
2737 }
2738 
2739 uint64_t
2740 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2741 {
2742 	uint64_t *config_state, *cb_regs;
2743 	int i, cb_size, cb_keys;
2744 
2745 	switch (PX_CHIP_TYPE(pxu_p)) {
2746 	case PX_CHIP_OBERON:
2747 		cb_size = UBC_SIZE;
2748 		cb_keys = UBC_KEYS;
2749 		cb_regs = ubc_config_state_regs;
2750 		break;
2751 	case PX_CHIP_FIRE:
2752 		cb_size = JBC_SIZE;
2753 		cb_keys = JBC_KEYS;
2754 		cb_regs = jbc_config_state_regs;
2755 		break;
2756 	default:
2757 		DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n",
2758 		    PX_CHIP_TYPE(pxu_p));
2759 		break;
2760 	}
2761 
2762 	config_state = kmem_zalloc(cb_size, KM_NOSLEEP);
2763 
2764 	if (config_state == NULL) {
2765 		return (H_EIO);
2766 	}
2767 
2768 	/* Save the configuration states */
2769 	pxu_p->xcb_config_state = config_state;
2770 	for (i = 0; i < cb_keys; i++) {
2771 		pxu_p->xcb_config_state[i] =
2772 		    CSR_XR((caddr_t)dev_hdl, cb_regs[i]);
2773 	}
2774 
2775 	return (H_EOK);
2776 }
2777 
2778 void
2779 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2780     devino_t devino, pxu_t *pxu_p)
2781 {
2782 	sysino_t sysino;
2783 	uint64_t *cb_regs;
2784 	int i, cb_size, cb_keys;
2785 	uint64_t ret;
2786 
2787 	switch (PX_CHIP_TYPE(pxu_p)) {
2788 	case PX_CHIP_OBERON:
2789 		cb_size = UBC_SIZE;
2790 		cb_keys = UBC_KEYS;
2791 		cb_regs = ubc_config_state_regs;
2792 		/*
2793 		 * No reason to have any reset bits high until an error is
2794 		 * detected on the link.
2795 		 */
2796 		CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull);
2797 		break;
2798 	case PX_CHIP_FIRE:
2799 		cb_size = JBC_SIZE;
2800 		cb_keys = JBC_KEYS;
2801 		cb_regs = jbc_config_state_regs;
2802 		/*
2803 		 * No reason to have any reset bits high until an error is
2804 		 * detected on the link.
2805 		 */
2806 		CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2807 		break;
2808 	default:
2809 		DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n",
2810 		    PX_CHIP_TYPE(pxu_p));
2811 		break;
2812 	}
2813 
2814 	ASSERT(pxu_p->xcb_config_state);
2815 
2816 	/* Restore the configuration states */
2817 	for (i = 0; i < cb_keys; i++) {
2818 		CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i],
2819 		    pxu_p->xcb_config_state[i]);
2820 	}
2821 
2822 	/* Enable XBC interrupt */
2823 	if ((ret = hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino,
2824 	    &sysino)) != H_EOK) {
2825 		cmn_err(CE_WARN,
2826 		    "hvio_cb_resume: hvio_intr_devino_to_sysino failed, "
2827 		    "ret 0x%lx", ret);
2828 	}
2829 
2830 	if ((ret = hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE))
2831 	    != H_EOK) {
2832 		cmn_err(CE_WARN,
2833 		    "hvio_cb_resume: hvio_intr_setstate failed, "
2834 		    "ret 0x%lx", ret);
2835 	}
2836 
2837 	kmem_free(pxu_p->xcb_config_state, cb_size);
2838 
2839 	pxu_p->xcb_config_state = NULL;
2840 }
2841 
2842 static uint64_t
2843 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2844 {
2845 	size_t	bufsz;
2846 	volatile uint64_t *cur_p;
2847 	int i;
2848 
2849 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2850 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2851 	    NULL)
2852 		return (H_EIO);
2853 
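	/*
	 * The save buffer holds, in order, the EQ states, the MSI mapping
	 * entries and the remaining MSIQ registers; msiq_resume() walks
	 * it in the same order.
	 */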
2854 	cur_p = pxu_p->msiq_config_state;
2855 
2856 	/* Save each EQ state */
2857 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2858 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2859 
2860 	/* Save MSI mapping registers */
2861 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2862 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2863 
2864 	/* Save all other MSIQ registers */
2865 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2866 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2867 	return (H_EOK);
2868 }
2869 
2870 static void
2871 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2872 {
2873 	size_t	bufsz;
2874 	uint64_t *cur_p, state;
2875 	int i;
2876 	uint64_t ret;
2877 
2878 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2879 	cur_p = pxu_p->msiq_config_state;
2880 	/*
2881 	 * Initialize EQ base address register and
2882 	 * Interrupt Mondo Data 0 register.
2883 	 */
2884 	if ((ret = hvio_msiq_init(dev_hdl, pxu_p)) != H_EOK) {
2885 		cmn_err(CE_WARN,
2886 		    "msiq_resume: hvio_msiq_init failed, "
2887 		    "ret 0x%lx", ret);
2888 	}
2889 
2890 	/* Restore EQ states */
2891 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2892 		state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
2893 		if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
2894 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2895 			    i, ENTRIES_EN);
2896 	}
2897 
2898 	/* Restore MSI mapping */
2899 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2900 		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2901 
2902 	/*
2903 	 * Restore all other registers. MSI 32 bit address and
2904 	 * MSI 64 bit address are restored as part of this.
2905 	 */
2906 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2907 		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2908 
2909 	kmem_free(pxu_p->msiq_config_state, bufsz);
2910 	pxu_p->msiq_config_state = NULL;
2911 }
2912 
2913 /*
2914  * Sends a PME_Turn_Off message to put the link in L2/L3 Ready state.
2915  * Called by px_goto_l23ready.
2916  * Returns DDI_SUCCESS or DDI_FAILURE.
2917  */
2918 int
2919 px_send_pme_turnoff(caddr_t csr_base)
2920 {
2921 	volatile uint64_t reg;
2922 
2923 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2924 	/* If already pending, return failure */
2925 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2926 		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2927 		    "tlu_pme_turn_off_generate = %x\n", reg);
2928 		return (DDI_FAILURE);
2929 	}
2930 
2931 	/* Write to the PME_Turn_Off register to broadcast the message */
2932 	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2933 	CSR_XS(csr_base,  TLU_PME_TURN_OFF_GENERATE, reg);
2934 
2935 	return (DDI_SUCCESS);
2936 }
2937 
2938 /*
2939  * Checks for link being in L1idle state.
2940  * Returns
2941  * DDI_SUCCESS - if the link is in L1idle
2942  * DDI_FAILURE - if the link is not in L1idle
2943  */
2944 int
2945 px_link_wait4l1idle(caddr_t csr_base)
2946 {
2947 	uint8_t ltssm_state;
2948 	int ntries = px_max_l1_tries;
2949 
2950 	while (ntries > 0) {
2951 		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2952 		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2953 			break;
2954 		delay(1);
2955 	}
2956 	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2957 	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2958 }
2959 
2960 /*
2961  * Transition the link to L0 after it is down.
2962  */
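/*
 * This assumes px_enable_detect_quiet() parked the link in Detect.Quiet
 * beforehand (the check below fails otherwise); clearing that bit at the
 * end lets the LTSSM retrain the link.
 */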
2963 int
2964 px_link_retrain(caddr_t csr_base)
2965 {
2966 	volatile uint64_t reg;
2967 
2968 	reg = CSR_XR(csr_base, TLU_CONTROL);
2969 	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2970 		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2971 		return (DDI_FAILURE);
2972 	}
2973 
2974 	/* Clear link down bit in TLU Other Event Clear Status Register. */
2975 	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2976 
2977 	/* Clear Drain bit in TLU Status Register */
2978 	CSR_BS(csr_base, TLU_STATUS, DRAIN);
2979 
2980 	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2981 	reg = CSR_XR(csr_base, TLU_CONTROL);
2982 	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2983 	CSR_XS(csr_base, TLU_CONTROL, reg);
2984 
2985 	return (DDI_SUCCESS);
2986 }
2987 
2988 void
2989 px_enable_detect_quiet(caddr_t csr_base)
2990 {
2991 	volatile uint64_t tlu_ctrl;
2992 
2993 	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
2994 	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
2995 	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
2996 }
2997 
2998 static uint_t
2999 oberon_hp_pwron(caddr_t csr_base)
3000 {
3001 	volatile uint64_t reg;
3002 	boolean_t link_retry, link_up;
3003 	int loop, i;
3004 
3005 	DBG(DBG_HP, NULL, "oberon_hp_pwron the slot\n");
3006 
3007 	/* Check Leaf Reset status */
3008 	reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
3009 	if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
3010 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n");
3011 		goto fail;
3012 	}
3013 
3014 	/* Check HP Capable */
3015 	if (!CSR_BR(csr_base, TLU_SLOT_CAPABILITIES, HP)) {
3016 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not "
3017 		    "hotpluggable\n");
3018 		goto fail;
3019 	}
3020 
3021 	/* Check Slot status */
3022 	reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
3023 	if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
3024 	    (reg & (1ull << TLU_SLOT_STATUS_MRLS))) {
3025 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n",
3026 		    reg);
3027 		goto fail;
3028 	}
3029 
3030 	/* Blink power LED, this is done from pciehpc already */
3031 
3032 	/* Turn on slot power */
3033 	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3034 
3035 	/* power fault detection */
3036 	delay(drv_usectohz(25000));
3037 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3038 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3039 
3040 	/* wait to check power state */
3041 	delay(drv_usectohz(25000));
3042 
3043 	if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) {
3044 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: power fault\n");
3045 		goto fail1;
3046 	}
3047 
3048 	/* power is good */
3049 	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3050 
3051 	delay(drv_usectohz(25000));
3052 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3053 	CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3054 
3055 	/* Turn on slot clock */
3056 	CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN);
3057 
3058 	link_up = B_FALSE;
3059 	link_retry = B_FALSE;
3060 
3061 	for (loop = 0; (loop < link_retry_count) && (link_up == B_FALSE);
3062 	    loop++) {
3063 		if (link_retry == B_TRUE) {
3064 			DBG(DBG_HP, NULL, "oberon_hp_pwron : retry link loop "
3065 			    "%d\n", loop);
3066 			CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3067 			CSR_XS(csr_base, FLP_PORT_CONTROL, 0x1);
3068 			delay(drv_usectohz(10000));
3069 			CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
3070 			CSR_BS(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
3071 			CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3072 			delay(drv_usectohz(50000));
3073 		}
3074 
3075 		/* Release PCI-E Reset */
3076 		delay(drv_usectohz(wait_perst));
3077 		CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST);
3078 
3079 		/*
3080 		 * Open events' mask
3081 		 * This should be done from pciehpc already
3082 		 */
3083 
3084 		/* Enable PCIE port */
3085 		delay(drv_usectohz(wait_enable_port));
3086 		CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3087 		CSR_XS(csr_base, FLP_PORT_CONTROL, 0x20);
3088 
3089 		/* wait for the link up */
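		/*
		 * The link is considered up once flow control initialization
		 * has completed, the DLUP status bit is set, and the link
		 * state machine reports DL_ACTIVE.
		 */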
3090 		/* BEGIN CSTYLED */
3091 		for (i = 0; (i < 2) && (link_up == B_FALSE); i++) {
3092 			delay(drv_usectohz(link_status_check));
3093 			reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS);
3094 
3095 		    if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) &
3096 			DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) ==
3097 			DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) &&
3098 			(reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) &&
3099 			((reg & DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK)
3100 			==
3101 			DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) {
3102 			DBG(DBG_HP, NULL, "oberon_hp_pwron : link is up\n");
3103 				link_up = B_TRUE;
3104 		    } else
3105 			link_retry = B_TRUE;
3106 		}
3107 		/* END CSTYLED */
3108 	}
3109 
3110 	if (link_up == B_FALSE) {
3111 		DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable "
3112 		    "PCI-E port\n");
3113 		goto fail2;
3114 	}
3115 
3116 	/* link is up */
3117 	CSR_BC(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
3118 	CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR);
3119 	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P);
3120 	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S);
3121 	CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
3122 
3123 	/* Restore LUP/LDN */
3124 	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3125 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P))
3126 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P;
3127 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P))
3128 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P;
3129 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S))
3130 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S;
3131 	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S))
3132 		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S;
3133 	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3134 
3135 	/*
3136 	 * Initialize Leaf
3137 	 * SPLS = 00b, SPLV = 11001b, i.e. 25W
3138 	 */
3139 	reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3140 	reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
3141 	    TLU_SLOT_CAPABILITIES_SPLS);
3142 	reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
3143 	    TLU_SLOT_CAPABILITIES_SPLV);
3144 	reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
3145 	CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);
3146 
3147 	/* Turn on Power LED */
3148 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3149 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3150 	reg = pcie_slotctl_pwr_indicator_set(reg,
3151 	    PCIE_SLOTCTL_INDICATOR_STATE_ON);
3152 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3153 
3154 	/* Notify to SCF */
3155 	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3156 		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3157 	else
3158 		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3159 
3160 	/* Wait for one second */
3161 	delay(drv_usectohz(1000000));
3162 
3163 	return (DDI_SUCCESS);
3164 
3165 fail2:
3166 	/* Link up failed */
3167 	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3168 	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3169 	delay(drv_usectohz(150));
3170 
3171 	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3172 	delay(drv_usectohz(100));
3173 
3174 fail1:
3175 	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3176 
3177 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3178 
3179 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3180 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3181 	reg = pcie_slotctl_pwr_indicator_set(reg,
3182 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3183 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3184 
3185 	CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);
3186 
3187 fail:
3188 	return ((uint_t)DDI_FAILURE);
3189 }
3190 
3191 hrtime_t oberon_leaf_reset_timeout = 120ll * NANOSEC;	/* 120 seconds */
3192 
3193 static uint_t
3194 oberon_hp_pwroff(caddr_t csr_base)
3195 {
3196 	volatile uint64_t reg;
3197 	volatile uint64_t reg_tluue, reg_tluce;
3198 	hrtime_t start_time, end_time;
3199 
3200 	DBG(DBG_HP, NULL, "oberon_hp_pwroff the slot\n");
3201 
3202 	/* Blink power LED, this is done from pciehpc already */
3203 
3204 	/* Clear Slot Event */
3205 	CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
3206 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3207 
3208 	/* DRN_TR_DIS on */
3209 	CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3210 	delay(drv_usectohz(10000));
3211 
3212 	/* Disable LUP/LDN */
3213 	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3214 	reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3215 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3216 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3217 	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3218 	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3219 
3220 	/* Save the TLU registers */
3221 	reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE);
3222 	reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE);
3223 	/* All clear */
3224 	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0);
3225 	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0);
3226 
3227 	/* Disable port */
3228 	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3229 
3230 	/* PCIE reset */
3231 	delay(drv_usectohz(10000));
3232 	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3233 
3234 	/* PCIE clock stop */
3235 	delay(drv_usectohz(150));
3236 	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3237 
3238 	/* Turn off slot power */
3239 	delay(drv_usectohz(100));
3240 	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3241 	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3242 	delay(drv_usectohz(25000));
3243 	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3244 
3245 	/* write 0 to bit 7 of ILU Error Log Enable Register */
3246 	CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);
3247 
3248 	/* Set back TLU registers */
3249 	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue);
3250 	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce);
3251 
3252 	/* Power LED off */
3253 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3254 	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3255 	reg = pcie_slotctl_pwr_indicator_set(reg,
3256 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3257 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3258 
3259 	/* Indicator LED blink */
3260 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3261 	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3262 	reg = pcie_slotctl_attn_indicator_set(reg,
3263 	    PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
3264 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3265 
3266 	/* Notify to SCF */
3267 	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3268 		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3269 	else
3270 		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3271 
3272 	start_time = gethrtime();
3273 	/* Check Leaf Reset status */
3274 	while (!(CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))) {
3275 		if ((end_time = (gethrtime() - start_time)) >
3276 		    oberon_leaf_reset_timeout) {
3277 			cmn_err(CE_WARN, "Oberon leaf reset is not completed, "
3278 			    "even after waiting %llx ns", end_time);
3279 
3280 			break;
3281 		}
3282 
3283 		/* Wait for one second */
3284 		delay(drv_usectohz(1000000));
3285 	}
3286 
3287 	/* Indicator LED off */
3288 	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3289 	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3290 	reg = pcie_slotctl_attn_indicator_set(reg,
3291 	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3292 	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3293 
3294 	return (DDI_SUCCESS);
3295 }
3296 
3297 static uint_t
3298 oberon_hpreg_get(void *cookie, off_t off)
3299 {
3300 	caddr_t csr_base = *(caddr_t *)cookie;
3301 	volatile uint64_t val = -1ull;
3302 
3303 	switch (off) {
3304 	case PCIE_SLOTCAP:
3305 		val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3306 		break;
3307 	case PCIE_SLOTCTL:
3308 		val = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3309 
3310 		/* Get the power state */
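		/*
		 * PCIE_SLOTCTL_PWR_CONTROL is active low (1 == power off),
		 * so report it set only when the HOTPLUG_CONTROL PWREN
		 * bit is clear.
		 */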
3311 		val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
3312 		    (1ull << HOTPLUG_CONTROL_PWREN)) ?
3313 		    0 : PCIE_SLOTCTL_PWR_CONTROL;
3314 		break;
3315 	case PCIE_SLOTSTS:
3316 		val = CSR_XR(csr_base, TLU_SLOT_STATUS);
3317 		break;
3318 	case PCIE_LINKCAP:
3319 		val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES);
3320 		break;
3321 	case PCIE_LINKSTS:
3322 		val = CSR_XR(csr_base, TLU_LINK_STATUS);
3323 		break;
3324 	default:
3325 		DBG(DBG_HP, NULL, "oberon_hpreg_get(): "
3326 		    "unsupported offset 0x%lx\n", off);
3327 		break;
3328 	}
3329 
3330 	return ((uint_t)val);
3331 }
3332 
3333 static uint_t
3334 oberon_hpreg_put(void *cookie, off_t off, uint_t val)
3335 {
3336 	caddr_t csr_base = *(caddr_t *)cookie;
3337 	volatile uint64_t pwr_state_on, pwr_fault;
3338 	uint_t pwr_off, ret = DDI_SUCCESS;
3339 
3340 	DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
3341 	    off, oberon_hpreg_get(cookie, off), val);
3342 
3343 	switch (off) {
3344 	case PCIE_SLOTCTL:
3345 		/*
3346 		 * Depending on the current state, insertion or removal
3347 		 * will go through their respective sequences.
3348 		 */
3349 		pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
3350 		pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;
3351 
3352 		if (!pwr_off && !pwr_state_on)
3353 			ret = oberon_hp_pwron(csr_base);
3354 		else if (pwr_off && pwr_state_on) {
3355 			pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
3356 			    (1ull << TLU_SLOT_STATUS_PWFD);
3357 
3358 			if (pwr_fault) {
3359 				DBG(DBG_HP, NULL, "oberon_hpreg_put: power "
3360 				    "off because of power fault\n");
3361 				CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3362 			}
3363 			} else
3365 		} else
3366 			CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
3367 		break;
3368 	case PCIE_SLOTSTS:
3369 		CSR_XS(csr_base, TLU_SLOT_STATUS, val);
3370 		break;
3371 	default:
3372 		DBG(DBG_HP, NULL, "oberon_hpreg_put(): "
3373 		    "unsupported offset 0x%lx\n", off);
3374 		ret = (uint_t)DDI_FAILURE;
3375 		break;
3376 	}
3377 
3378 	return (ret);
3379 }
3380 
3381 int
3382 hvio_hotplug_init(dev_info_t *dip, void *arg)
3383 {
3384 	pciehpc_regops_t *regops = (pciehpc_regops_t *)arg;
3385 	px_t	*px_p = DIP_TO_STATE(dip);
3386 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3387 	volatile uint64_t reg;
3388 
3389 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
3390 		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3391 		    TLU_SLOT_CAPABILITIES, HP)) {
3392 			DBG(DBG_HP, NULL, "%s%d: hotplug capable not set\n",
3393 			    ddi_driver_name(dip), ddi_get_instance(dip));
3394 			return (DDI_FAILURE);
3395 		}
3396 
3397 		/* For empty or disconnected slot, disable LUP/LDN */
3398 		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3399 		    TLU_SLOT_STATUS, PSD) ||
3400 		    !CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3401 		    HOTPLUG_CONTROL, PWREN)) {
3402 
3403 			reg = CSR_XR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3404 			    TLU_OTHER_EVENT_LOG_ENABLE);
3405 			reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3406 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3407 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3408 			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3409 			CSR_XS((caddr_t)pxu_p->px_address[PX_REG_CSR],
3410 			    TLU_OTHER_EVENT_LOG_ENABLE, reg);
3411 		}
3412 
3413 		regops->get = oberon_hpreg_get;
3414 		regops->put = oberon_hpreg_put;
3415 
3416 		/* cookie is the csr_base */
3417 		regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];
3418 
3419 		return (DDI_SUCCESS);
3420 	}
3421 
3422 	return (DDI_ENOTSUP);
3423 }
3424 
3425 int
3426 hvio_hotplug_uninit(dev_info_t *dip)
3427 {
3428 	px_t	*px_p = DIP_TO_STATE(dip);
3429 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3430 
3431 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
3432 		return (DDI_SUCCESS);
3433 
3434 	return (DDI_FAILURE);
3435 }
3436