/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 */
static uint64_t	pec_config_state_regs[] = {
	PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	ILU_ERROR_LOG_ENABLE,
	ILU_INTERRUPT_ENABLE,
	TLU_CONTROL,
	TLU_OTHER_EVENT_LOG_ENABLE,
	TLU_OTHER_EVENT_INTERRUPT_ENABLE,
	TLU_DEVICE_CONTROL,
	TLU_LINK_CONTROL,
	TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
	TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
	TLU_CORRECTABLE_ERROR_LOG_ENABLE,
	TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
	LPU_LINK_LAYER_INTERRUPT_MASK,
	LPU_PHY_INTERRUPT_MASK,
	LPU_RECEIVE_PHY_INTERRUPT_MASK,
	LPU_TRANSMIT_PHY_INTERRUPT_MASK,
	LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
	LPU_LTSSM_INTERRUPT_MASK,
	LPU_RESET,
	LPU_DEBUG_CONFIG,
	LPU_INTERRUPT_MASK,
	LPU_LINK_LAYER_CONFIG,
	LPU_FLOW_CONTROL_UPDATE_CONTROL,
	LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
	LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
	LPU_REPLAY_BUFFER_MAX_ADDRESS,
	LPU_TXLINK_RETRY_FIFO_POINTER,
	LPU_LTSSM_CONFIG2,
	LPU_LTSSM_CONFIG3,
	LPU_LTSSM_CONFIG4,
	LPU_LTSSM_CONFIG5,
	DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	DMC_DEBUG_SELECT_FOR_PORT_A,
	DMC_DEBUG_SELECT_FOR_PORT_B
};
#define	PEC_SIZE (sizeof (pec_config_state_regs))
#define	PEC_KEYS (PEC_SIZE / sizeof (uint64_t))
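
/*
 * Each *_SIZE macro here and below is the byte size of its register-offset
 * array, and each *_KEYS macro is the element count, so save/restore code
 * can size a buffer and then walk the list.  A minimal sketch (hypothetical
 * buffer name, not the actual suspend code in this file):
 *
 *	uint64_t *pec_save = kmem_zalloc(PEC_SIZE, KM_SLEEP);
 *	for (i = 0; i < PEC_KEYS; i++)
 *		pec_save[i] = CSR_XR(csr_base, pec_config_state_regs[i]);
 */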

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the CB module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	cb_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	CB_SIZE (sizeof (cb_config_state_regs))
#define	CB_KEYS (CB_SIZE / sizeof (uint64_t))

static uint64_t	msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V CB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * The L0s entry timer defaults to 7.0 us, and the completion
	 * timeout select defaults to 67.1 ms; OBP will set the latter.
	 *
	 * Configuration - Bit 0 should always be 0 for an upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in the TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * Disable non-posted write ordering by setting the NPWR_EN bit,
	 * which forces serialization of writes.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);
	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
	    (1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;

	/*
	 * Set Detect.Quiet. This will disable automatic link
	 * re-training, if the link goes down e.g. power management
	 * turns off power to the downstream device. This will enable
	 * Fire to go to Drain state, after link down. The drain state
	 * forces a reset to the FC state machine, which is required for
	 * proper link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be the active state,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */

	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes (000b).  Bits [7:5] are the Max Payload Size, which
	 * starts at 128 bytes (000b).  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used, since L0s exit
	 * latency should then be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x49,  0x43,  0x30},
		{0x1A0,  0x76,  0x6B,  0x48},
		{0x22F,  0x9A,  0x56,  0x56},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379,  0x112, 0xFC,  0xB4},
		{0x618,  0x1BA, 0x192, 0x10E},
		{0x831,  0x242, 0x143, 0x143},
		{0xFB1,  0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};
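
	/*
	 * Both tables are indexed as [max_payload][link_width] once the
	 * raw CSR values are normalized below; e.g. a 128B Max Payload
	 * Size on a x4 link selects row 0, column 1 (0x49 for the ACKNAK
	 * threshold, 0x112 for the replay timer).
	 */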

	/*
	 * Get the Link Width.  See the table above the LINK_WIDTH_ARR_SIZE
	 * #define.  Only Link Widths of x1, x4, x8, and x16 are handled;
	 * if any other width is reported, default to x1.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}
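
	/*
	 * link_width is now a column index into the timer tables
	 * (0 = x1, 1 = x4, 2 = x8, 3 = x16), not a raw lane count.
	 */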

	/*
	 * Get the Max Payload Size.
	 * See table above LINK_MAX_PKT_ARR_SIZE #define
	 */
	max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
	    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
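
	/*
	 * max_payload is now a row index: the CONFIG field encodes the
	 * Max Payload Size as a power of two starting at 128B, so
	 * 0 = 128B, 1 = 256B, ... 5 = 4096B (hence the 0x80 << max_payload
	 * in the DBG above and the clamp to LINK_MAX_PKT_ARR_SIZE - 1).
	 */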

	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read-only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default, 0x10, means a width of x16.  The problem is
	 * this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible,
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */

	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU Link Layer interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 * Expect OBP 0x1D4C
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as the timer0 register above, except bits [14:0]
	 * have the timer values for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	val = acknak_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	val = replay_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Test mode enables which test mode?
	 * Programming model needed for the Receiver Reset Lane N
	 * bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for LTSSM 8 ns timeout value and
	 * LTSSM 20 ns timeout value.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does the '12 ms timeout value' mean?
	 */
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));
	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP
	 * 0x8000000000000003
	 */
	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);
	lpu_init(csr_base, pxu_p);
	dmc_init(csr_base, pxu_p);

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel
	 * 0x800000000000000F
	 */
	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
	uint_t		obp_tsb_entries, obp_tsb_size;

	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB
	 */
	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

	obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
	obp_tsb_size = tsb_ctrl & 0xF;

	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}
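
	/*
	 * The valid OBP TTEs land at the tail of the new TSB, so DVMA
	 * mappings OBP handed out keep translating while the kernel takes
	 * over the rest of the table.  This presumes OBP's TSB has no
	 * more entries than ours.
	 */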

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */

	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);
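
	/*
	 * The loop above computes the encoded table size: the largest i
	 * in 0..8 such that tsb_size >= (0x2000 << i).  For a power-of-two
	 * tsb_size this is log2(tsb_size / 8K); e.g. a 64KB TSB encodes
	 * as i = 3.
	 */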

	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * CSR_V MMU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

/* ARGSUSED */
uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	attr = MMU_TTE_V;
	int		i;

	if (io_attr & PCI_MAP_ATTR_WRITE)
		attr |= MMU_TTE_W;

	if (flags & MMU_MAP_PFN) {
		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;
		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
			px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
		}
	} else {
		caddr_t	a = (caddr_t)addr;
		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
		}
	}

	return (H_EOK);
}
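
/*
 * Each TTE written by hvio_iommu_map() is just MMU_PTOB(pfn) OR'ed with the
 * valid bit (and the write bit for writable mappings).  No CSR access is
 * done here; the Fire MMU is expected to pick up TTEs from the in-memory
 * TSB, with the TSB Cache Snoop Enable bit set in hvio_mmu_init()
 * presumably keeping the TTE cache coherent.
 */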

/* ARGSUSED */
uint64_t
hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	int		i;

	for (i = 0; i < pages; i++, tsb_index++)
		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    io_attributes_t *attr_p, r_addr_t *r_addr_p)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	*tte_addr;
	uint64_t	ret = H_EOK;

	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;

	if (*tte_addr & MMU_TTE_V) {
		*r_addr_p = MMU_TTETOPA(*tte_addr);
		*attr_p = (*tte_addr & MMU_TTE_W) ?
		    PCI_MAP_ATTR_WRITE : PCI_MAP_ATTR_READ;
	} else {
		*r_addr_p = 0;
		*attr_p = 0;
		ret = H_ENOMAP;
	}

	return (ret);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra, io_attributes_t attr,
    io_addr_t *io_addr_p)
{
	uint64_t	pfn = MMU_BTOP(ra);

	*io_addr_p = MMU_BYPASS_BASE | ra |
	    (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);

	return (H_EOK);
}
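
/*
 * Example: a bypass request for RA 0x1234000 backed by cacheable memory
 * yields io_addr = MMU_BYPASS_BASE | 0x1234000; pages that are not memory
 * additionally get the MMU_BYPASS_NONCACHE bit.
 */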

/*
 * Generic IO Interrupt Services
 */

/*
 * Converts a device specific interrupt number given by the
 * arguments devhandle and devino into a system specific ino.
 */
/* ARGSUSED */
uint64_t
hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
    sysino_t *sysino)
{
	if (devino >= INTERRUPT_MAPPING_ENTRIES) {
		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
		return (H_ENOINTR);
	}

	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);

	return (H_EOK);
}
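
/*
 * DEVINO_TO_SYSINO and SYSINO_TO_DEVINO (defined elsewhere) presumably pack
 * the Fire port id into the bits above the devino field, so the routines
 * below recover the devino with a simple mask/shift.
 */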
1702 
1703 /*
1704  * Returns state in intr_valid_state if the interrupt defined by sysino
1705  * is valid (enabled) or not-valid (disabled).
1706  */
1707 uint64_t
1708 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1709     intr_valid_state_t *intr_valid_state)
1710 {
1711 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1712 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
1713 		*intr_valid_state = INTR_VALID;
1714 	} else {
1715 		*intr_valid_state = INTR_NOTVALID;
1716 	}
1717 
1718 	return (H_EOK);
1719 }
1720 
1721 /*
1722  * Sets the 'valid' state of the interrupt defined by
1723  * the argument sysino to the state defined by the
1724  * argument intr_valid_state.
1725  */
1726 uint64_t
1727 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
1728     intr_valid_state_t intr_valid_state)
1729 {
1730 	switch (intr_valid_state) {
1731 	case INTR_VALID:
1732 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1733 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1734 		break;
1735 	case INTR_NOTVALID:
1736 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1737 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1738 		break;
1739 	default:
1740 		return (H_EINVAL);
1741 	}
1742 
1743 	return (H_EOK);
1744 }
1745 
1746 /*
1747  * Returns the current state of the interrupt given by the sysino
1748  * argument.
1749  */
1750 uint64_t
1751 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
1752     intr_state_t *intr_state)
1753 {
1754 	intr_state_t state;
1755 
1756 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1757 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
1758 
1759 	switch (state) {
1760 	case INTERRUPT_IDLE_STATE:
1761 		*intr_state = INTR_IDLE_STATE;
1762 		break;
1763 	case INTERRUPT_RECEIVED_STATE:
1764 		*intr_state = INTR_RECEIVED_STATE;
1765 		break;
1766 	case INTERRUPT_PENDING_STATE:
1767 		*intr_state = INTR_DELIVERED_STATE;
1768 		break;
1769 	default:
1770 		return (H_EINVAL);
1771 	}
1772 
1773 	return (H_EOK);
1775 }
1776 
1777 /*
1778  * Sets the current state of the interrupt given by the sysino
1779  * argument to the value given in the argument intr_state.
1780  *
1781  * Note: Setting the state to INTR_IDLE clears any pending
1782  * interrupt for sysino.
1783  */
1784 uint64_t
1785 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
1786     intr_state_t intr_state)
1787 {
1788 	intr_state_t state;
1789 
1790 	switch (intr_state) {
1791 	case INTR_IDLE_STATE:
1792 		state = INTERRUPT_IDLE_STATE;
1793 		break;
1794 	case INTR_DELIVERED_STATE:
1795 		state = INTERRUPT_PENDING_STATE;
1796 		break;
1797 	default:
1798 		return (H_EINVAL);
1799 	}
1800 
1801 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1802 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
1803 
1804 	return (H_EOK);
1805 }
1806 
1807 /*
1808  * Returns the cpuid that is the current target of the
1809  * interrupt given by the sysino argument.
1810  *
1811  * The cpuid value returned is undefined if the target
1812  * has not been set via intr_settarget.
1813  */
1814 uint64_t
1815 hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
1816 {
1817 	*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1818 	    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
1819 
1820 	return (H_EOK);
1821 }
1822 
1823 /*
1824  * Set the target cpu for the interrupt defined by the argument
1825  * sysino to the target cpu value defined by the argument cpuid.
1826  */
1827 uint64_t
1828 hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
1829 {
1831 	uint64_t	val, intr_controller;
1832 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
1833 
1834 	/*
1835 	 * For now, we assign interrupt controller in a round
1836 	 * robin fashion.  Later, we may need to come up with
1837 	 * a more efficient assignment algorithm.
1838 	 */
1839 	intr_controller = 0x1ull << (cpuid % 4);
1840 
1841 	val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
1842 	    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
1843 	    ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
1844 	    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
1845 
1846 	/* For EQ interrupts, set DATA MONDO bit */
1847 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
1848 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
1849 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
1850 
1851 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
1852 
1853 	return (H_EOK);
1854 }
1855 
1856 /*
1857  * MSIQ Functions:
1858  */
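/*
 * Programs the event queue base address register and the interrupt
 * mondo data 0 register from the soft state in pxu_p.
 */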
1859 uint64_t
1860 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
1861 {
1862 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
1863 	    (uint64_t)pxu_p->msiq_mapped_p);
1864 	DBG(DBG_IB, NULL,
1865 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
1866 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
1867 
1868 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
1869 	    (uint64_t)ID_TO_IGN(pxu_p->portid) << INO_BITS);
1870 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
1871 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
1872 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
1873 
1874 	return (H_EOK);
1875 }
1876 
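/*
 * Returns PCI_MSIQ_VALID in msiq_valid_state if the event queue given
 * by msiq_id is active or in error, PCI_MSIQ_INVALID if it is idle.
 */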
1877 uint64_t
1878 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1879     pci_msiq_valid_state_t *msiq_valid_state)
1880 {
1881 	uint32_t	eq_state;
1882 	uint64_t	ret = H_EOK;
1883 
1884 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1885 	    msiq_id, ENTRIES_STATE);
1886 
1887 	switch (eq_state) {
1888 	case EQ_IDLE_STATE:
1889 		*msiq_valid_state = PCI_MSIQ_INVALID;
1890 		break;
1891 	case EQ_ACTIVE_STATE:
1892 	case EQ_ERROR_STATE:
1893 		*msiq_valid_state = PCI_MSIQ_VALID;
1894 		break;
1895 	default:
1896 		ret = H_EIO;
1897 		break;
1898 	}
1899 
1900 	return (ret);
1901 }
1902 
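/*
 * Enables or disables the event queue given by msiq_id, as selected
 * by msiq_valid_state.
 */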
1903 uint64_t
1904 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1905     pci_msiq_valid_state_t msiq_valid_state)
1906 {
1907 	uint64_t	ret = H_EOK;
1908 
1909 	switch (msiq_valid_state) {
1910 	case PCI_MSIQ_INVALID:
1911 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
1912 		    msiq_id, ENTRIES_DIS);
1913 		break;
1914 	case PCI_MSIQ_VALID:
1915 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
1916 		    msiq_id, ENTRIES_EN);
1917 		break;
1918 	default:
1919 		ret = H_EINVAL;
1920 		break;
1921 	}
1922 
1923 	return (ret);
1924 }
1925 
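/*
 * Returns PCI_MSIQ_STATE_ERROR in msiq_state if the event queue given
 * by msiq_id is in the error state, PCI_MSIQ_STATE_IDLE otherwise.
 */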
1926 uint64_t
1927 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
1928     pci_msiq_state_t *msiq_state)
1929 {
1930 	uint32_t	eq_state;
1931 	uint64_t	ret = H_EOK;
1932 
1933 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1934 	    msiq_id, ENTRIES_STATE);
1935 
1936 	switch (eq_state) {
1937 	case EQ_IDLE_STATE:
1938 	case EQ_ACTIVE_STATE:
1939 		*msiq_state = PCI_MSIQ_STATE_IDLE;
1940 		break;
1941 	case EQ_ERROR_STATE:
1942 		*msiq_state = PCI_MSIQ_STATE_ERROR;
1943 		break;
1944 	default:
1945 		ret = H_EIO;
1946 	}
1947 
1948 	return (ret);
1949 }
1950 
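/*
 * Forces an active event queue into the error state, or returns an
 * errored queue to idle.  Requesting the idle state on an idle queue
 * is a no-op; any other transition fails with H_EIO.
 */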
1951 uint64_t
1952 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
1953     pci_msiq_state_t msiq_state)
1954 {
1955 	uint32_t	eq_state;
1956 	uint64_t	ret = H_EOK;
1957 
1958 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1959 	    msiq_id, ENTRIES_STATE);
1960 
1961 	switch (eq_state) {
1962 	case EQ_IDLE_STATE:
1963 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
1964 			ret = H_EIO;
1965 		break;
1966 	case EQ_ACTIVE_STATE:
1967 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
1968 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
1969 			    msiq_id, ENTRIES_ENOVERR);
1970 		else
1971 			ret = H_EIO;
1972 		break;
1973 	case EQ_ERROR_STATE:
1974 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
1975 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
1976 			    msiq_id, ENTRIES_E2I);
1977 		else
1978 			ret = H_EIO;
1979 		break;
1980 	default:
1981 		ret = H_EIO;
1982 	}
1983 
1984 	return (ret);
1985 }
1986 
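/*
 * Returns the current head index of the event queue given by msiq_id.
 */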
1987 uint64_t
1988 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
1989     msiqhead_t *msiq_head)
1990 {
1991 	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
1992 	    msiq_id, ENTRIES_HEAD);
1993 
1994 	return (H_EOK);
1995 }
1996 
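/*
 * Sets the head index of the event queue given by msiq_id.
 */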
1997 uint64_t
1998 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
1999     msiqhead_t msiq_head)
2000 {
2001 	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2002 	    ENTRIES_HEAD, msiq_head);
2003 
2004 	return (H_EOK);
2005 }
2006 
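/*
 * Returns the current tail index of the event queue given by msiq_id.
 */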
2007 uint64_t
2008 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2009     msiqtail_t *msiq_tail)
2010 {
2011 	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2012 	    msiq_id, ENTRIES_TAIL);
2013 
2014 	return (H_EOK);
2015 }
2016 
2017 /*
2018  * MSI Functions:
2019  */
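/*
 * Loads the 32 bit and 64 bit MSI address registers with the bases of
 * the PCI memory ranges reserved for MSI transactions.
 */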
2020 uint64_t
2021 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2022 {
2023 	/* Reserve PCI MEM 32 resources to perform 32 bit MSI transactions */
2024 	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2025 	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
2026 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2027 	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2028 
2029 	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2030 	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2031 	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
2032 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2033 	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2034 
2035 	return (H_EOK);
2036 }
2037 
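/*
 * Returns in msiq_id the event queue that the MSI given by msi_num is
 * mapped to.
 */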
2038 uint64_t
2039 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2040     msiqid_t *msiq_id)
2041 {
2042 	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2043 	    msi_num, ENTRIES_EQNUM);
2044 
2045 	return (H_EOK);
2046 }
2047 
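/*
 * Maps the MSI given by msi_num to the event queue given by msiq_id.
 */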
2048 uint64_t
2049 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2050     msiqid_t msiq_id)
2051 {
2052 	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2053 	    ENTRIES_EQNUM, msiq_id);
2054 
2055 	return (H_EOK);
2056 }
2057 
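/*
 * Returns the valid (enabled) state of the MSI given by msi_num.
 */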
2058 uint64_t
2059 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2060     pci_msi_valid_state_t *msi_valid_state)
2061 {
2062 	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2063 	    msi_num, ENTRIES_V);
2064 
2065 	return (H_EOK);
2066 }
2067 
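/*
 * Sets or clears the valid bit of the MSI given by msi_num.
 */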
2068 uint64_t
2069 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2070     pci_msi_valid_state_t msi_valid_state)
2071 {
2072 	uint64_t	ret = H_EOK;
2073 
2074 	switch (msi_valid_state) {
2075 	case PCI_MSI_VALID:
2076 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2077 		    ENTRIES_V);
2078 		break;
2079 	case PCI_MSI_INVALID:
2080 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2081 		    ENTRIES_V);
2082 		break;
2083 	default:
2084 		ret = H_EINVAL;
2085 	}
2086 
2087 	return (ret);
2088 }
2089 
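/*
 * Returns the current state of the MSI given by msi_num.
 */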
2090 uint64_t
2091 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2092     pci_msi_state_t *msi_state)
2093 {
2094 	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2095 	    msi_num, ENTRIES_EQWR_N);
2096 
2097 	return (H_EOK);
2098 }
2099 
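/*
 * Sets the state of the MSI given by msi_num.  Only the transition
 * back to idle is implemented; other requests fail with H_EINVAL.
 */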
2100 uint64_t
2101 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2102     pci_msi_state_t msi_state)
2103 {
2104 	uint64_t	ret = H_EOK;
2105 
2106 	switch (msi_state) {
2107 	case PCI_MSI_STATE_IDLE:
2108 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2109 		    ENTRIES_EQWR_N);
2110 		break;
2111 	case PCI_MSI_STATE_DELIVERED:
2112 	default:
2113 		ret = H_EINVAL;
2114 		break;
2115 	}
2116 
2117 	return (ret);
2118 }
2119 
2120 /*
2121  * MSG Functions:
2122  */
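/*
 * Returns in msiq_id the event queue that the PCIe message given by
 * msg_type is mapped to.
 */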
2123 uint64_t
2124 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2125     msiqid_t *msiq_id)
2126 {
2127 	uint64_t	ret = H_EOK;
2128 
2129 	switch (msg_type) {
2130 	case PCIE_PME_MSG:
2131 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2132 		break;
2133 	case PCIE_PME_ACK_MSG:
2134 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2135 		    EQNUM);
2136 		break;
2137 	case PCIE_CORR_MSG:
2138 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2139 		break;
2140 	case PCIE_NONFATAL_MSG:
2141 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2142 		    EQNUM);
2143 		break;
2144 	case PCIE_FATAL_MSG:
2145 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2146 		break;
2147 	default:
2148 		ret = H_EINVAL;
2149 		break;
2150 	}
2151 
2152 	return (ret);
2153 }
2154 
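/*
 * Maps the PCIe message given by msg_type to the event queue given by
 * msiq_id.
 */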
2155 uint64_t
2156 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2157     msiqid_t msiq_id)
2158 {
2159 	uint64_t	ret = H_EOK;
2160 
2161 	switch (msg_type) {
2162 	case PCIE_PME_MSG:
2163 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2164 		break;
2165 	case PCIE_PME_ACK_MSG:
2166 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2167 		break;
2168 	case PCIE_CORR_MSG:
2169 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2170 		break;
2171 	case PCIE_NONFATAL_MSG:
2172 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2173 		break;
2174 	case PCIE_FATAL_MSG:
2175 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2176 		break;
2177 	default:
2178 		ret = H_EINVAL;
2179 		break;
2180 	}
2181 
2182 	return (ret);
2183 }
2184 
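/*
 * Returns the valid (enabled) state of the PCIe message given by
 * msg_type.
 */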
2185 uint64_t
2186 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2187     pcie_msg_valid_state_t *msg_valid_state)
2188 {
2189 	uint64_t	ret = H_EOK;
2190 
2191 	switch (msg_type) {
2192 	case PCIE_PME_MSG:
2193 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2194 		break;
2195 	case PCIE_PME_ACK_MSG:
2196 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2197 		    PME_TO_ACK_MAPPING, V);
2198 		break;
2199 	case PCIE_CORR_MSG:
2200 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2201 		break;
2202 	case PCIE_NONFATAL_MSG:
2203 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2204 		    ERR_NONFATAL_MAPPING, V);
2205 		break;
2206 	case PCIE_FATAL_MSG:
2207 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2208 		    V);
2209 		break;
2210 	default:
2211 		ret = H_EINVAL;
2212 		break;
2213 	}
2214 
2215 	return (ret);
2216 }
2217 
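/*
 * Sets or clears the valid bit of the PCIe message given by msg_type.
 */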
2218 uint64_t
2219 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2220     pcie_msg_valid_state_t msg_valid_state)
2221 {
2222 	uint64_t	ret = H_EOK;
2223 
2224 	switch (msg_valid_state) {
2225 	case PCIE_MSG_VALID:
2226 		switch (msg_type) {
2227 		case PCIE_PME_MSG:
2228 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2229 			break;
2230 		case PCIE_PME_ACK_MSG:
2231 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2232 			break;
2233 		case PCIE_CORR_MSG:
2234 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2235 			break;
2236 		case PCIE_NONFATAL_MSG:
2237 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2238 			break;
2239 		case PCIE_FATAL_MSG:
2240 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2241 			break;
2242 		default:
2243 			ret = H_EINVAL;
2244 			break;
2245 		}
2246 
2247 		break;
2248 	case PCIE_MSG_INVALID:
2249 		switch (msg_type) {
2250 		case PCIE_PME_MSG:
2251 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2252 			break;
2253 		case PCIE_PME_ACK_MSG:
2254 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2255 			break;
2256 		case PCIE_CORR_MSG:
2257 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2258 			break;
2259 		case PCIE_NONFATAL_MSG:
2260 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2261 			break;
2262 		case PCIE_FATAL_MSG:
2263 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2264 			break;
2265 		default:
2266 			ret = H_EINVAL;
2267 			break;
2268 		}
2269 		break;
2270 	default:
2271 		ret = H_EINVAL;
2272 	}
2273 
2274 	return (ret);
2275 }
2276 
2277 /*
2278  * Suspend/Resume Functions:
2279  *	(pec, mmu, ib)
2280  *	cb
2281  * Registers saved have all been touched in the XXX_init functions.
2282  */
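/*
 * Saves the PEC, MMU and IB register state, including the interrupt
 * mapping registers, into a single buffer hung off pxu_p.  A caller
 * (the nexus suspend path, for example) is expected to pair this with
 * hvio_resume, roughly:
 *
 *	if (hvio_suspend(dev_hdl, pxu_p) != H_EOK)
 *		return (DDI_FAILURE);
 *	...
 *	hvio_resume(dev_hdl, devino, pxu_p);
 */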
2283 uint64_t
2284 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2285 {
2286 	uint64_t	*config_state;
2287 	int		total_size;
2288 	int		i;
2289 
2290 	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2291 		return (H_EIO);
2292 
2293 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2294 	config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2295 
2296 	if (config_state == NULL) {
2297 		return (H_EIO);
2298 	}
2299 
2300 	/*
2301 	 * Soft state for suspend/resume from pxu_t
2302 	 * uint64_t	*pec_config_state;
2303 	 * uint64_t	*mmu_config_state;
2304 	 * uint64_t	*ib_intr_map;
2305 	 * uint64_t	*ib_config_state;
2306 	 * uint64_t	*xcb_config_state;
2307 	 */
2308 
2309 	/* Save the PEC configuration states */
2310 	pxu_p->pec_config_state = config_state;
2311 	for (i = 0; i < PEC_KEYS; i++) {
2312 		pxu_p->pec_config_state[i] =
2313 		    CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
2314 	}
2315 
2316 	/* Save the MMU configuration states */
2317 	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2318 	for (i = 0; i < MMU_KEYS; i++) {
2319 		pxu_p->mmu_config_state[i] =
2320 		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2321 	}
2322 
2323 	/* Save the interrupt mapping registers */
2324 	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2325 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2326 		pxu_p->ib_intr_map[i] =
2327 		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2328 	}
2329 
2330 	/* Save the IB configuration states */
2331 	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2332 	for (i = 0; i < IB_KEYS; i++) {
2333 		pxu_p->ib_config_state[i] =
2334 		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2335 	}
2336 
2337 	return (H_EOK);
2338 }
2339 
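/*
 * Restores the IB, MMU and PEC state saved by hvio_suspend, idles all
 * interrupts, and frees the save buffer.  A no-op if hvio_suspend did
 * not complete.
 */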
2340 void
2341 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2342 {
2343 	int		total_size;
2344 	sysino_t	sysino;
2345 	int		i;
2346 
2347 	/* Make sure that suspend actually did occur */
2348 	if (!pxu_p->pec_config_state) {
2349 		return;
2350 	}
2351 
2352 	/* Restore IB configuration states */
2353 	for (i = 0; i < IB_KEYS; i++) {
2354 		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2355 		    pxu_p->ib_config_state[i]);
2356 	}
2357 
2358 	/*
2359 	 * Restore the interrupt mapping registers and make sure the
2360 	 * interrupts are idle.
2361 	 */
2362 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2363 		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2364 		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2365 		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2366 		    pxu_p->ib_intr_map[i]);
2367 	}
2368 
2369 	/* Restore MMU configuration states */
2370 	/* Clear the cache. */
2371 	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2372 
2373 	for (i = 0; i < MMU_KEYS; i++) {
2374 		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2375 		    pxu_p->mmu_config_state[i]);
2376 	}
2377 
2378 	/* Restore PEC configuration states */
2379 	/* Make sure all reset bits are low until error is detected */
2380 	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2381 
2382 	for (i = 0; i < PEC_KEYS; i++) {
2383 		CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
2384 		    pxu_p->pec_config_state[i]);
2385 	}
2386 
2387 	/* Enable PCI-E interrupt */
2388 	(void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);
2389 
2390 	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
2391 
2392 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2393 	kmem_free(pxu_p->pec_config_state, total_size);
2394 
2395 	pxu_p->pec_config_state = NULL;
2396 	pxu_p->mmu_config_state = NULL;
2397 	pxu_p->ib_config_state = NULL;
2398 	pxu_p->ib_intr_map = NULL;
2399 
2400 	msiq_resume(dev_hdl, pxu_p);
2401 }
2402 
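/*
 * Saves the common block (CB) registers listed in
 * cb_config_state_regs into a buffer hung off pxu_p.
 */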
2403 uint64_t
2404 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2405 {
2406 	uint64_t	*config_state;
2407 	int		i;
2408 
2409 	config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);
2410 
2411 	if (config_state == NULL) {
2412 		return (H_EIO);
2413 	}
2414 
2415 	/* Save the configuration states */
2416 	pxu_p->xcb_config_state = config_state;
2417 	for (i = 0; i < CB_KEYS; i++) {
2418 		pxu_p->xcb_config_state[i] =
2419 		    CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
2420 	}
2421 
2422 	return (H_EOK);
2423 }
2424 
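/*
 * Clears any logged JBC errors, restores the CB registers saved by
 * hvio_cb_suspend, and re-idles the XBC interrupt.
 */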
2425 void
2426 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2427     devino_t devino, pxu_t *pxu_p)
2428 {
2429 	sysino_t	sysino;
2430 	int		i;
2431 
2432 	/*
2433 	 * No reason to have any reset bits high until an error is
2434 	 * detected on the link.
2435 	 */
2436 	CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2437 
2438 	ASSERT(pxu_p->xcb_config_state);
2439 
2440 	/* Restore the configuration states */
2441 	for (i = 0; i < CB_KEYS; i++) {
2442 		CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
2443 		    pxu_p->xcb_config_state[i]);
2444 	}
2445 
2446 	/* Enable XBC interrupt */
2447 	(void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);
2448 
2449 	(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
2450 
2451 	kmem_free(pxu_p->xcb_config_state, CB_SIZE);
2452 
2453 	pxu_p->xcb_config_state = NULL;
2454 }
2455 
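/*
 * Saves the event queue states, the MSI mapping registers and the
 * remaining MSIQ registers into a buffer hung off pxu_p.
 */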
2456 static uint64_t
2457 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2458 {
2459 	size_t	bufsz;
2460 	volatile uint64_t *cur_p;
2461 	int i;
2462 
2463 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2464 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2465 	    NULL)
2466 		return (H_EIO);
2467 
2468 	cur_p = pxu_p->msiq_config_state;
2469 
2470 	/* Save each EQ state */
2471 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2472 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2473 
2474 	/* Save MSI mapping registers */
2475 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2476 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2477 
2478 	/* Save all other MSIQ registers */
2479 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2480 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2481 	return (H_EOK);
2482 }
2483 
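/*
 * Reinitializes the event queues, restores the saved EQ enables, MSI
 * mappings and remaining MSIQ registers, and frees the save buffer.
 */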
2484 static void
2485 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2486 {
2487 	size_t	bufsz;
2488 	uint64_t *cur_p, state;
2489 	int i;
2490 
2491 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2492 	cur_p = pxu_p->msiq_config_state;
2493 	/*
2494 	 * Initialize EQ base address register and
2495 	 * Interrupt Mondo Data 0 register.
2496 	 */
2497 	(void) hvio_msiq_init(dev_hdl, pxu_p);
2498 
2499 	/* Restore EQ states */
2500 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2501 		state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
2502 		if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
2503 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2504 			    i, ENTRIES_EN);
2505 	}
2506 
2507 	/* Restore MSI mapping */
2508 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2509 		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2510 
2511 	/*
2512 	 * Restore all other registers. MSI 32 bit address and
2513 	 * MSI 64 bit address are restored as part of this.
2514 	 */
2515 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2516 		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2517 
2518 	kmem_free(pxu_p->msiq_config_state, bufsz);
2519 	pxu_p->msiq_config_state = NULL;
2520 }
2521 
2522 /*
2523  * Sends a PME_Turn_Off message to put the link in L2/L3 Ready state.
2524  * Called by px_goto_l23ready.
2525  * Returns DDI_SUCCESS or DDI_FAILURE.
2526  */
2527 int
2528 px_send_pme_turnoff(caddr_t csr_base)
2529 {
2530 	volatile uint64_t reg;
2531 
2532 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2533 	/* If already pending, return failure */
2534 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2535 		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2536 		    "tlu_pme_turn_off_generate = 0x%llx\n", reg);
2537 		return (DDI_FAILURE);
2538 	}
2539 
2540 	/* Write to the PME_Turn_Off register to broadcast the message */
2541 	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2542 	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
2543 
2544 	return (DDI_SUCCESS);
2545 }
2546 
2547 /*
2548  * Checks for link being in L1idle state.
2549  * Returns
2550  * DDI_SUCCESS - if the link is in L1idle
2551  * DDI_FAILURE - if the link is not in L1idle
2552  */
2553 int
2554 px_link_wait4l1idle(caddr_t csr_base)
2555 {
2556 	uint8_t ltssm_state;
2557 	int ntries = px_max_l1_tries;
2558 
2559 	while (ntries > 0) {
2560 		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2561 		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2562 			break;
2563 		delay(1);
2564 	}
2565 	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2566 	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2567 }
2568 
2569 /*
2570  * Transition the link to L0 after it is down.
2571  */
2572 int
2573 px_link_retrain(caddr_t csr_base)
2574 {
2575 	volatile uint64_t reg;
2576 
2577 	reg = CSR_XR(csr_base, TLU_CONTROL);
2578 	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2579 		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2580 		return (DDI_FAILURE);
2581 	}
2582 
2583 	/* Clear link down bit in TLU Other Event Clear Status Register. */
2584 	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2585 
2586 	/* Clear Drain bit in TLU Status Register */
2587 	CSR_BS(csr_base, TLU_STATUS, DRAIN);
2588 
2589 	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2590 	reg = CSR_XR(csr_base, TLU_CONTROL);
2591 	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2592 	CSR_XS(csr_base, TLU_CONTROL, reg);
2593 
2594 	return (DDI_SUCCESS);
2595 }
2596 
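/*
 * Sets the Remain in Detect.Quiet bit in the TLU Control Register,
 * which keeps the link from training until px_link_retrain clears it.
 */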
2597 void
2598 px_enable_detect_quiet(caddr_t csr_base)
2599 {
2600 	volatile uint64_t tlu_ctrl;
2601 
2602 	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
2603 	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
2604 	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
2605 }
2606