/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 */
static uint64_t	pec_config_state_regs[] = {
	PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	ILU_ERROR_LOG_ENABLE,
	ILU_INTERRUPT_ENABLE,
	TLU_CONTROL,
	TLU_OTHER_EVENT_LOG_ENABLE,
	TLU_OTHER_EVENT_INTERRUPT_ENABLE,
	TLU_DEVICE_CONTROL,
	TLU_LINK_CONTROL,
	TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
	TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
	TLU_CORRECTABLE_ERROR_LOG_ENABLE,
	TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
	LPU_LINK_LAYER_INTERRUPT_MASK,
	LPU_PHY_INTERRUPT_MASK,
	LPU_RECEIVE_PHY_INTERRUPT_MASK,
	LPU_TRANSMIT_PHY_INTERRUPT_MASK,
	LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
	LPU_LTSSM_INTERRUPT_MASK,
	LPU_RESET,
	LPU_DEBUG_CONFIG,
	LPU_INTERRUPT_MASK,
	LPU_LINK_LAYER_CONFIG,
	LPU_FLOW_CONTROL_UPDATE_CONTROL,
	LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
	LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
	LPU_REPLAY_BUFFER_MAX_ADDRESS,
	LPU_TXLINK_RETRY_FIFO_POINTER,
	LPU_LTSSM_CONFIG2,
	LPU_LTSSM_CONFIG3,
	LPU_LTSSM_CONFIG4,
	LPU_LTSSM_CONFIG5,
	DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	DMC_DEBUG_SELECT_FOR_PORT_A,
	DMC_DEBUG_SELECT_FOR_PORT_B
};
#define	PEC_SIZE (sizeof (pec_config_state_regs))
#define	PEC_KEYS (PEC_SIZE / sizeof (uint64_t))
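
/*
 * A suspend path would typically walk this table, reading each CSR into
 * a save buffer of PEC_KEYS entries, and the resume path would write
 * them back (forcing LPU_RESET to 0ull, per the note above).  A minimal
 * sketch, assuming a caller-supplied config_state buffer of at least
 * PEC_KEYS uint64_t slots (hypothetical name; the actual save/restore
 * routines live elsewhere in this driver):
 *
 *	for (i = 0; i < PEC_KEYS; i++)
 *		config_state[i] = CSR_XR(csr_base, pec_config_state_regs[i]);
 */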

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the CB module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	cb_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	CB_SIZE (sizeof (cb_config_state_regs))
#define	CB_KEYS (CB_SIZE / sizeof (uint64_t))

static uint64_t	msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V CB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us.
	 * Completion timeout select default value - 67.1 ms; OBP will
	 * set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in the
	 * TLU Link Control register?  Both are hardware dependent and
	 * likely set by OBP.
	 *
	 * Disable non-posted write ordering: set the NPWR_EN bit to
	 * force serialization of writes.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);
	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
	    (1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;

	/*
	 * Set Detect.Quiet.  This will disable automatic link
	 * re-training if the link goes down, e.g. when power management
	 * turns off power to the downstream device.  This enables
	 * Fire to go to the Drain state after a link down.  The Drain
	 * state forces a reset of the FC state machine, which is
	 * required for proper link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always
	 * 64 bytes (000b).  Bits [7:5] are the Max Payload Size, which
	 * starts at 128 bytes (000b).  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used, since the L0s exit
	 * latency should then be lower than the L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read-only
	 * register.  Bit 12 is Clock, and it should always be 1,
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are the width,
	 * with the expected value above signifying an x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power limits for the slots.  These will be platform
	 * dependent, and OBP will need to set them after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are the power limit scale, which most likely
	 * will be 0b, signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x49,  0x43,  0x30},
		{0x1A0,  0x76,  0x6B,  0x48},
		{0x22F,  0x9A,  0x56,  0x56},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table.
	 * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379,  0x112, 0xFC,  0xB4},
		{0x618,  0x1BA, 0x192, 0x10E},
		{0x831,  0x242, 0x143, 0x143},
		{0xFB1,  0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};

	/*
	 * Get the Link Width.  See the table above the LINK_WIDTH_ARR_SIZE
	 * #define.  The timer tables have entries for x1, x4, x8 and x16
	 * links; if any other width is reported, default to the x1 entry.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}

	/*
	 * Get the Max Payload Size.
	 * See the table above the LINK_MAX_PKT_ARR_SIZE #define.
	 */
	max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
	    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
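
	/*
	 * The MPS field uses the standard PCI Express encoding: the
	 * payload size in bytes is (0x80 << max_payload), so 0 -> 128B,
	 * 1 -> 256B, 2 -> 512B, 3 -> 1024B, 4 -> 2048B and 5 -> 4096B,
	 * matching the six rows of the timer tables above.
	 */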

	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read-only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default value 0x10 means a width of x16.  The problem is
	 * that this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible,
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */

	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU Link Layer interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 * Expect OBP 0x1D4C
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as the timer0 register above, except bits [14:0]
	 * have the timer values for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	val = acknak_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));
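
	/*
	 * Example read-off from the tables above: a 256B Max Payload
	 * (row 1) on an x4 link (column 1) gives an ACKNAK latency
	 * threshold of 0x76 and a replay timer threshold of 0x1BA.
	 */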

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	val = replay_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Which test mode does the test mode
	 * enable select?  A programming model is needed for the
	 * Receiver Reset Lane N bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for the LTSSM 8 ns timeout and the
	 * LTSSM 20 ns timeout.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does the '12 ms timeout value' mean?
	 */
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));
	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP
	 * 0x8000000000000003
	 */
	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);
	lpu_init(csr_base, pxu_p);
	dmc_init(csr_base, pxu_p);

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel
	 * 0x800000000000000F
	 */
	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
	uint_t		obp_tsb_entries, obp_tsb_size;

	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB
	 */
	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

	obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
	obp_tsb_size = tsb_ctrl & 0xF;

	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}
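
	/*
	 * Note that the valid OBP TTEs land in the *last* obp_tsb_entries
	 * slots of the new TSB (which holds tsb_size >> 3 eight-byte
	 * entries), presumably so that the DVMA addresses OBP handed out
	 * keep the same TSB offsets after the kernel takes over the MMU.
	 */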

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */
	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--)
		;

	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);
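
	/*
	 * Worked example of the size encoding above: the loop finds the
	 * largest i (at most 8) with tsb_size >= (0x2000 << i).  A 512KB
	 * TSB (64K eight-byte TTEs) therefore encodes as i = 6, since
	 * 0x2000 << 6 == 0x80000 == 512KB.
	 */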

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * CSR_V MMU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

/* ARGSUSED */
uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages, io_attributes_t io_attributes,
    void *addr, size_t pfn_index, int flag)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	attr = MMU_TTE_V;
	int		i;

	if (io_attributes & PCI_MAP_ATTR_WRITE)
		attr |= MMU_TTE_W;

	if (flag == MMU_MAP_MP) {
		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;

		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
			px_iopfn_t	pfn = PX_GET_MP_PFN(mp, pfn_index);

			pxu_p->tsb_vaddr[tsb_index] =
			    MMU_PTOB(pfn) | attr;
		}
	} else {
		caddr_t a = (caddr_t)addr;

		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);

			pxu_p->tsb_vaddr[tsb_index] =
			    MMU_PTOB(pfn) | attr;
		}
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	int		i;

	for (i = 0; i < pages; i++, tsb_index++) {
		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	*tte_addr;
	uint64_t	ret = H_EOK;

	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;

	if (*tte_addr & MMU_TTE_V) {
		*r_addr_p = MMU_TTETOPA(*tte_addr);
		*attributes_p = (*tte_addr & MMU_TTE_W) ?
		    PCI_MAP_ATTR_WRITE : PCI_MAP_ATTR_READ;
	} else {
		*r_addr_p = 0;
		*attributes_p = 0;
		ret = H_ENOMAP;
	}

	return (ret);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
	uint64_t	pfn = MMU_BTOP(ra);

	*io_addr_p = MMU_BYPASS_BASE | ra |
	    (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);

	return (H_EOK);
}
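
/*
 * Illustrative example (hypothetical RA value): a DMA to real address
 * 0x12340000 backed by ordinary memory maps to the bypass address
 * MMU_BYPASS_BASE | 0x12340000; for a non-memory page, the
 * MMU_BYPASS_NONCACHE bit is OR'ed in as well to mark the access
 * non-cacheable.
 */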

/*
 * Generic IO Interrupt Services
 */

/*
 * Converts a device specific interrupt number given by the
 * arguments devhandle and devino into a system specific ino.
 */
/* ARGSUSED */
uint64_t
hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
    sysino_t *sysino)
{
	if (devino >= INTERRUPT_MAPPING_ENTRIES) {
		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
		return (H_ENOINTR);
	}

	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);

	return (H_EOK);
}
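
/*
 * Sketch of the intent of DEVINO_TO_SYSINO() above: a sysino is the
 * devino qualified by which Fire leaf it belongs to, i.e. the macro
 * presumably folds pxu_p->portid into the bits above the devino field,
 * so the same devino on two leaves yields distinct system-wide inos.
 * See px_lib4u.h for the exact bit layout.
 */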
1711 
1712 /*
1713  * Returns state in intr_valid_state if the interrupt defined by sysino
1714  * is valid (enabled) or not-valid (disabled).
1715  */
1716 uint64_t
1717 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1718     intr_valid_state_t *intr_valid_state)
1719 {
1720 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1721 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
1722 		*intr_valid_state = INTR_VALID;
1723 	} else {
1724 		*intr_valid_state = INTR_NOTVALID;
1725 	}
1726 
1727 	return (H_EOK);
1728 }
1729 
1730 /*
1731  * Sets the 'valid' state of the interrupt defined by
1732  * the argument sysino to the state defined by the
1733  * argument intr_valid_state.
1734  */
1735 uint64_t
1736 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
1737     intr_valid_state_t intr_valid_state)
1738 {
1739 	switch (intr_valid_state) {
1740 	case INTR_VALID:
1741 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1742 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1743 		break;
1744 	case INTR_NOTVALID:
1745 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1746 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1747 		break;
1748 	default:
		return (H_EINVAL);
1750 	}
1751 
1752 	return (H_EOK);
1753 }
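
/*
 * Illustrative sketch (guarded out; px_example_intr_quiesce() is
 * hypothetical): disable an interrupt source around work that must not
 * race with it, then restore the previous valid state.
 */
#if 0
static void
px_example_intr_quiesce(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t ino)
{
	sysino_t		sysino;
	intr_valid_state_t	valid;

	if (hvio_intr_devino_to_sysino(dev_hdl, pxu_p, ino, &sysino) != H_EOK)
		return;

	(void) hvio_intr_getvalid(dev_hdl, sysino, &valid);
	(void) hvio_intr_setvalid(dev_hdl, sysino, INTR_NOTVALID);

	/* ... work that must not race with this interrupt ... */

	(void) hvio_intr_setvalid(dev_hdl, sysino, valid);
}
#endif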
1754 
1755 /*
1756  * Returns the current state of the interrupt given by the sysino
1757  * argument.
1758  */
1759 uint64_t
1760 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
1761     intr_state_t *intr_state)
1762 {
1763 	intr_state_t state;
1764 
1765 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1766 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
1767 
1768 	switch (state) {
1769 	case INTERRUPT_IDLE_STATE:
1770 		*intr_state = INTR_IDLE_STATE;
1771 		break;
1772 	case INTERRUPT_RECEIVED_STATE:
1773 		*intr_state = INTR_RECEIVED_STATE;
1774 		break;
1775 	case INTERRUPT_PENDING_STATE:
1776 		*intr_state = INTR_DELIVERED_STATE;
1777 		break;
1778 	default:
		return (H_EINVAL);
1780 	}
1781 
	return (H_EOK);
}
1785 
1786 /*
1787  * Sets the current state of the interrupt given by the sysino
1788  * argument to the value given in the argument intr_state.
1789  *
1790  * Note: Setting the state to INTR_IDLE clears any pending
1791  * interrupt for sysino.
1792  */
1793 uint64_t
1794 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
1795     intr_state_t intr_state)
1796 {
1797 	intr_state_t state;
1798 
1799 	switch (intr_state) {
1800 	case INTR_IDLE_STATE:
1801 		state = INTERRUPT_IDLE_STATE;
1802 		break;
1803 	case INTR_DELIVERED_STATE:
1804 		state = INTERRUPT_PENDING_STATE;
1805 		break;
1806 	default:
		return (H_EINVAL);
1808 	}
1809 
1810 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1811 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
1812 
1813 	return (H_EOK);
1814 }
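
/*
 * Illustrative sketch (guarded out; px_example_intr_ack() is hypothetical):
 * the usual end-of-handler step.  Per the note above, setting the state
 * back to idle also drops any interrupt still pending for the sysino.
 */
#if 0
static void
px_example_intr_ack(devhandle_t dev_hdl, sysino_t sysino)
{
	intr_state_t state;

	(void) hvio_intr_getstate(dev_hdl, sysino, &state);
	if (state == INTR_DELIVERED_STATE)
		(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
}
#endif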
1815 
1816 /*
1817  * Returns the cpuid that is the current target of the
1818  * interrupt given by the sysino argument.
1819  *
1820  * The cpuid value returned is undefined if the target
1821  * has not been set via intr_settarget.
1822  */
1823 uint64_t
1824 hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
1825 {
1826 	*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1827 	    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
1828 
1829 	return (H_EOK);
1830 }
1831 
1832 /*
1833  * Set the target cpu for the interrupt defined by the argument
1834  * sysino to the target cpu value defined by the argument cpuid.
1835  */
1836 uint64_t
1837 hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
{
	uint64_t	val, intr_controller;
1841 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
1842 
1843 	/*
1844 	 * For now, we assign interrupt controller in a round
1845 	 * robin fashion.  Later, we may need to come up with
1846 	 * a more efficient assignment algorithm.
1847 	 */
1848 	intr_controller = 0x1ull << (cpuid % 4);
1849 
1850 	val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
1851 	    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
1852 	    ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
1853 	    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
1854 
1855 	/* For EQ interrupts, set DATA MONDO bit */
1856 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
1857 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
1858 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
1859 
1860 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
1861 
1862 	return (H_EOK);
1863 }
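
/*
 * Illustrative sketch (guarded out; px_example_intr_retarget() is
 * hypothetical): retarget an interrupt to a new cpu.  Because settarget
 * rewrites the whole mapping entry, the source is invalidated around the
 * move and any state latched meanwhile is cleared before re-enabling.
 */
#if 0
static void
px_example_intr_retarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
{
	intr_valid_state_t	valid;

	(void) hvio_intr_getvalid(dev_hdl, sysino, &valid);
	(void) hvio_intr_setvalid(dev_hdl, sysino, INTR_NOTVALID);

	(void) hvio_intr_settarget(dev_hdl, sysino, cpuid);

	/* Clear anything latched while the source was disabled. */
	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
	(void) hvio_intr_setvalid(dev_hdl, sysino, valid);
}
#endif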
1864 
1865 /*
1866  * MSIQ Functions:
1867  */
1868 uint64_t
1869 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
1870 {
1871 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
1872 	    (uint64_t)pxu_p->msiq_mapped_p);
1873 	DBG(DBG_IB, NULL,
1874 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
1875 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
1876 
1877 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
1878 	    (uint64_t)ID_TO_IGN(pxu_p->portid) << INO_BITS);
1879 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
1880 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
1881 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
1882 
1883 	return (H_EOK);
1884 }
1885 
1886 uint64_t
1887 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1888     pci_msiq_valid_state_t *msiq_valid_state)
1889 {
1890 	uint32_t	eq_state;
1891 	uint64_t	ret = H_EOK;
1892 
1893 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1894 	    msiq_id, ENTRIES_STATE);
1895 
1896 	switch (eq_state) {
1897 	case EQ_IDLE_STATE:
1898 		*msiq_valid_state = PCI_MSIQ_INVALID;
1899 		break;
1900 	case EQ_ACTIVE_STATE:
1901 	case EQ_ERROR_STATE:
1902 		*msiq_valid_state = PCI_MSIQ_VALID;
1903 		break;
1904 	default:
1905 		ret = H_EIO;
1906 		break;
1907 	}
1908 
1909 	return (ret);
1910 }
1911 
1912 uint64_t
1913 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1914     pci_msiq_valid_state_t msiq_valid_state)
1915 {
1916 	uint64_t	ret = H_EOK;
1917 
1918 	switch (msiq_valid_state) {
1919 	case PCI_MSIQ_INVALID:
1920 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
1921 		    msiq_id, ENTRIES_DIS);
1922 		break;
1923 	case PCI_MSIQ_VALID:
1924 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
1925 		    msiq_id, ENTRIES_EN);
1926 		break;
1927 	default:
1928 		ret = H_EINVAL;
1929 		break;
1930 	}
1931 
1932 	return (ret);
1933 }
1934 
1935 uint64_t
1936 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
1937     pci_msiq_state_t *msiq_state)
1938 {
1939 	uint32_t	eq_state;
1940 	uint64_t	ret = H_EOK;
1941 
1942 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1943 	    msiq_id, ENTRIES_STATE);
1944 
1945 	switch (eq_state) {
1946 	case EQ_IDLE_STATE:
1947 	case EQ_ACTIVE_STATE:
1948 		*msiq_state = PCI_MSIQ_STATE_IDLE;
1949 		break;
1950 	case EQ_ERROR_STATE:
1951 		*msiq_state = PCI_MSIQ_STATE_ERROR;
1952 		break;
1953 	default:
1954 		ret = H_EIO;
1955 	}
1956 
1957 	return (ret);
1958 }
1959 
1960 uint64_t
1961 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
1962     pci_msiq_state_t msiq_state)
1963 {
1964 	uint32_t	eq_state;
1965 	uint64_t	ret = H_EOK;
1966 
1967 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1968 	    msiq_id, ENTRIES_STATE);
1969 
1970 	switch (eq_state) {
1971 	case EQ_IDLE_STATE:
1972 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
1973 			ret = H_EIO;
1974 		break;
1975 	case EQ_ACTIVE_STATE:
1976 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
1977 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
1978 			    msiq_id, ENTRIES_ENOVERR);
1979 		else
1980 			ret = H_EIO;
1981 		break;
1982 	case EQ_ERROR_STATE:
1983 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
1984 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
1985 			    msiq_id, ENTRIES_E2I);
1986 		else
1987 			ret = H_EIO;
1988 		break;
1989 	default:
1990 		ret = H_EIO;
1991 	}
1992 
1993 	return (ret);
1994 }
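
/*
 * Illustrative sketch (guarded out; px_example_eq_recover() is
 * hypothetical): recover an event queue that has gone into the error
 * state by driving it back to idle via the E2I path above.
 */
#if 0
static void
px_example_eq_recover(devhandle_t dev_hdl, msiqid_t msiq_id)
{
	pci_msiq_state_t state;

	if (hvio_msiq_getstate(dev_hdl, msiq_id, &state) != H_EOK)
		return;

	if (state == PCI_MSIQ_STATE_ERROR)
		(void) hvio_msiq_setstate(dev_hdl, msiq_id,
		    PCI_MSIQ_STATE_IDLE);
}
#endif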
1995 
1996 uint64_t
1997 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
1998     msiqhead_t *msiq_head)
1999 {
2000 	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
2001 	    msiq_id, ENTRIES_HEAD);
2002 
2003 	return (H_EOK);
2004 }
2005 
2006 uint64_t
2007 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2008     msiqhead_t msiq_head)
2009 {
2010 	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2011 	    ENTRIES_HEAD, msiq_head);
2012 
2013 	return (H_EOK);
2014 }
2015 
2016 uint64_t
2017 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2018     msiqtail_t *msiq_tail)
2019 {
2020 	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2021 	    msiq_id, ENTRIES_TAIL);
2022 
2023 	return (H_EOK);
2024 }
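
/*
 * Illustrative sketch (guarded out; px_example_msiq_drain() is
 * hypothetical): drain an event queue by walking head up to tail, then
 * writing the new head back.  EQ_RECORD_COUNT stands in for the real
 * queue depth and is likewise hypothetical here.
 */
#if 0
static void
px_example_msiq_drain(devhandle_t dev_hdl, msiqid_t msiq_id)
{
	msiqhead_t	head;
	msiqtail_t	tail;

	(void) hvio_msiq_gethead(dev_hdl, msiq_id, &head);
	(void) hvio_msiq_gettail(dev_hdl, msiq_id, &tail);

	while (head != tail) {
		/* ... process the EQ record at index head ... */
		head = (head + 1) % EQ_RECORD_COUNT;
	}

	(void) hvio_msiq_sethead(dev_hdl, msiq_id, head);
}
#endif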
2025 
2026 /*
2027  * MSI Functions:
2028  */
2029 uint64_t
2030 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2031 {
	/* Reserve PCI MEM 32 resources to perform 32 bit MSI transactions */
2033 	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2034 	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2036 	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2037 
2038 	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2039 	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2040 	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2042 	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2043 
2044 	return (H_EOK);
2045 }
2046 
2047 uint64_t
2048 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2049     msiqid_t *msiq_id)
2050 {
2051 	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2052 	    msi_num, ENTRIES_EQNUM);
2053 
2054 	return (H_EOK);
2055 }
2056 
2057 uint64_t
2058 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2059     msiqid_t msiq_id)
2060 {
2061 	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2062 	    ENTRIES_EQNUM, msiq_id);
2063 
2064 	return (H_EOK);
2065 }
2066 
2067 uint64_t
2068 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2069     pci_msi_valid_state_t *msi_valid_state)
2070 {
2071 	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2072 	    msi_num, ENTRIES_V);
2073 
2074 	return (H_EOK);
2075 }
2076 
2077 uint64_t
2078 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2079     pci_msi_valid_state_t msi_valid_state)
2080 {
2081 	uint64_t	ret = H_EOK;
2082 
2083 	switch (msi_valid_state) {
2084 	case PCI_MSI_VALID:
2085 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2086 		    ENTRIES_V);
2087 		break;
2088 	case PCI_MSI_INVALID:
2089 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2090 		    ENTRIES_V);
2091 		break;
2092 	default:
2093 		ret = H_EINVAL;
2094 	}
2095 
2096 	return (ret);
2097 }
2098 
2099 uint64_t
2100 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2101     pci_msi_state_t *msi_state)
2102 {
2103 	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2104 	    msi_num, ENTRIES_EQWR_N);
2105 
2106 	return (H_EOK);
2107 }
2108 
2109 uint64_t
2110 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2111     pci_msi_state_t msi_state)
2112 {
2113 	uint64_t	ret = H_EOK;
2114 
2115 	switch (msi_state) {
2116 	case PCI_MSI_STATE_IDLE:
2117 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2118 		    ENTRIES_EQWR_N);
2119 		break;
2120 	case PCI_MSI_STATE_DELIVERED:
2121 	default:
2122 		ret = H_EINVAL;
2123 		break;
2124 	}
2125 
2126 	return (ret);
2127 }
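
/*
 * Illustrative sketch (guarded out; px_example_msi_ack() is hypothetical):
 * acknowledge a delivered MSI.  Only the DELIVERED -> IDLE transition is
 * supported by hvio_msi_setstate() above.
 */
#if 0
static void
px_example_msi_ack(devhandle_t dev_hdl, msinum_t msi_num)
{
	pci_msi_state_t state;

	(void) hvio_msi_getstate(dev_hdl, msi_num, &state);
	if (state == PCI_MSI_STATE_DELIVERED)
		(void) hvio_msi_setstate(dev_hdl, msi_num,
		    PCI_MSI_STATE_IDLE);
}
#endif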
2128 
2129 /*
2130  * MSG Functions:
2131  */
2132 uint64_t
2133 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2134     msiqid_t *msiq_id)
2135 {
2136 	uint64_t	ret = H_EOK;
2137 
2138 	switch (msg_type) {
2139 	case PCIE_PME_MSG:
2140 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2141 		break;
2142 	case PCIE_PME_ACK_MSG:
2143 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2144 		    EQNUM);
2145 		break;
2146 	case PCIE_CORR_MSG:
2147 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2148 		break;
2149 	case PCIE_NONFATAL_MSG:
2150 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2151 		    EQNUM);
2152 		break;
2153 	case PCIE_FATAL_MSG:
2154 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2155 		break;
2156 	default:
2157 		ret = H_EINVAL;
2158 		break;
2159 	}
2160 
2161 	return (ret);
2162 }
2163 
2164 uint64_t
2165 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2166     msiqid_t msiq_id)
2167 {
2168 	uint64_t	ret = H_EOK;
2169 
2170 	switch (msg_type) {
2171 	case PCIE_PME_MSG:
2172 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2173 		break;
2174 	case PCIE_PME_ACK_MSG:
2175 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2176 		break;
2177 	case PCIE_CORR_MSG:
2178 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2179 		break;
2180 	case PCIE_NONFATAL_MSG:
2181 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2182 		break;
2183 	case PCIE_FATAL_MSG:
2184 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2185 		break;
2186 	default:
2187 		ret = H_EINVAL;
2188 		break;
2189 	}
2190 
2191 	return (ret);
2192 }
2193 
2194 uint64_t
2195 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2196     pcie_msg_valid_state_t *msg_valid_state)
2197 {
2198 	uint64_t	ret = H_EOK;
2199 
2200 	switch (msg_type) {
2201 	case PCIE_PME_MSG:
2202 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2203 		break;
2204 	case PCIE_PME_ACK_MSG:
2205 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2206 		    PME_TO_ACK_MAPPING, V);
2207 		break;
2208 	case PCIE_CORR_MSG:
2209 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2210 		break;
2211 	case PCIE_NONFATAL_MSG:
2212 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2213 		    ERR_NONFATAL_MAPPING, V);
2214 		break;
2215 	case PCIE_FATAL_MSG:
2216 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2217 		    V);
2218 		break;
2219 	default:
2220 		ret = H_EINVAL;
2221 		break;
2222 	}
2223 
2224 	return (ret);
2225 }
2226 
2227 uint64_t
2228 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2229     pcie_msg_valid_state_t msg_valid_state)
2230 {
2231 	uint64_t	ret = H_EOK;
2232 
2233 	switch (msg_valid_state) {
2234 	case PCIE_MSG_VALID:
2235 		switch (msg_type) {
2236 		case PCIE_PME_MSG:
2237 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2238 			break;
2239 		case PCIE_PME_ACK_MSG:
2240 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2241 			break;
2242 		case PCIE_CORR_MSG:
2243 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2244 			break;
2245 		case PCIE_NONFATAL_MSG:
2246 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2247 			break;
2248 		case PCIE_FATAL_MSG:
2249 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2250 			break;
2251 		default:
2252 			ret = H_EINVAL;
2253 			break;
2254 		}
2255 
2256 		break;
2257 	case PCIE_MSG_INVALID:
2258 		switch (msg_type) {
2259 		case PCIE_PME_MSG:
2260 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2261 			break;
2262 		case PCIE_PME_ACK_MSG:
2263 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2264 			break;
2265 		case PCIE_CORR_MSG:
2266 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2267 			break;
2268 		case PCIE_NONFATAL_MSG:
2269 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2270 			break;
2271 		case PCIE_FATAL_MSG:
2272 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2273 			break;
2274 		default:
2275 			ret = H_EINVAL;
2276 			break;
2277 		}
2278 		break;
2279 	default:
2280 		ret = H_EINVAL;
2281 	}
2282 
2283 	return (ret);
2284 }
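
/*
 * Illustrative sketch (guarded out; px_example_msg_route() is
 * hypothetical): route PME messages to an event queue, then enable that
 * routing.  The other message types follow the same pattern.
 */
#if 0
static void
px_example_msg_route(devhandle_t dev_hdl, msiqid_t msiq_id)
{
	(void) hvio_msg_setmsiq(dev_hdl, PCIE_PME_MSG, msiq_id);
	(void) hvio_msg_setvalid(dev_hdl, PCIE_PME_MSG, PCIE_MSG_VALID);
}
#endif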
2285 
2286 /*
2287  * Suspend/Resume Functions:
2288  *	(pec, mmu, ib)
2289  *	cb
 * Registers saved here are all initialized by the XXX_init functions.
2291  */
2292 uint64_t
2293 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2294 {
2295 	uint64_t	*config_state;
2296 	int		total_size;
2297 	int		i;
2298 
2299 	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2300 		return (H_EIO);
2301 
2302 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2303 	config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2304 
2305 	if (config_state == NULL) {
2306 		return (H_EIO);
2307 	}
2308 
2309 	/*
	 * Soft state for suspend/resume from pxu_t:
2311 	 * uint64_t	*pec_config_state;
2312 	 * uint64_t	*mmu_config_state;
2313 	 * uint64_t	*ib_intr_map;
2314 	 * uint64_t	*ib_config_state;
2315 	 * uint64_t	*xcb_config_state;
2316 	 */
2317 
2318 	/* Save the PEC configuration states */
2319 	pxu_p->pec_config_state = config_state;
2320 	for (i = 0; i < PEC_KEYS; i++) {
2321 		pxu_p->pec_config_state[i] =
2322 		    CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
2323 	}
2324 
2325 	/* Save the MMU configuration states */
2326 	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2327 	for (i = 0; i < MMU_KEYS; i++) {
2328 		pxu_p->mmu_config_state[i] =
2329 		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2330 	}
2331 
2332 	/* Save the interrupt mapping registers */
2333 	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2334 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2335 		pxu_p->ib_intr_map[i] =
2336 		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2337 	}
2338 
2339 	/* Save the IB configuration states */
2340 	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2341 	for (i = 0; i < IB_KEYS; i++) {
2342 		pxu_p->ib_config_state[i] =
2343 		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2344 	}
2345 
2346 	return (H_EOK);
2347 }
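
/*
 * Illustrative sketch (guarded out; px_example_suspend_resume() is
 * hypothetical): the expected pairing of the two calls.  hvio_resume()
 * below frees the state hvio_suspend() allocates, so the calls must stay
 * balanced.
 */
#if 0
static int
px_example_suspend_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
{
	if (hvio_suspend(dev_hdl, pxu_p) != H_EOK)
		return (DDI_FAILURE);

	/* ... hardware loses power and is brought back up ... */

	hvio_resume(dev_hdl, devino, pxu_p);

	return (DDI_SUCCESS);
}
#endif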
2348 
2349 void
2350 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2351 {
2352 	int		total_size;
2353 	sysino_t	sysino;
2354 	int		i;
2355 
2356 	/* Make sure that suspend actually did occur */
2357 	if (!pxu_p->pec_config_state) {
2358 		return;
2359 	}
2360 
2361 	/* Restore IB configuration states */
2362 	for (i = 0; i < IB_KEYS; i++) {
2363 		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2364 		    pxu_p->ib_config_state[i]);
2365 	}
2366 
2367 	/*
2368 	 * Restore the interrupt mapping registers
	 * and make sure the interrupts are idle.
2370 	 */
2371 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2372 		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2373 		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2374 		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2375 		    pxu_p->ib_intr_map[i]);
2376 	}
2377 
	/*
	 * Restore MMU configuration states.  Clear the TTE cache first.
	 */
2380 	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2381 
2382 	for (i = 0; i < MMU_KEYS; i++) {
2383 		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2384 		    pxu_p->mmu_config_state[i]);
2385 	}
2386 
	/*
	 * Restore PEC configuration states.  Keep all reset bits low until
	 * an error is detected.
	 */
2389 	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2390 
2391 	for (i = 0; i < PEC_KEYS; i++) {
2392 		CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
2393 		    pxu_p->pec_config_state[i]);
2394 	}
2395 
2396 	/* Enable PCI-E interrupt */
2397 	(void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);
2398 
2399 	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
2400 
2401 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2402 	kmem_free(pxu_p->pec_config_state, total_size);
2403 
2404 	pxu_p->pec_config_state = NULL;
2405 	pxu_p->mmu_config_state = NULL;
2406 	pxu_p->ib_config_state = NULL;
2407 	pxu_p->ib_intr_map = NULL;
2408 
2409 	msiq_resume(dev_hdl, pxu_p);
2410 }
2411 
2412 uint64_t
2413 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2414 {
2415 	uint64_t	*config_state;
2416 	int		i;
2417 
2418 	config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);
2419 
2420 	if (config_state == NULL) {
2421 		return (H_EIO);
2422 	}
2423 
2424 	/* Save the configuration states */
2425 	pxu_p->xcb_config_state = config_state;
2426 	for (i = 0; i < CB_KEYS; i++) {
2427 		pxu_p->xcb_config_state[i] =
2428 		    CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
2429 	}
2430 
2431 	return (H_EOK);
2432 }
2433 
2434 void
2435 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2436     devino_t devino, pxu_t *pxu_p)
2437 {
2438 	sysino_t	sysino;
2439 	int		i;
2440 
2441 	/*
2442 	 * No reason to have any reset bits high until an error is
2443 	 * detected on the link.
2444 	 */
2445 	CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2446 
2447 	ASSERT(pxu_p->xcb_config_state);
2448 
2449 	/* Restore the configuration states */
2450 	for (i = 0; i < CB_KEYS; i++) {
2451 		CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
2452 		    pxu_p->xcb_config_state[i]);
2453 	}
2454 
2455 	/* Enable XBC interrupt */
2456 	(void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);
2457 
2458 	(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
2459 
2460 	kmem_free(pxu_p->xcb_config_state, CB_SIZE);
2461 
2462 	pxu_p->xcb_config_state = NULL;
2463 }
2464 
2465 static uint64_t
2466 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2467 {
2468 	size_t	bufsz;
2469 	volatile uint64_t *cur_p;
2470 	int i;
2471 
2472 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2473 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2474 	    NULL)
2475 		return (H_EIO);
2476 
2477 	cur_p = pxu_p->msiq_config_state;
2478 
2479 	/* Save each EQ state */
2480 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2481 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2482 
2483 	/* Save MSI mapping registers */
2484 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2485 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2486 
2487 	/* Save all other MSIQ registers */
2488 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2489 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2490 	return (H_EOK);
2491 }
2492 
2493 static void
2494 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2495 {
2496 	size_t	bufsz;
2497 	uint64_t *cur_p, state;
2498 	int i;
2499 
2500 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2501 	cur_p = pxu_p->msiq_config_state;
2502 	/*
2503 	 * Initialize EQ base address register and
2504 	 * Interrupt Mondo Data 0 register.
2505 	 */
2506 	(void) hvio_msiq_init(dev_hdl, pxu_p);
2507 
2508 	/* Restore EQ states */
2509 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2510 		state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
2511 		if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
2512 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2513 			    i, ENTRIES_EN);
2514 	}
2515 
2516 	/* Restore MSI mapping */
2517 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2518 		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2519 
2520 	/*
2521 	 * Restore all other registers. MSI 32 bit address and
2522 	 * MSI 64 bit address are restored as part of this.
2523 	 */
2524 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2525 		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2526 
2527 	kmem_free(pxu_p->msiq_config_state, bufsz);
2528 	pxu_p->msiq_config_state = NULL;
2529 }
2530 
2531 /*
 * Sends a PME_Turn_Off message to put the link into the L2/L3 Ready state.
 * Called by px_goto_l23ready().
 * Returns DDI_SUCCESS or DDI_FAILURE.
2535  */
2536 int
2537 px_send_pme_turnoff(caddr_t csr_base)
2538 {
2539 	volatile uint64_t reg;
2540 
2541 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2542 	/* If already pending, return failure */
2543 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2544 		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
	    "tlu_pme_turn_off_generate = %llx\n", reg);
2546 		return (DDI_FAILURE);
2547 	}
2548 
	/* Write to the PME_Turn_Off register to broadcast the message. */
	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
2552 
2553 	return (DDI_SUCCESS);
2554 }
2555 
2556 /*
 * Checks whether the link is in the L1 idle state.
 * Returns
 * DDI_SUCCESS - if the link is in L1 idle
 * DDI_FAILURE - if the link is not in L1 idle
2561  */
2562 int
2563 px_link_wait4l1idle(caddr_t csr_base)
2564 {
2565 	uint8_t ltssm_state;
2566 	int ntries = px_max_l1_tries;
2567 
	/* Poll at least once so ltssm_state is always initialized. */
	for (;;) {
2569 		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2570 		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2571 			break;
2572 		delay(1);
2573 	}
2574 	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2575 	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2576 }
2577 
2578 /*
 * Transition the link back to L0 after it has gone down.
2580  */
2581 int
2582 px_link_retrain(caddr_t csr_base)
2583 {
2584 	volatile uint64_t reg;
2585 
2586 	reg = CSR_XR(csr_base, TLU_CONTROL);
2587 	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2588 		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2589 		return (DDI_FAILURE);
2590 	}
2591 
2592 	/* Clear link down bit in TLU Other Event Clear Status Register. */
2593 	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2594 
2595 	/* Clear Drain bit in TLU Status Register */
2596 	CSR_BS(csr_base, TLU_STATUS, DRAIN);
2597 
2598 	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2599 	reg = CSR_XR(csr_base, TLU_CONTROL);
2600 	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2601 	CSR_XS(csr_base, TLU_CONTROL, reg);
2602 
2603 	return (DDI_SUCCESS);
2604 }
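
/*
 * Illustrative sketch (guarded out; px_example_link_cycle() is
 * hypothetical): the expected ordering with px_enable_detect_quiet()
 * below, which parks the link in Detect.Quiet before it is taken down;
 * px_link_retrain() then releases it toward L0.
 */
#if 0
static void
px_example_link_cycle(caddr_t csr_base)
{
	px_enable_detect_quiet(csr_base);

	/* ... take the link down and bring the hardware back up ... */

	(void) px_link_retrain(csr_base);
}
#endif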
2605 
2606 void
2607 px_enable_detect_quiet(caddr_t csr_base)
2608 {
2609 	volatile uint64_t tlu_ctrl;
2610 
2611 	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
2612 	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
2613 	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
2614 }
2615