/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 */
static uint64_t	pec_config_state_regs[] = {
	PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	ILU_ERROR_LOG_ENABLE,
	ILU_INTERRUPT_ENABLE,
	TLU_CONTROL,
	TLU_OTHER_EVENT_LOG_ENABLE,
	TLU_OTHER_EVENT_INTERRUPT_ENABLE,
	TLU_DEVICE_CONTROL,
	TLU_LINK_CONTROL,
	TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
	TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
	TLU_CORRECTABLE_ERROR_LOG_ENABLE,
	TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
	LPU_LINK_LAYER_INTERRUPT_MASK,
	LPU_PHY_INTERRUPT_MASK,
	LPU_RECEIVE_PHY_INTERRUPT_MASK,
	LPU_TRANSMIT_PHY_INTERRUPT_MASK,
	LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
	LPU_LTSSM_INTERRUPT_MASK,
	LPU_RESET,
	LPU_DEBUG_CONFIG,
	LPU_INTERRUPT_MASK,
	LPU_LINK_LAYER_CONFIG,
	LPU_FLOW_CONTROL_UPDATE_CONTROL,
	LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
	LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
	LPU_REPLAY_BUFFER_MAX_ADDRESS,
	LPU_TXLINK_RETRY_FIFO_POINTER,
	LPU_LTSSM_CONFIG2,
	LPU_LTSSM_CONFIG3,
	LPU_LTSSM_CONFIG4,
	LPU_LTSSM_CONFIG5,
	DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	DMC_DEBUG_SELECT_FOR_PORT_A,
	DMC_DEBUG_SELECT_FOR_PORT_B
};
#define	PEC_SIZE (sizeof (pec_config_state_regs))
#define	PEC_KEYS (PEC_SIZE / sizeof (uint64_t))
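
/*
 * The suspend/resume code walks these tables using the *_KEYS counts
 * derived above.  A minimal sketch of the save-side loop (illustrative
 * only; "config_state" is a hypothetical save area, not a name defined
 * in this file):
 *
 *	for (i = 0; i < PEC_KEYS; i++)
 *		config_state[i] = CSR_XR(csr_base, pec_config_state_regs[i]);
 */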

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the CB module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t	cb_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	CB_SIZE (sizeof (cb_config_state_regs))
#define	CB_KEYS (CB_SIZE / sizeof (uint64_t))

static uint64_t	msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V CB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}
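
/*
 * A note on the access pattern used throughout this file: CSR_XS()
 * stores a 64-bit value to a CSR and CSR_XR() loads one back, so each
 * programming step is followed by a read-back inside the DBG call to
 * show what the hardware actually latched:
 *
 *	CSR_XS(csr_base, <REG>, val);
 *	DBG(..., "<REG>: 0x%llx\n", CSR_XR(csr_base, <REG>));
 */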

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms and
	 * OBP will set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * Non-posted write ordering - set the NPWR_EN bit to force
	 * serialization of writes.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);

	if (pxu_p->chip_id == FIRE_VER_10) {
		val |= (TLU_CONTROL_L0S_TIM_DEFAULT <<
		    FIRE10_TLU_CONTROL_L0S_TIM) |
		    (1ull << FIRE10_TLU_CONTROL_NPWR_EN) |
		    TLU_CONTROL_CONFIG_DEFAULT;
	} else {
		/* Default case is FIRE2.0 */
		val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
		    (1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;
	}

	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes which is 000b.  Bits [7:5] are Max Payload Size, which
	 * start at 128 bytes which is 000b.  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used since then L0s exit
	 * latency should be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 1.0 section 1.2.11.1, table 1-17.
	 */
	int fire10_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE]
	    [LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x76,  0x70,  0x58},
		{0x1A0,  0x76,  0x6B,  0x61},
		{0x22F,  0x9A,  0x6A,  0x6A},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 1.0 section 1.2.11.2, table 1-18.
	 */
	int fire10_replay_timer_table[LINK_MAX_PKT_ARR_SIZE]
	    [LINK_WIDTH_ARR_SIZE] = {
		{0x2C7,  0x108, 0xF6,  0xBD},
		{0x4E0,  0x162, 0x141, 0xF1},
		{0x68D,  0x1CE, 0x102, 0x102},
		{0xC8D,  0x34E, 0x1C2, 0x1C2},
		{0x188D, 0x64E, 0x342, 0x342},
		{0x308D, 0xC4E, 0x642, 0x642}
	};

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x49,  0x43,  0x30},
		{0x1A0,  0x76,  0x6B,  0x48},
		{0x22F,  0x9A,  0x56,  0x56},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table
	 * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379,  0x112, 0xFC,  0xB4},
		{0x618,  0x1BA, 0x192, 0x10E},
		{0x831,  0x242, 0x143, 0x143},
		{0xFB1,  0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};
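
	/*
	 * All four tables above are indexed as [max_payload][link_width]:
	 * the six rows correspond to the payload encodings 128B through
	 * 4096B, and the four columns to link widths x1, x4, x8 and x16,
	 * matching the index conversions done below.
	 */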
	/*
	 * Get the Link Width.  See the table above the LINK_WIDTH_ARR_SIZE
	 * #define.  Only Link Widths of x1, x4, and x8 are supported.
	 * If any other width is reported, default to x1.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}

	/*
	 * Get the Max Payload Size.
	 * See the table above the LINK_MAX_PKT_ARR_SIZE #define.
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		max_payload = CSR_FR(csr_base,
		    FIRE10_LPU_LINK_LAYER_CONFIG, MAX_PAYLOAD);
	} else {
		/* Default case is FIRE2.0 */
		max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
		    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);
	}

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
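
	/*
	 * Worked example of the encoding: payload bytes == 0x80 <<
	 * max_payload, so encodings 0 through 5 select the 128B, 256B,
	 * 512B, 1024B, 2048B and 4096B rows of the tables above.
	 */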

	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default 0x10 means a width of x16.  The problem is
	 * this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */

	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU Link Layer interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 * Expect OBP 0x1D4C
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as timer0 register above, except for bits [14:0]
	 * have the timer values for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		val = fire10_acknak_timer_table[max_payload][link_width];
	} else {
		/* Default case is FIRE2.0 */
		val = acknak_timer_table[max_payload][link_width];
	}

	CSR_XS(csr_base,
	    LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		val = fire10_replay_timer_table[max_payload][link_width];
	} else {
		/* Default case is FIRE2.0 */
		val = replay_timer_table[max_payload][link_width];
	}

	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Test mode enables which test mode?
	 * Programming model needed for the Receiver Reset Lane N
	 * bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for LTSSM 8 ns timeout value and
	 * LTSSM 20 ns timeout value.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does the '12 ms timeout value' mean?
	 */
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));
	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP
	 * 0x8000000000000003
	 */
	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);
	lpu_init(csr_base, pxu_p);
	dmc_init(csr_base, pxu_p);

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel
	 * 0x800000000000000F
	 */
	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
	uint_t		obp_tsb_entries, obp_tsb_size;

	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB
	 */
	val = CSR_XR(csr_base, MMU_TSB_CONTROL);

	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

	obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
	obp_tsb_size = tsb_ctrl & 0xF;

	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);
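
	/*
	 * The copy below lands OBP's TTEs in the tail of the new TSB.
	 * For example (hypothetical sizes), a 64K-entry kernel TSB and
	 * a 2K-entry OBP TSB would place OBP's entries at indices
	 * 63488..65535, keeping OBP's DVMA mappings live across the
	 * handoff.
	 */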

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */

	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);
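
	/*
	 * Worked example of the size encoding: for a 512KB (0x80000
	 * byte) TSB the condition tsb_size < (0x2000 << i) first fails
	 * at i == 6, since 0x2000 << 6 == 0x80000, so the loop above
	 * exits with i == 6 as the encoded table size.
	 */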

	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * CSR_V MMU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

/* ARGSUSED */
uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages, io_attributes_t io_attributes,
    void *addr, size_t pfn_index, int flag)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	attr = MMU_TTE_V;
	int		i;

	if (io_attributes & PCI_MAP_ATTR_WRITE)
		attr |= MMU_TTE_W;
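
	/*
	 * Each TSB entry written below is a TTE: the page frame number
	 * (taken from the DMA request's pfn list or from a kernel VA
	 * lookup), shifted into the physical address field and OR'ed
	 * with the valid (and, for writable mappings, write) bits
	 * computed above:
	 *
	 *	tte = MMU_PTOB(pfn) | MMU_TTE_V [| MMU_TTE_W];
	 */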

	if (flag == MMU_MAP_MP) {
		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;

		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
			px_iopfn_t	pfn = PX_GET_MP_PFN(mp, pfn_index);

			pxu_p->tsb_vaddr[tsb_index] =
			    MMU_PTOB(pfn) | attr;
		}
	} else {
		caddr_t a = (caddr_t)addr;

		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);

			pxu_p->tsb_vaddr[tsb_index] =
			    MMU_PTOB(pfn) | attr;
		}
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	int		i;

	for (i = 0; i < pages; i++, tsb_index++) {
		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	*tte_addr;
	uint64_t	ret = H_EOK;

	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;

	if (*tte_addr & MMU_TTE_V) {
		*r_addr_p = MMU_TTETOPA(*tte_addr);
		*attributes_p = (*tte_addr & MMU_TTE_W) ?
		    PCI_MAP_ATTR_WRITE : PCI_MAP_ATTR_READ;
	} else {
		*r_addr_p = 0;
		*attributes_p = 0;
		ret = H_ENOMAP;
	}

	return (ret);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
	uint64_t	pfn = MMU_BTOP(ra);

1727 	*io_addr_p = MMU_BYPASS_BASE | ra |
1728 	    (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);
1729 
1730 	return (H_EOK);
1731 }
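
/*
 * Worked example (hypothetical, not built): for a real address backed
 * by ordinary memory the bypass address is MMU_BYPASS_BASE | ra; for
 * non-memory pages MMU_BYPASS_NONCACHE is also set.
 */
#if 0
static void
example_bypass(devhandle_t dev_hdl)
{
	io_addr_t ba;

	(void) hvio_iommu_getbypass(dev_hdl, 0x10000000ull,
	    PCI_MAP_ATTR_READ, &ba);
	/* ba == MMU_BYPASS_BASE | 0x10000000 if the pfn is memory */
}
#endif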
1732 
1733 /*
1734  * Generic IO Interrupt Services
1735  */
1736 
1737 /*
1738  * Converts a device specific interrupt number given by the
1739  * arguments devhandle and devino into a system specific ino.
1740  */
1741 /* ARGSUSED */
1742 uint64_t
1743 hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
1744     sysino_t *sysino)
1745 {
1746 	if (devino >= INTERRUPT_MAPPING_ENTRIES) {
1747 		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
1748 		return (H_ENOINTR);
1749 	}
1750 
1751 	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
1752 
1753 	return (H_EOK);
1754 }
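
/*
 * DEVINO_TO_SYSINO presumably folds the port's interrupt group number
 * into the bits above INO_BITS (compare the Interrupt Mondo Data 0
 * setup in hvio_msiq_init below), making SYSINO_TO_DEVINO its inverse.
 * A sketch of the expected round trip (hypothetical, not built):
 */
#if 0
static void
example_ino_round_trip(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t ino)
{
	sysino_t sysino;

	if (hvio_intr_devino_to_sysino(dev_hdl, pxu_p, ino, &sysino) ==
	    H_EOK)
		ASSERT(SYSINO_TO_DEVINO(sysino) == ino);
}
#endif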
1755 
1756 /*
1757  * Returns state in intr_valid_state if the interrupt defined by sysino
1758  * is valid (enabled) or not-valid (disabled).
1759  */
1760 uint64_t
1761 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1762     intr_valid_state_t *intr_valid_state)
1763 {
1764 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1765 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
1766 		*intr_valid_state = INTR_VALID;
1767 	} else {
1768 		*intr_valid_state = INTR_NOTVALID;
1769 	}
1770 
1771 	return (H_EOK);
1772 }
1773 
1774 /*
1775  * Sets the 'valid' state of the interrupt defined by
1776  * the argument sysino to the state defined by the
1777  * argument intr_valid_state.
1778  */
1779 uint64_t
1780 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
1781     intr_valid_state_t intr_valid_state)
1782 {
1783 	switch (intr_valid_state) {
1784 	case INTR_VALID:
1785 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1786 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1787 		break;
1788 	case INTR_NOTVALID:
1789 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1790 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1791 		break;
1792 	default:
1793 		return (H_EINVAL);
1794 	}
1795 
1796 	return (H_EOK);
1797 }
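
/*
 * Typical enable sequence (hypothetical caller, not built): translate
 * the device ino to a system ino, then mark the mapping entry valid.
 */
#if 0
static void
example_enable_intr(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t ino)
{
	sysino_t sysino;

	if (hvio_intr_devino_to_sysino(dev_hdl, pxu_p, ino, &sysino) !=
	    H_EOK)
		return;
	(void) hvio_intr_setvalid(dev_hdl, sysino, INTR_VALID);
}
#endif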
1798 
1799 /*
1800  * Returns the current state of the interrupt given by the sysino
1801  * argument.
1802  */
1803 uint64_t
1804 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
1805     intr_state_t *intr_state)
1806 {
1807 	intr_state_t state;
1808 
1809 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1810 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
1811 
1812 	switch (state) {
1813 	case INTERRUPT_IDLE_STATE:
1814 		*intr_state = INTR_IDLE_STATE;
1815 		break;
1816 	case INTERRUPT_RECEIVED_STATE:
1817 		*intr_state = INTR_RECEIVED_STATE;
1818 		break;
1819 	case INTERRUPT_PENDING_STATE:
1820 		*intr_state = INTR_DELIVERED_STATE;
1821 		break;
1822 	default:
1823 		return (H_EINVAL);
1824 	}
1825 
1826 	return (H_EOK);
1828 }
1829 
1830 /*
1831  * Sets the current state of the interrupt given by the sysino
1832  * argument to the value given in the argument intr_state.
1833  *
1834  * Note: Setting the state to INTR_IDLE clears any pending
1835  * interrupt for sysino.
1836  */
1837 uint64_t
1838 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
1839     intr_state_t intr_state)
1840 {
1841 	intr_state_t state;
1842 
1843 	switch (intr_state) {
1844 	case INTR_IDLE_STATE:
1845 		state = INTERRUPT_IDLE_STATE;
1846 		break;
1847 	case INTR_DELIVERED_STATE:
1848 		state = INTERRUPT_PENDING_STATE;
1849 		break;
1850 	default:
1851 		return (H_EINVAL);
1852 	}
1853 
1854 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1855 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
1856 
1857 	return (H_EOK);
1858 }
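
/*
 * Servicing sketch (hypothetical, not built): once a delivered mondo
 * has been handled, returning the interrupt to idle allows the next
 * mondo to be sent; per the note above, this also clears anything
 * still pending for sysino.
 */
#if 0
static void
example_intr_done(devhandle_t dev_hdl, sysino_t sysino)
{
	intr_state_t state;

	(void) hvio_intr_getstate(dev_hdl, sysino, &state);
	if (state == INTR_DELIVERED_STATE)
		(void) hvio_intr_setstate(dev_hdl, sysino,
		    INTR_IDLE_STATE);
}
#endif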
1859 
1860 /*
1861  * Returns the cpuid that is the current target of the
1862  * interrupt given by the sysino argument.
1863  *
1864  * The cpuid value returned is undefined if the target
1865  * has not been set via intr_settarget.
1866  */
1867 uint64_t
1868 hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
1869 {
1870 	*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1871 	    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
1872 
1873 	return (H_EOK);
1874 }
1875 
1876 /*
1877  * Set the target cpu for the interrupt defined by the argument
1878  * sysino to the target cpu value defined by the argument cpuid.
1879  */
1880 uint64_t
1881 hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
1882 {
1884 	uint64_t	val, intr_controller;
1885 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
1886 
1887 	/*
1888 	 * For now, we assign the interrupt controller in a
1889 	 * round-robin fashion.  Later, we may need a more
1890 	 * efficient assignment algorithm.
1891 	 */
1892 	intr_controller = 0x1ull << (cpuid % 4);
1893 
1894 	val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
1895 	    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
1896 	    ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
1897 	    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
1898 
1899 	/* For EQ interrupts, set the Mondo Data mode (MDO_MODE) bit */
1900 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
1901 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
1902 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
1903 
1904 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
1905 
1906 	return (H_EOK);
1907 }
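
/*
 * Worked example for the controller choice above: cpuids 0, 1, 2, 3,
 * 4 select interrupt controllers 0x1, 0x2, 0x4, 0x8, 0x1 respectively,
 * since the controller bit is 0x1ull << (cpuid % 4).
 */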
1908 
1909 /*
1910  * MSIQ Functions:
1911  */
1912 uint64_t
1913 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
1914 {
1915 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
1916 	    (uint64_t)pxu_p->msiq_mapped_p);
1917 	DBG(DBG_IB, NULL,
1918 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
1919 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
1920 
1921 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
1922 	    (uint64_t)ID_TO_IGN(pxu_p->portid) << INO_BITS);
1923 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
1924 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
1925 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
1926 
1927 	return (H_EOK);
1928 }
1929 
1930 uint64_t
1931 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1932     pci_msiq_valid_state_t *msiq_valid_state)
1933 {
1934 	uint32_t	eq_state;
1935 	uint64_t	ret = H_EOK;
1936 
1937 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1938 	    msiq_id, ENTRIES_STATE);
1939 
1940 	switch (eq_state) {
1941 	case EQ_IDLE_STATE:
1942 		*msiq_valid_state = PCI_MSIQ_INVALID;
1943 		break;
1944 	case EQ_ACTIVE_STATE:
1945 	case EQ_ERROR_STATE:
1946 		*msiq_valid_state = PCI_MSIQ_VALID;
1947 		break;
1948 	default:
1949 		ret = H_EIO;
1950 		break;
1951 	}
1952 
1953 	return (ret);
1954 }
1955 
1956 uint64_t
1957 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1958     pci_msiq_valid_state_t msiq_valid_state)
1959 {
1960 	uint64_t	ret = H_EOK;
1961 
1962 	switch (msiq_valid_state) {
1963 	case PCI_MSIQ_INVALID:
1964 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
1965 		    msiq_id, ENTRIES_DIS);
1966 		break;
1967 	case PCI_MSIQ_VALID:
1968 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
1969 		    msiq_id, ENTRIES_EN);
1970 		break;
1971 	default:
1972 		ret = H_EINVAL;
1973 		break;
1974 	}
1975 
1976 	return (ret);
1977 }
1978 
1979 uint64_t
1980 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
1981     pci_msiq_state_t *msiq_state)
1982 {
1983 	uint32_t	eq_state;
1984 	uint64_t	ret = H_EOK;
1985 
1986 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1987 	    msiq_id, ENTRIES_STATE);
1988 
1989 	switch (eq_state) {
1990 	case EQ_IDLE_STATE:
1991 	case EQ_ACTIVE_STATE:
1992 		*msiq_state = PCI_MSIQ_STATE_IDLE;
1993 		break;
1994 	case EQ_ERROR_STATE:
1995 		*msiq_state = PCI_MSIQ_STATE_ERROR;
1996 		break;
1997 	default:
1998 		ret = H_EIO;
1999 	}
2000 
2001 	return (ret);
2002 }
2003 
2004 uint64_t
2005 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2006     pci_msiq_state_t msiq_state)
2007 {
2008 	uint32_t	eq_state;
2009 	uint64_t	ret = H_EOK;
2010 
2011 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2012 	    msiq_id, ENTRIES_STATE);
2013 
2014 	switch (eq_state) {
2015 	case EQ_IDLE_STATE:
2016 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2017 			ret = H_EIO;
2018 		break;
2019 	case EQ_ACTIVE_STATE:
2020 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2021 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2022 			    msiq_id, ENTRIES_ENOVERR);
2023 		else
2024 			ret = H_EIO;
2025 		break;
2026 	case EQ_ERROR_STATE:
2027 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
2028 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2029 			    msiq_id, ENTRIES_E2I);
2030 		else
2031 			ret = H_EIO;
2032 		break;
2033 	default:
2034 		ret = H_EIO;
2035 	}
2036 
2037 	return (ret);
2038 }
2039 
2040 uint64_t
2041 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2042     msiqhead_t *msiq_head)
2043 {
2044 	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
2045 	    msiq_id, ENTRIES_HEAD);
2046 
2047 	return (H_EOK);
2048 }
2049 
2050 uint64_t
2051 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2052     msiqhead_t msiq_head)
2053 {
2054 	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2055 	    ENTRIES_HEAD, msiq_head);
2056 
2057 	return (H_EOK);
2058 }
2059 
2060 uint64_t
2061 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2062     msiqtail_t *msiq_tail)
2063 {
2064 	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2065 	    msiq_id, ENTRIES_TAIL);
2066 
2067 	return (H_EOK);
2068 }
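
/*
 * Consumption sketch (hypothetical, not built): a dispatcher drains an
 * event queue by walking records from head to tail and then writing
 * the head back.  Record decoding is omitted, and MSIQ_REC_CNT (the
 * number of records per queue) is a hypothetical constant standing in
 * for the real queue depth.
 */
#if 0
static void
example_drain_msiq(devhandle_t dev_hdl, msiqid_t msiq_id)
{
	msiqhead_t head;
	msiqtail_t tail;

	(void) hvio_msiq_gethead(dev_hdl, msiq_id, &head);
	(void) hvio_msiq_gettail(dev_hdl, msiq_id, &tail);
	while (head != tail) {
		/* ... decode and dispatch the record at "head" ... */
		head = (head + 1) % MSIQ_REC_CNT;
	}
	(void) hvio_msiq_sethead(dev_hdl, msiq_id, head);
}
#endif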
2069 
2070 /*
2071  * MSI Functions:
2072  */
2073 uint64_t
2074 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2075 {
2076 	/* Reserve PCI MEM 32 resources to perform 32 bit MSI transactions */
2077 	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2078 	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
2079 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2080 	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2081 
2082 	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2083 	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2084 	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
2085 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2086 	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2087 
2088 	return (H_EOK);
2089 }
2090 
2091 uint64_t
2092 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2093     msiqid_t *msiq_id)
2094 {
2095 	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2096 	    msi_num, ENTRIES_EQNUM);
2097 
2098 	return (H_EOK);
2099 }
2100 
2101 uint64_t
2102 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2103     msiqid_t msiq_id)
2104 {
2105 	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2106 	    ENTRIES_EQNUM, msiq_id);
2107 
2108 	return (H_EOK);
2109 }
2110 
2111 uint64_t
2112 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2113     pci_msi_valid_state_t *msi_valid_state)
2114 {
2115 	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2116 	    msi_num, ENTRIES_V);
2117 
2118 	return (H_EOK);
2119 }
2120 
2121 uint64_t
2122 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2123     pci_msi_valid_state_t msi_valid_state)
2124 {
2125 	uint64_t	ret = H_EOK;
2126 
2127 	switch (msi_valid_state) {
2128 	case PCI_MSI_VALID:
2129 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2130 		    ENTRIES_V);
2131 		break;
2132 	case PCI_MSI_INVALID:
2133 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2134 		    ENTRIES_V);
2135 		break;
2136 	default:
2137 		ret = H_EINVAL;
2138 	}
2139 
2140 	return (ret);
2141 }
2142 
2143 uint64_t
2144 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2145     pci_msi_state_t *msi_state)
2146 {
2147 	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2148 	    msi_num, ENTRIES_EQWR_N);
2149 
2150 	return (H_EOK);
2151 }
2152 
2153 uint64_t
2154 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2155     pci_msi_state_t msi_state)
2156 {
2157 	uint64_t	ret = H_EOK;
2158 
2159 	switch (msi_state) {
2160 	case PCI_MSI_STATE_IDLE:
2161 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2162 		    ENTRIES_EQWR_N);
2163 		break;
2164 	case PCI_MSI_STATE_DELIVERED:
2165 	default:
2166 		ret = H_EINVAL;
2167 		break;
2168 	}
2169 
2170 	return (ret);
2171 }
2172 
2173 /*
2174  * MSG Functions:
2175  */
2176 uint64_t
2177 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2178     msiqid_t *msiq_id)
2179 {
2180 	uint64_t	ret = H_EOK;
2181 
2182 	switch (msg_type) {
2183 	case PCIE_PME_MSG:
2184 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2185 		break;
2186 	case PCIE_PME_ACK_MSG:
2187 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2188 		    EQNUM);
2189 		break;
2190 	case PCIE_CORR_MSG:
2191 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2192 		break;
2193 	case PCIE_NONFATAL_MSG:
2194 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2195 		    EQNUM);
2196 		break;
2197 	case PCIE_FATAL_MSG:
2198 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2199 		break;
2200 	default:
2201 		ret = H_EINVAL;
2202 		break;
2203 	}
2204 
2205 	return (ret);
2206 }
2207 
2208 uint64_t
2209 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2210     msiqid_t msiq_id)
2211 {
2212 	uint64_t	ret = H_EOK;
2213 
2214 	switch (msg_type) {
2215 	case PCIE_PME_MSG:
2216 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2217 		break;
2218 	case PCIE_PME_ACK_MSG:
2219 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2220 		break;
2221 	case PCIE_CORR_MSG:
2222 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2223 		break;
2224 	case PCIE_NONFATAL_MSG:
2225 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2226 		break;
2227 	case PCIE_FATAL_MSG:
2228 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2229 		break;
2230 	default:
2231 		ret = H_EINVAL;
2232 		break;
2233 	}
2234 
2235 	return (ret);
2236 }
2237 
2238 uint64_t
2239 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2240     pcie_msg_valid_state_t *msg_valid_state)
2241 {
2242 	uint64_t	ret = H_EOK;
2243 
2244 	switch (msg_type) {
2245 	case PCIE_PME_MSG:
2246 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2247 		break;
2248 	case PCIE_PME_ACK_MSG:
2249 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2250 		    PME_TO_ACK_MAPPING, V);
2251 		break;
2252 	case PCIE_CORR_MSG:
2253 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2254 		break;
2255 	case PCIE_NONFATAL_MSG:
2256 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2257 		    ERR_NONFATAL_MAPPING, V);
2258 		break;
2259 	case PCIE_FATAL_MSG:
2260 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2261 		    V);
2262 		break;
2263 	default:
2264 		ret = H_EINVAL;
2265 		break;
2266 	}
2267 
2268 	return (ret);
2269 }
2270 
2271 uint64_t
2272 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2273     pcie_msg_valid_state_t msg_valid_state)
2274 {
2275 	uint64_t	ret = H_EOK;
2276 
2277 	switch (msg_valid_state) {
2278 	case PCIE_MSG_VALID:
2279 		switch (msg_type) {
2280 		case PCIE_PME_MSG:
2281 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2282 			break;
2283 		case PCIE_PME_ACK_MSG:
2284 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2285 			break;
2286 		case PCIE_CORR_MSG:
2287 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2288 			break;
2289 		case PCIE_NONFATAL_MSG:
2290 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2291 			break;
2292 		case PCIE_FATAL_MSG:
2293 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2294 			break;
2295 		default:
2296 			ret = H_EINVAL;
2297 			break;
2298 		}
2299 
2300 		break;
2301 	case PCIE_MSG_INVALID:
2302 		switch (msg_type) {
2303 		case PCIE_PME_MSG:
2304 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2305 			break;
2306 		case PCIE_PME_ACK_MSG:
2307 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2308 			break;
2309 		case PCIE_CORR_MSG:
2310 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2311 			break;
2312 		case PCIE_NONFATAL_MSG:
2313 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2314 			break;
2315 		case PCIE_FATAL_MSG:
2316 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2317 			break;
2318 		default:
2319 			ret = H_EINVAL;
2320 			break;
2321 		}
2322 		break;
2323 	default:
2324 		ret = H_EINVAL;
2325 	}
2326 
2327 	return (ret);
2328 }
2329 
2330 /*
2331  * Suspend/Resume Functions:
2332  *	(pec, mmu, ib)
2333  *	cb
2334  * Registers saved here are all initialized by the *_init functions.
2335  */
2336 uint64_t
2337 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2338 {
2339 	uint64_t	*config_state;
2340 	int		total_size;
2341 	int		i;
2342 
2343 	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2344 		return (H_EIO);
2345 
2346 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2347 	config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2348 
2349 	if (config_state == NULL) {
2350 		return (H_EIO);
2351 	}
2352 
2353 	/*
2354 	 * Soft state for suspend/resume, from pxu_t:
2355 	 * uint64_t	*pec_config_state;
2356 	 * uint64_t	*mmu_config_state;
2357 	 * uint64_t	*ib_intr_map;
2358 	 * uint64_t	*ib_config_state;
2359 	 * uint64_t	*xcb_config_state;
2360 	 */
2361 
2362 	/* Save the PEC configuration states */
2363 	pxu_p->pec_config_state = config_state;
2364 	for (i = 0; i < PEC_KEYS; i++) {
2365 		pxu_p->pec_config_state[i] =
2366 		    CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
2367 	}
2368 
2369 	/* Save the MMU configuration states */
2370 	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2371 	for (i = 0; i < MMU_KEYS; i++) {
2372 		pxu_p->mmu_config_state[i] =
2373 		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2374 	}
2375 
2376 	/* Save the interrupt mapping registers */
2377 	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2378 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2379 		pxu_p->ib_intr_map[i] =
2380 		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2381 	}
2382 
2383 	/* Save the IB configuration states */
2384 	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2385 	for (i = 0; i < IB_KEYS; i++) {
2386 		pxu_p->ib_config_state[i] =
2387 		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2388 	}
2389 
2390 	return (H_EOK);
2391 }
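
/*
 * Layout of the single buffer carved up in hvio_suspend, in order:
 *
 *	pec_config_state	PEC_KEYS entries
 *	mmu_config_state	MMU_KEYS entries
 *	ib_intr_map		INTERRUPT_MAPPING_ENTRIES entries
 *	ib_config_state		IB_KEYS entries
 *
 * for a total of PEC_SIZE + MMU_SIZE + IB_MAP_SIZE + IB_SIZE bytes.
 */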
2392 
2393 void
2394 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2395 {
2396 	int		total_size;
2397 	sysino_t	sysino;
2398 	int		i;
2399 
2400 	/* Make sure that suspend actually did occur */
2401 	if (!pxu_p->pec_config_state) {
2402 		return;
2403 	}
2404 
2405 	/* Restore IB configuration states */
2406 	for (i = 0; i < IB_KEYS; i++) {
2407 		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2408 		    pxu_p->ib_config_state[i]);
2409 	}
2410 
2411 	/*
2412 	 * Restore the interrupt mapping registers and make sure
2413 	 * the interrupts are idle.
2414 	 */
2415 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2416 		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2417 		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2418 		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2419 		    pxu_p->ib_intr_map[i]);
2420 	}
2421 
2422 	/* Restore MMU configuration states */
2423 	/* Invalidate the TTE cache first (write -1ull). */
2424 	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2425 
2426 	for (i = 0; i < MMU_KEYS; i++) {
2427 		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2428 		    pxu_p->mmu_config_state[i]);
2429 	}
2430 
2431 	/* Restore PEC configuration states */
2432 	/* Make sure all reset bits are low until an error is detected */
2433 	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2434 
2435 	for (i = 0; i < PEC_KEYS; i++) {
2436 		CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
2437 		    pxu_p->pec_config_state[i]);
2438 	}
2439 
2440 	/* Enable PCI-E interrupt */
2441 	(void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);
2442 
2443 	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
2444 
2445 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2446 	kmem_free(pxu_p->pec_config_state, total_size);
2447 
2448 	pxu_p->pec_config_state = NULL;
2449 	pxu_p->mmu_config_state = NULL;
2450 	pxu_p->ib_config_state = NULL;
2451 	pxu_p->ib_intr_map = NULL;
2452 
2453 	msiq_resume(dev_hdl, pxu_p);
2454 }
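
/*
 * Pairing sketch (hypothetical, not built): a DDI_SUSPEND/DDI_RESUME
 * path would bracket these calls roughly as follows; error handling
 * and locking are omitted.
 */
#if 0
	/* in detach(9E), on DDI_SUSPEND: */
	if (hvio_suspend(dev_hdl, pxu_p) != H_EOK)
		return (DDI_FAILURE);

	/* in attach(9E), on DDI_RESUME: */
	hvio_resume(dev_hdl, devino, pxu_p);
#endif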
2455 
2456 uint64_t
2457 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2458 {
2459 	uint64_t	*config_state;
2460 	int		i;
2461 
2462 	config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);
2463 
2464 	if (config_state == NULL) {
2465 		return (H_EIO);
2466 	}
2467 
2468 	/* Save the configuration states */
2469 	pxu_p->xcb_config_state = config_state;
2470 	for (i = 0; i < CB_KEYS; i++) {
2471 		pxu_p->xcb_config_state[i] =
2472 		    CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
2473 	}
2474 
2475 	return (H_EOK);
2476 }
2477 
2478 void
2479 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2480     devino_t devino, pxu_t *pxu_p)
2481 {
2482 	sysino_t	sysino;
2483 	int		i;
2484 
2485 	/*
2486 	 * No reason to have any reset bits high until an error is
2487 	 * detected on the link.
2488 	 */
2489 	CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2490 
2491 	ASSERT(pxu_p->xcb_config_state);
2492 
2493 	/* Restore the configuration states */
2494 	for (i = 0; i < CB_KEYS; i++) {
2495 		CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
2496 		    pxu_p->xcb_config_state[i]);
2497 	}
2498 
2499 	/* Enable XBC interrupt */
2500 	(void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);
2501 
2502 	(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
2503 
2504 	kmem_free(pxu_p->xcb_config_state, CB_SIZE);
2505 
2506 	pxu_p->xcb_config_state = NULL;
2507 }
2508 
2509 static uint64_t
2510 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2511 {
2512 	size_t	bufsz;
2513 	volatile uint64_t *cur_p;
2514 	int i;
2515 
2516 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2517 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2518 	    NULL)
2519 		return (H_EIO);
2520 
2521 	cur_p = pxu_p->msiq_config_state;
2522 
2523 	/* Save each EQ state */
2524 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2525 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2526 
2527 	/* Save MSI mapping registers */
2528 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2529 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2530 
2531 	/* Save all other MSIQ registers */
2532 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2533 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2534 	return (H_EOK);
2535 }
2536 
2537 static void
2538 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2539 {
2540 	size_t	bufsz;
2541 	uint64_t *cur_p;
2542 	int i;
2543 
2544 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2545 	cur_p = pxu_p->msiq_config_state;
2546 	/*
2547 	 * Initialize EQ base address register and
2548 	 * Interrupt Mondo Data 0 register.
2549 	 */
2550 	(void) hvio_msiq_init(dev_hdl, pxu_p);
2551 
2552 	/* Restore EQ states */
2553 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2554 		if (((*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK) ==
2555 		    EQ_ACTIVE_STATE) {
2556 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2557 			    i, ENTRIES_EN);
2558 		}
2559 	}
2560 
2561 	/* Restore MSI mapping */
2562 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2563 		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2564 
2565 	/*
2566 	 * Restore all other registers. MSI 32 bit address and
2567 	 * MSI 64 bit address are restored as part of this.
2568 	 */
2569 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2570 		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2571 
2572 	kmem_free(pxu_p->msiq_config_state, bufsz);
2573 	pxu_p->msiq_config_state = NULL;
2574 }
2575 
2576 /*
2577  * Sends a PME_Turn_Off message to put the link into the L2/L3 Ready state.
2578  * Called by px_goto_l23ready.
2579  * Returns DDI_SUCCESS or DDI_FAILURE.
2580  */
2581 int
2582 px_send_pme_turnoff(caddr_t csr_base)
2583 {
2584 	volatile uint64_t reg;
2585 
2586 	/* TBD: Wait for link to be in L1 state (link status reg) */
2587 
2588 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2589 	/* If already pending, return failure */
2590 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2591 		return (DDI_FAILURE);
2592 	}
2593 
2594 	/* Write to the PME_Turn_Off register to broadcast the message */
2595 	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2596 	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
2597 	return (DDI_SUCCESS);
2598 }
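
/*
 * Caller sketch (hypothetical, not built): px_goto_l23ready would
 * broadcast the message and then wait for the link to reach L2/L3
 * Ready.  The wait step is only indicated here, since the TBD above
 * notes that the link-status check is not yet implemented.
 */
#if 0
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS)
		return (DDI_FAILURE);
	/* ... poll for L2/L3 Ready with a timeout ... */
#endif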
2599