/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume
 */
static uint64_t	pec_config_state_regs[] = {
	PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	ILU_ERROR_LOG_ENABLE,
	ILU_INTERRUPT_ENABLE,
	TLU_CONTROL,
	TLU_OTHER_EVENT_LOG_ENABLE,
	TLU_OTHER_EVENT_INTERRUPT_ENABLE,
	TLU_DEVICE_CONTROL,
	TLU_LINK_CONTROL,
	TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
	TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
	TLU_CORRECTABLE_ERROR_LOG_ENABLE,
	TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
	LPU_LINK_LAYER_INTERRUPT_MASK,
	LPU_PHY_INTERRUPT_MASK,
	LPU_RECEIVE_PHY_INTERRUPT_MASK,
	LPU_TRANSMIT_PHY_INTERRUPT_MASK,
	LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
	LPU_LTSSM_INTERRUPT_MASK,
	LPU_RESET,
	LPU_DEBUG_CONFIG,
	LPU_INTERRUPT_MASK,
	LPU_LINK_LAYER_CONFIG,
	LPU_FLOW_CONTROL_UPDATE_CONTROL,
	LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
	LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
	LPU_REPLAY_BUFFER_MAX_ADDRESS,
	LPU_TXLINK_RETRY_FIFO_POINTER,
	LPU_LTSSM_CONFIG2,
	LPU_LTSSM_CONFIG3,
	LPU_LTSSM_CONFIG4,
	LPU_LTSSM_CONFIG5,
	DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	DMC_DEBUG_SELECT_FOR_PORT_A,
	DMC_DEBUG_SELECT_FOR_PORT_B
};
#define	PEC_SIZE (sizeof (pec_config_state_regs))
#define	PEC_KEYS (PEC_SIZE / sizeof (uint64_t))
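
/*
 * A minimal sketch (the save-buffer name is illustrative, not the
 * actual px field) of how the suspend path is expected to walk this
 * table; PEC_SIZE is the table size in bytes, PEC_KEYS its entry count:
 *
 *	uint64_t pec_save[PEC_KEYS];
 *	for (i = 0; i < PEC_KEYS; i++)
 *		pec_save[i] = CSR_XR(csr_base, pec_config_state_regs[i]);
 */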

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE must be written with -1ull during resume
 * to flush the TTE cache.
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_TSB_CONTROL,
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE,
	MMU_INTERRUPT_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE,
	IMU_INTERRUPT_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the CB module.
 * JBC_ERROR_STATUS_CLEAR must be written with -1ull during resume to
 * clear any latched error status.
 */
static uint64_t	cb_config_state_regs[] = {
	JBUS_PARITY_CONTROL,
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE,
	JBC_INTERRUPT_ENABLE
};
#define	CB_SIZE (sizeof (cb_config_state_regs))
#define	CB_KEYS (CB_SIZE / sizeof (uint64_t))

static uint64_t	msiq_config_other_regs[] = {
	ERR_COR_MAPPING,
	ERR_NONFATAL_MAPPING,
	ERR_FATAL_MAPPING,
	PM_PME_MAPPING,
	PME_TO_ACK_MAPPING,
	MSI_32_BIT_ADDRESS,
	MSI_64_BIT_ADDRESS
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))
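
/*
 * These presumably size the save areas captured by msiq_suspend() and
 * restored by msiq_resume(), declared below: one 64-bit register per
 * event queue state entry and per MSI mapping entry (an assumption
 * from the names; the suspend/resume bodies are not in this excerpt).
 */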

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V CB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms; OBP will
	 * set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * Set the NPWR_EN bit to disable non-posted write ordering and
	 * force serialization of writes.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);

	if (pxu_p->chip_id == FIRE_VER_10) {
		val |= (TLU_CONTROL_L0S_TIM_DEFAULT <<
		    FIRE10_TLU_CONTROL_L0S_TIM) |
		    (1ull << FIRE10_TLU_CONTROL_NPWR_EN) |
		    TLU_CONTROL_CONFIG_DEFAULT;
	} else {
		/* Default case is FIRE2.0 */
		val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
		    (1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;
	}

	/*
	 * Set Detect.Quiet.  This disables automatic link re-training
	 * if the link goes down, e.g. when power management turns off
	 * power to the downstream device.  It allows Fire to go to the
	 * Drain state after a link down.  The Drain state forces a
	 * reset of the FC state machine, which is required for proper
	 * link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be 100b (active).
	 * Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes (000b).  Bits [7:5] are the Max Payload Size, which
	 * starts at 128 bytes (000b).  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used, since L0s exit
	 * latency should then be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 * Plus header logs
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 1.0 section 1.2.11.1, table 1-17.
	 */
	int fire10_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE]
	    [LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x76,  0x70,  0x58},
		{0x1A0,  0x76,  0x6B,  0x61},
		{0x22F,  0x9A,  0x6A,  0x6A},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table.
	 * See Fire PRM 1.0 section 1.2.11.2, table 1-18.
	 */
	int fire10_replay_timer_table[LINK_MAX_PKT_ARR_SIZE]
	    [LINK_WIDTH_ARR_SIZE] = {
		{0x2C7,  0x108, 0xF6,  0xBD},
		{0x4E0,  0x162, 0x141, 0xF1},
		{0x68D,  0x1CE, 0x102, 0x102},
		{0xC8D,  0x34E, 0x1C2, 0x1C2},
		{0x188D, 0x64E, 0x342, 0x342},
		{0x308D, 0xC4E, 0x642, 0x642}
	};

	/*
	 * ACKNAK Latency Threshold Table.
	 * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
	 */
	int acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0xED,   0x49,  0x43,  0x30},
		{0x1A0,  0x76,  0x6B,  0x48},
		{0x22F,  0x9A,  0x56,  0x56},
		{0x42F,  0x11A, 0x96,  0x96},
		{0x82F,  0x21A, 0x116, 0x116},
		{0x102F, 0x41A, 0x216, 0x216}
	};

	/*
	 * TxLink Replay Timer Latency Table.
	 * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
	 */
	int replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
		{0x379,  0x112, 0xFC,  0xB4},
		{0x618,  0x1BA, 0x192, 0x10E},
		{0x831,  0x242, 0x143, 0x143},
		{0xFB1,  0x422, 0x233, 0x233},
		{0x1EB0, 0x7E1, 0x412, 0x412},
		{0x3CB0, 0xF61, 0x7D2, 0x7D2}
	};
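
	/*
	 * All four tables are indexed as [max_payload][link_width]:
	 * one row per encoded max payload size (128B up through 4096B)
	 * and one column per link width (x1, x4, x8, x16), matching
	 * the index conversions below.
	 */
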
	/*
	 * Get the Link Width.  See the table above the
	 * LINK_WIDTH_ARR_SIZE #define.  Widths of x1, x4, x8 and x16
	 * have table columns; any other reported width falls back to
	 * the x1 column.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 0;
	}

	/*
	 * Get the Max Payload Size.
	 * See the table above the LINK_MAX_PKT_ARR_SIZE #define.
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		max_payload = CSR_FR(csr_base,
		    FIRE10_LPU_LINK_LAYER_CONFIG, MAX_PAYLOAD);
	} else {
		/* Default case is FIRE2.0 */
		max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
		    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);
	}

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
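
	/*
	 * max_payload is the PCI-E encoded size: 0 = 128B, 1 = 256B,
	 * ... 5 = 4096B, hence the (0x80 << max_payload) decode above
	 * and the clamp to the 4096B row of the timer tables.
	 */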

	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default 0x10 means a width of x16.  The problem is
	 * this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */

	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU Link Layer interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 * Expect OBP 0x1D4C
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as the timer0 register above, except bits [14:0]
	 * hold the timer values for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		val = fire10_acknak_timer_table[max_payload][link_width];
	} else {
		/* Default case is FIRE2.0 */
		val = acknak_timer_table[max_payload][link_width];
	}

	CSR_XS(csr_base,
	    LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	if (pxu_p->chip_id == FIRE_VER_10) {
		val = fire10_replay_timer_table[max_payload][link_width];
	} else {
		/* Default case is FIRE2.0 */
		val = replay_timer_table[max_payload][link_width];
	}

	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Test mode enables which test mode?
	 * Programming model needed for the Receiver Reset Lane N
	 * bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for LTSSM 8 ns timeout value and
	 * LTSSM 20 ns timeout value.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does the '12 ms timeout value' mean?
	 */
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));
	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

/*
 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
 */

	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);
	lpu_init(csr_base, pxu_p);
	dmc_init(csr_base, pxu_p);

/*
 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
 */

	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
	uint_t		obp_tsb_entries, obp_tsb_size;

	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB
	 */
	val = CSR_XR(csr_base, MMU_TSB_CONTROL);

	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

	obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
	obp_tsb_size = tsb_ctrl & 0xF;

	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);
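
	/*
	 * OBP's mappings land in the tail of the new TSB: tsb_size is
	 * in bytes, so (tsb_size >> 3) is the total entry count, and
	 * the last obp_tsb_entries slots are set aside for the copy
	 * below.
	 */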

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */

	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */
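	/*
	 * The loop below derives the encoded table size: the largest
	 * i in [0, 8] such that tsb_size >= (0x2000 << i), i.e. the
	 * TSB size expressed as a power-of-two multiple of 8KB (for
	 * example, a 512KB TSB yields i = 6).
	 */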
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--);

	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * CSR_V MMU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

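/*
 * Fill TSB entries for a mapping: each TTE is the page's physical
 * frame address or'd with the valid bit (and the write bit for
 * writable mappings), taking PFNs either from a DMA mp handle or by
 * resolving kernel virtual addresses.
 */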
1654 /* ARGSUSED */
1655 uint64_t
1656 hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1657     pages_t pages, io_attributes_t io_attributes,
1658     void *addr, size_t pfn_index, int flag)
1659 {
1660 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1661 	uint64_t	attr = MMU_TTE_V;
1662 	int		i;
1663 
1664 	if (io_attributes & PCI_MAP_ATTR_WRITE)
1665 		attr |= MMU_TTE_W;
1666 
1667 	if (flag == MMU_MAP_MP) {
1668 		ddi_dma_impl_t  *mp = (ddi_dma_impl_t *)addr;
1669 
1670 		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
1671 			px_iopfn_t	pfn = PX_GET_MP_PFN(mp, pfn_index);
1672 
1673 			pxu_p->tsb_vaddr[tsb_index] =
1674 			    MMU_PTOB(pfn) | attr;
1675 		}
1676 	} else {
1677 		caddr_t a = (caddr_t)addr;
1678 
1679 		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
1680 			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
1681 
1682 			pxu_p->tsb_vaddr[tsb_index] =
1683 			    MMU_PTOB(pfn) | attr;
1684 		}
1685 	}
1686 
1687 	return (H_EOK);
1688 }
1689 
1690 /* ARGSUSED */
1691 uint64_t
1692 hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1693     pages_t pages)
1694 {
1695 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1696 	int		i;
1697 
1698 	for (i = 0; i < pages; i++, tsb_index++) {
1699 		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
1700 	}
1701 
1702 	return (H_EOK);
1703 }
1704 
1705 /* ARGSUSED */
1706 uint64_t
1707 hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1708     io_attributes_t *attributes_p, r_addr_t *r_addr_p)
1709 {
1710 	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1711 	uint64_t	*tte_addr;
1712 	uint64_t	ret = H_EOK;
1713 
1714 	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
1715 
1716 	if (*tte_addr & MMU_TTE_V) {
1717 		*r_addr_p = MMU_TTETOPA(*tte_addr);
1718 		*attributes_p = (*tte_addr & MMU_TTE_W) ?
1719 		    PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
1720 	} else {
1721 		*r_addr_p = 0;
1722 		*attributes_p = 0;
1723 		ret = H_ENOMAP;
1724 	}
1725 
1726 	return (ret);
1727 }
1728 
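/*
 * Computes the bypass-mode I/O address for the given real address:
 * the RA is offset into the bypass window, and pages that are not
 * system memory get the noncacheable attribute.
 */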
1729 /* ARGSUSED */
1730 uint64_t
1731 hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
1732     io_attributes_t io_attributes, io_addr_t *io_addr_p)
1733 {
1734 	uint64_t	pfn = MMU_BTOP(ra);
1735 
1736 	*io_addr_p = MMU_BYPASS_BASE | ra |
1737 	    (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);
1738 
1739 	return (H_EOK);
1740 }
1741 
1742 /*
1743  * Generic IO Interrupt Services
1744  */
1745 
1746 /*
1747  * Converts a device specific interrupt number given by the
1748  * arguments devhandle and devino into a system specific ino.
1749  */
1750 /* ARGSUSED */
1751 uint64_t
1752 hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
1753     sysino_t *sysino)
1754 {
1755 	if (devino >= INTERRUPT_MAPPING_ENTRIES) {
1756 		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
1757 		return (H_ENOINTR);
1758 	}
1759 
1760 	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
1761 
1762 	return (H_EOK);
1763 }
1764 
1765 /*
1766  * Returns state in intr_valid_state if the interrupt defined by sysino
1767  * is valid (enabled) or not-valid (disabled).
1768  */
1769 uint64_t
1770 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
1771     intr_valid_state_t *intr_valid_state)
1772 {
1773 	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1774 	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
1775 		*intr_valid_state = INTR_VALID;
1776 	} else {
1777 		*intr_valid_state = INTR_NOTVALID;
1778 	}
1779 
1780 	return (H_EOK);
1781 }
1782 
1783 /*
1784  * Sets the 'valid' state of the interrupt defined by
1785  * the argument sysino to the state defined by the
1786  * argument intr_valid_state.
1787  */
1788 uint64_t
1789 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
1790     intr_valid_state_t intr_valid_state)
1791 {
1792 	switch (intr_valid_state) {
1793 	case INTR_VALID:
1794 		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1795 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1796 		break;
1797 	case INTR_NOTVALID:
1798 		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1799 		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
1800 		break;
1801 	default:
1802 		return (H_EINVAL);
1803 	}
1804 
1805 	return (H_EOK);
1806 }
1807 
1808 /*
1809  * Returns the current state of the interrupt given by the sysino
1810  * argument.
1811  */
1812 uint64_t
1813 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
1814     intr_state_t *intr_state)
1815 {
1816 	intr_state_t state;
1817 
1818 	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1819 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
1820 
1821 	switch (state) {
1822 	case INTERRUPT_IDLE_STATE:
1823 		*intr_state = INTR_IDLE_STATE;
1824 		break;
1825 	case INTERRUPT_RECEIVED_STATE:
1826 		*intr_state = INTR_RECEIVED_STATE;
1827 		break;
1828 	case INTERRUPT_PENDING_STATE:
1829 		*intr_state = INTR_DELIVERED_STATE;
1830 		break;
1831 	default:
1832 		return (H_EINVAL);
1833 	}
1834 
1835 	return (H_EOK);
1837 }
1838 
1839 /*
1840  * Sets the current state of the interrupt given by the sysino
1841  * argument to the value given in the argument intr_state.
1842  *
1843  * Note: Setting the state to INTR_IDLE clears any pending
1844  * interrupt for sysino.
1845  */
1846 uint64_t
1847 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
1848     intr_state_t intr_state)
1849 {
1850 	intr_state_t state;
1851 
1852 	switch (intr_state) {
1853 	case INTR_IDLE_STATE:
1854 		state = INTERRUPT_IDLE_STATE;
1855 		break;
1856 	case INTR_DELIVERED_STATE:
1857 		state = INTERRUPT_PENDING_STATE;
1858 		break;
1859 	default:
1860 		return (H_EINVAL);
1861 	}
1862 
1863 	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
1864 	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
1865 
1866 	return (H_EOK);
1867 }
1868 
1869 /*
1870  * Returns the cpuid that is the current target of the
1871  * interrupt given by the sysino argument.
1872  *
1873  * The cpuid value returned is undefined if the target
1874  * has not been set via intr_settarget.
1875  */
1876 uint64_t
1877 hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
1878 {
1879 	*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
1880 	    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
1881 
1882 	return (H_EOK);
1883 }
1884 
1885 /*
1886  * Set the target cpu for the interrupt defined by the argument
1887  * sysino to the target cpu value defined by the argument cpuid.
1888  */
1889 uint64_t
1890 hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
1891 {
1893 	uint64_t	val, intr_controller;
1894 	uint32_t	ino = SYSINO_TO_DEVINO(sysino);
1895 
1896 	/*
1897 	 * For now, we assign interrupt controller in a round
1898 	 * robin fashion.  Later, we may need to come up with
1899 	 * a more efficient assignment algorithm.
1900 	 */
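	/*
	 * For example, cpuid 5 selects controller 1 (5 % 4), so
	 * intr_controller == 0x2.
	 */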
1901 	intr_controller = 0x1ull << (cpuid % 4);
1902 
1903 	val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
1904 	    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
1905 	    ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
1906 	    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
1907 
1908 	/* For EQ interrupts, set DATA MONDO bit */
1909 	if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
1910 	    (ino < (PX_DEFAULT_MSIQ_1ST_DEVINO + PX_DEFAULT_MSIQ_CNT)))
1911 		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
1912 
1913 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
1914 
1915 	return (H_EOK);
1916 }
1917 
1918 /*
1919  * MSIQ Functions:
1920  */
1921 uint64_t
1922 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
1923 {
1924 	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
1925 	    (uint64_t)pxu_p->msiq_mapped_p);
1926 	DBG(DBG_IB, NULL,
1927 	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
1928 	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
1929 
1930 	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
1931 	    (uint64_t)ID_TO_IGN(pxu_p->portid) << INO_BITS);
1932 	DBG(DBG_IB, NULL, "hvio_msiq_init: "
1933 	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
1934 	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
1935 
1936 	return (H_EOK);
1937 }
1938 
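/*
 * An event queue is reported valid if it is active or in the error
 * state; only an idle EQ is reported invalid.
 */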
1939 uint64_t
1940 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1941     pci_msiq_valid_state_t *msiq_valid_state)
1942 {
1943 	uint32_t	eq_state;
1944 	uint64_t	ret = H_EOK;
1945 
1946 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1947 	    msiq_id, ENTRIES_STATE);
1948 
1949 	switch (eq_state) {
1950 	case EQ_IDLE_STATE:
1951 		*msiq_valid_state = PCI_MSIQ_INVALID;
1952 		break;
1953 	case EQ_ACTIVE_STATE:
1954 	case EQ_ERROR_STATE:
1955 		*msiq_valid_state = PCI_MSIQ_VALID;
1956 		break;
1957 	default:
1958 		ret = H_EIO;
1959 		break;
1960 	}
1961 
1962 	return (ret);
1963 }
1964 
1965 uint64_t
1966 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
1967     pci_msiq_valid_state_t msiq_valid_state)
1968 {
1969 	uint64_t	ret = H_EOK;
1970 
1971 	switch (msiq_valid_state) {
1972 	case PCI_MSIQ_INVALID:
1973 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
1974 		    msiq_id, ENTRIES_DIS);
1975 		break;
1976 	case PCI_MSIQ_VALID:
1977 		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
1978 		    msiq_id, ENTRIES_EN);
1979 		break;
1980 	default:
1981 		ret = H_EINVAL;
1982 		break;
1983 	}
1984 
1985 	return (ret);
1986 }
1987 
1988 uint64_t
1989 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
1990     pci_msiq_state_t *msiq_state)
1991 {
1992 	uint32_t	eq_state;
1993 	uint64_t	ret = H_EOK;
1994 
1995 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
1996 	    msiq_id, ENTRIES_STATE);
1997 
1998 	switch (eq_state) {
1999 	case EQ_IDLE_STATE:
2000 	case EQ_ACTIVE_STATE:
2001 		*msiq_state = PCI_MSIQ_STATE_IDLE;
2002 		break;
2003 	case EQ_ERROR_STATE:
2004 		*msiq_state = PCI_MSIQ_STATE_ERROR;
2005 		break;
2006 	default:
2007 		ret = H_EIO;
2008 	}
2009 
2010 	return (ret);
2011 }
2012 
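/*
 * State transitions honored here: an active EQ may be forced into
 * the error state (ENOVERR), an EQ in the error state may be
 * returned to idle (E2I), and setting an idle EQ to idle is a
 * no-op.  Anything else fails with H_EIO.
 */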
2013 uint64_t
2014 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2015     pci_msiq_state_t msiq_state)
2016 {
2017 	uint32_t	eq_state;
2018 	uint64_t	ret = H_EOK;
2019 
2020 	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2021 	    msiq_id, ENTRIES_STATE);
2022 
2023 	switch (eq_state) {
2024 	case EQ_IDLE_STATE:
2025 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2026 			ret = H_EIO;
2027 		break;
2028 	case EQ_ACTIVE_STATE:
2029 		if (msiq_state == PCI_MSIQ_STATE_ERROR)
2030 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2031 			    msiq_id, ENTRIES_ENOVERR);
2032 		else
2033 			ret = H_EIO;
2034 		break;
2035 	case EQ_ERROR_STATE:
2036 		if (msiq_state == PCI_MSIQ_STATE_IDLE)
2037 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2038 			    msiq_id, ENTRIES_E2I);
2039 		else
2040 			ret = H_EIO;
2041 		break;
2042 	default:
2043 		ret = H_EIO;
2044 	}
2045 
2046 	return (ret);
2047 }
2048 
2049 uint64_t
2050 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2051     msiqhead_t *msiq_head)
2052 {
2053 	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
2054 	    msiq_id, ENTRIES_HEAD);
2055 
2056 	return (H_EOK);
2057 }
2058 
2059 uint64_t
2060 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2061     msiqhead_t msiq_head)
2062 {
2063 	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2064 	    ENTRIES_HEAD, msiq_head);
2065 
2066 	return (H_EOK);
2067 }
2068 
2069 uint64_t
2070 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2071     msiqtail_t *msiq_tail)
2072 {
2073 	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2074 	    msiq_id, ENTRIES_TAIL);
2075 
2076 	return (H_EOK);
2077 }
2078 
2079 /*
2080  * MSI Functions:
2081  */
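/*
 * Programs the 32-bit and 64-bit MSI address match registers with
 * the PCI MEM ranges reserved for MSI transactions.
 */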
2082 uint64_t
2083 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2084 {
2085 	/* Reserve PCI MEM 32 resources to perform 32 bit MSI transactions */
2086 	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2087 	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
2088 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2089 	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2090 
2091 	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2092 	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2093 	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
2094 	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2095 	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2096 
2097 	return (H_EOK);
2098 }
2099 
2100 uint64_t
2101 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2102     msiqid_t *msiq_id)
2103 {
2104 	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2105 	    msi_num, ENTRIES_EQNUM);
2106 
2107 	return (H_EOK);
2108 }
2109 
2110 uint64_t
2111 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2112     msiqid_t msiq_id)
2113 {
2114 	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2115 	    ENTRIES_EQNUM, msiq_id);
2116 
2117 	return (H_EOK);
2118 }
2119 
2120 uint64_t
2121 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2122     pci_msi_valid_state_t *msi_valid_state)
2123 {
2124 	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2125 	    msi_num, ENTRIES_V);
2126 
2127 	return (H_EOK);
2128 }
2129 
2130 uint64_t
2131 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2132     pci_msi_valid_state_t msi_valid_state)
2133 {
2134 	uint64_t	ret = H_EOK;
2135 
2136 	switch (msi_valid_state) {
2137 	case PCI_MSI_VALID:
2138 		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2139 		    ENTRIES_V);
2140 		break;
2141 	case PCI_MSI_INVALID:
2142 		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2143 		    ENTRIES_V);
2144 		break;
2145 	default:
2146 		ret = H_EINVAL;
2147 	}
2148 
2149 	return (ret);
2150 }
2151 
2152 uint64_t
2153 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2154     pci_msi_state_t *msi_state)
2155 {
2156 	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2157 	    msi_num, ENTRIES_EQWR_N);
2158 
2159 	return (H_EOK);
2160 }
2161 
2162 uint64_t
2163 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2164     pci_msi_state_t msi_state)
2165 {
2166 	uint64_t	ret = H_EOK;
2167 
2168 	switch (msi_state) {
2169 	case PCI_MSI_STATE_IDLE:
2170 		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2171 		    ENTRIES_EQWR_N);
2172 		break;
2173 	case PCI_MSI_STATE_DELIVERED:
2174 	default:
2175 		ret = H_EINVAL;
2176 		break;
2177 	}
2178 
2179 	return (ret);
2180 }
2181 
2182 /*
2183  * MSG Functions:
2184  */
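/*
 * Returns the event queue that the given PCIe message type is
 * routed to, read from the corresponding mapping register.
 */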
2185 uint64_t
2186 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2187     msiqid_t *msiq_id)
2188 {
2189 	uint64_t	ret = H_EOK;
2190 
2191 	switch (msg_type) {
2192 	case PCIE_PME_MSG:
2193 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2194 		break;
2195 	case PCIE_PME_ACK_MSG:
2196 		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2197 		    EQNUM);
2198 		break;
2199 	case PCIE_CORR_MSG:
2200 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2201 		break;
2202 	case PCIE_NONFATAL_MSG:
2203 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2204 		    EQNUM);
2205 		break;
2206 	case PCIE_FATAL_MSG:
2207 		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2208 		break;
2209 	default:
2210 		ret = H_EINVAL;
2211 		break;
2212 	}
2213 
2214 	return (ret);
2215 }
2216 
2217 uint64_t
2218 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2219     msiqid_t msiq_id)
2220 {
2221 	uint64_t	ret = H_EOK;
2222 
2223 	switch (msg_type) {
2224 	case PCIE_PME_MSG:
2225 		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2226 		break;
2227 	case PCIE_PME_ACK_MSG:
2228 		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2229 		break;
2230 	case PCIE_CORR_MSG:
2231 		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2232 		break;
2233 	case PCIE_NONFATAL_MSG:
2234 		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2235 		break;
2236 	case PCIE_FATAL_MSG:
2237 		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2238 		break;
2239 	default:
2240 		ret = H_EINVAL;
2241 		break;
2242 	}
2243 
2244 	return (ret);
2245 }
2246 
2247 uint64_t
2248 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2249     pcie_msg_valid_state_t *msg_valid_state)
2250 {
2251 	uint64_t	ret = H_EOK;
2252 
2253 	switch (msg_type) {
2254 	case PCIE_PME_MSG:
2255 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2256 		break;
2257 	case PCIE_PME_ACK_MSG:
2258 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2259 		    PME_TO_ACK_MAPPING, V);
2260 		break;
2261 	case PCIE_CORR_MSG:
2262 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2263 		break;
2264 	case PCIE_NONFATAL_MSG:
2265 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2266 		    ERR_NONFATAL_MAPPING, V);
2267 		break;
2268 	case PCIE_FATAL_MSG:
2269 		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2270 		    V);
2271 		break;
2272 	default:
2273 		ret = H_EINVAL;
2274 		break;
2275 	}
2276 
2277 	return (ret);
2278 }
2279 
2280 uint64_t
2281 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2282     pcie_msg_valid_state_t msg_valid_state)
2283 {
2284 	uint64_t	ret = H_EOK;
2285 
2286 	switch (msg_valid_state) {
2287 	case PCIE_MSG_VALID:
2288 		switch (msg_type) {
2289 		case PCIE_PME_MSG:
2290 			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2291 			break;
2292 		case PCIE_PME_ACK_MSG:
2293 			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2294 			break;
2295 		case PCIE_CORR_MSG:
2296 			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2297 			break;
2298 		case PCIE_NONFATAL_MSG:
2299 			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2300 			break;
2301 		case PCIE_FATAL_MSG:
2302 			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2303 			break;
2304 		default:
2305 			ret = H_EINVAL;
2306 			break;
2307 		}
2308 
2309 		break;
2310 	case PCIE_MSG_INVALID:
2311 		switch (msg_type) {
2312 		case PCIE_PME_MSG:
2313 			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2314 			break;
2315 		case PCIE_PME_ACK_MSG:
2316 			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2317 			break;
2318 		case PCIE_CORR_MSG:
2319 			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2320 			break;
2321 		case PCIE_NONFATAL_MSG:
2322 			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2323 			break;
2324 		case PCIE_FATAL_MSG:
2325 			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2326 			break;
2327 		default:
2328 			ret = H_EINVAL;
2329 			break;
2330 		}
2331 		break;
2332 	default:
2333 		ret = H_EINVAL;
2334 	}
2335 
2336 	return (ret);
2337 }
2338 
2339 /*
2340  * Suspend/Resume Functions:
2341  *	(pec, mmu, ib)
2342  *	cb
2343  * Registers saved have all been touched in the XXX_init functions.
2344  */
2345 uint64_t
2346 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2347 {
2348 	uint64_t	*config_state;
2349 	int		total_size;
2350 	int		i;
2351 
2352 	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2353 		return (H_EIO);
2354 
2355 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2356 	config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2357 
2358 	if (config_state == NULL) {
2359 		return (H_EIO);
2360 	}
2361 
2362 	/*
2363 	 * Soft state for suspend/resume from pxu_t:
2364 	 * uint64_t	*pec_config_state;
2365 	 * uint64_t	*mmu_config_state;
2366 	 * uint64_t	*ib_intr_map;
2367 	 * uint64_t	*ib_config_state;
2368 	 * uint64_t	*xcb_config_state;
2369 	 */
2370 
2371 	/* Save the PEC configuration states */
2372 	pxu_p->pec_config_state = config_state;
2373 	for (i = 0; i < PEC_KEYS; i++) {
2374 		pxu_p->pec_config_state[i] =
2375 		    CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
2376 	}
2377 
2378 	/* Save the MMU configuration states */
2379 	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2380 	for (i = 0; i < MMU_KEYS; i++) {
2381 		pxu_p->mmu_config_state[i] =
2382 		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2383 	}
2384 
2385 	/* Save the interrupt mapping registers */
2386 	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2387 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2388 		pxu_p->ib_intr_map[i] =
2389 		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2390 	}
2391 
2392 	/* Save the IB configuration states */
2393 	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2394 	for (i = 0; i < IB_KEYS; i++) {
2395 		pxu_p->ib_config_state[i] =
2396 		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2397 	}
2398 
2399 	return (H_EOK);
2400 }
2401 
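/*
 * Restores the state saved by hvio_suspend, in the reverse order of
 * the save: IB, interrupt mappings (idled first), MMU (after the TTE
 * cache is invalidated) and PEC (with LPU_RESET held low), then
 * frees the save buffer and resumes the MSI queues.
 */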
2402 void
2403 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2404 {
2405 	int		total_size;
2406 	sysino_t	sysino;
2407 	int		i;
2408 
2409 	/* Make sure that suspend actually did occur */
2410 	if (!pxu_p->pec_config_state) {
2411 		return;
2412 	}
2413 
2414 	/* Restore IB configuration states */
2415 	for (i = 0; i < IB_KEYS; i++) {
2416 		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2417 		    pxu_p->ib_config_state[i]);
2418 	}
2419 
2420 	/*
2421 	 * Restore the interrupt mapping registers and
2422 	 * make sure the interrupts are idle.
2423 	 */
2424 	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2425 		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2426 		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2427 		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2428 		    pxu_p->ib_intr_map[i]);
2429 	}
2430 
2431 	/* Restore MMU configuration states */
2432 	/* Clear the cache. */
2433 	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2434 
2435 	for (i = 0; i < MMU_KEYS; i++) {
2436 		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2437 		    pxu_p->mmu_config_state[i]);
2438 	}
2439 
2440 	/* Restore PEC configuration states */
2441 	/* Make sure all reset bits are low until error is detected */
2442 	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2443 
2444 	for (i = 0; i < PEC_KEYS; i++) {
2445 		CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
2446 		    pxu_p->pec_config_state[i]);
2447 	}
2448 
2449 	/* Enable PCI-E interrupt */
2450 	(void) hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino, &sysino);
2451 
2452 	(void) hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE);
2453 
2454 	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2455 	kmem_free(pxu_p->pec_config_state, total_size);
2456 
2457 	pxu_p->pec_config_state = NULL;
2458 	pxu_p->mmu_config_state = NULL;
2459 	pxu_p->ib_config_state = NULL;
2460 	pxu_p->ib_intr_map = NULL;
2461 
2462 	msiq_resume(dev_hdl, pxu_p);
2463 }
2464 
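/*
 * Saves the common (JBus/XBC) block registers listed in
 * cb_config_state_regs; hvio_cb_resume restores them and frees
 * the buffer.
 */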
2465 uint64_t
2466 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2467 {
2468 	uint64_t	*config_state;
2469 	int		i;
2470 
2471 	config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);
2472 
2473 	if (config_state == NULL) {
2474 		return (H_EIO);
2475 	}
2476 
2477 	/* Save the configuration states */
2478 	pxu_p->xcb_config_state = config_state;
2479 	for (i = 0; i < CB_KEYS; i++) {
2480 		pxu_p->xcb_config_state[i] =
2481 		    CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
2482 	}
2483 
2484 	return (H_EOK);
2485 }
2486 
2487 void
2488 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2489     devino_t devino, pxu_t *pxu_p)
2490 {
2491 	sysino_t	sysino;
2492 	int		i;
2493 
2494 	/*
2495 	 * No reason to have any reset bits high until an error is
2496 	 * detected on the link.
2497 	 */
2498 	CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2499 
2500 	ASSERT(pxu_p->xcb_config_state);
2501 
2502 	/* Restore the configuration states */
2503 	for (i = 0; i < CB_KEYS; i++) {
2504 		CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
2505 		    pxu_p->xcb_config_state[i]);
2506 	}
2507 
2508 	/* Enable XBC interrupt */
2509 	(void) hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino, &sysino);
2510 
2511 	(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
2512 
2513 	kmem_free(pxu_p->xcb_config_state, CB_SIZE);
2514 
2515 	pxu_p->xcb_config_state = NULL;
2516 }
2517 
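/*
 * Saves the EQ state entries, the MSI mapping entries and the
 * remaining MSIQ registers into one buffer hung off pxu_p;
 * msiq_resume consumes and frees it.
 */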
2518 static uint64_t
2519 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2520 {
2521 	size_t	bufsz;
2522 	volatile uint64_t *cur_p;
2523 	int i;
2524 
2525 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2526 	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2527 	    NULL)
2528 		return (H_EIO);
2529 
2530 	cur_p = pxu_p->msiq_config_state;
2531 
2532 	/* Save each EQ state */
2533 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2534 		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2535 
2536 	/* Save MSI mapping registers */
2537 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2538 		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2539 
2540 	/* Save all other MSIQ registers */
2541 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2542 		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2543 	return (H_EOK);
2544 }
2545 
2546 static void
2547 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2548 {
2549 	size_t	bufsz;
2550 	uint64_t *cur_p;
2551 	int i;
2552 
2553 	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2554 	cur_p = pxu_p->msiq_config_state;
2555 	/*
2556 	 * Initialize EQ base address register and
2557 	 * Interrupt Mondo Data 0 register.
2558 	 */
2559 	(void) hvio_msiq_init(dev_hdl, pxu_p);
2560 
2561 	/* Restore EQ states */
2562 	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2563 		if (((*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK) ==
2564 		    EQ_ACTIVE_STATE) {
2565 			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2566 			    i, ENTRIES_EN);
2567 		}
2568 	}
2569 
2570 	/* Restore MSI mapping */
2571 	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2572 		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2573 
2574 	/*
2575 	 * Restore all other registers. MSI 32 bit address and
2576 	 * MSI 64 bit address are restored as part of this.
2577 	 */
2578 	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2579 		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2580 
2581 	kmem_free(pxu_p->msiq_config_state, bufsz);
2582 	pxu_p->msiq_config_state = NULL;
2583 }
2584 
2585 /*
2586  * Sends a PME_Turn_Off message to put the link in the L2/L3 Ready state.
2587  * Called by px_goto_l23ready.
2588  * Returns DDI_SUCCESS or DDI_FAILURE.
2589  */
2590 int
2591 px_send_pme_turnoff(caddr_t csr_base)
2592 {
2593 	volatile uint64_t reg;
2594 
2595 	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2596 	/* If already pending, return failure */
2597 	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2598 		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2599 	    "tlu_pme_turn_off_generate = %llx\n", reg);
2600 		return (DDI_FAILURE);
2601 	}
2602 
2603 	/* Write to the PME_Turn_Off register to broadcast the message */
2604 	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
2605 	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);
2606 
2607 	return (DDI_SUCCESS);
2608 }
2609 
2610 /*
2611  * Polls until the link reaches the L1 idle state, or until
2612  * px_max_l1_tries attempts are exhausted.  Returns
2613  * DDI_SUCCESS - if the link is in L1 idle
2614  * DDI_FAILURE - if the link is not in L1 idle
2615  */
2616 int
2617 px_link_wait4l1idle(caddr_t csr_base)
2618 {
2619 	uint8_t ltssm_state;
2620 	int ntries = px_max_l1_tries;
2621 
2622 	while (ntries > 0) {
2623 		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
2624 		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
2625 			break;
2626 		delay(1);
2627 	}
2628 	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
2629 	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
2630 }
2631 
2632 /*
2633  * Transition the link back to L0 after it has been down.
2634  */
2635 int
2636 px_link_retrain(caddr_t csr_base)
2637 {
2638 	volatile uint64_t reg;
2639 
2640 	reg = CSR_XR(csr_base, TLU_CONTROL);
2641 	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
2642 		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
2643 		return (DDI_FAILURE);
2644 	}
2645 
2646 	/* Clear link down bit in TLU Other Event Clear Status Register. */
2647 	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
2648 
2649 	/* Clear Drain bit in TLU Status Register */
2650 	CSR_BS(csr_base, TLU_STATUS, DRAIN);
2651 
2652 	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
2653 	reg = CSR_XR(csr_base, TLU_CONTROL);
2654 	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
2655 	CSR_XS(csr_base, TLU_CONTROL, reg);
2656 
2657 	return (DDI_SUCCESS);
2658 }
2659 
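/*
 * Sets the "Remain in Detect.Quiet" bit which, as the name suggests,
 * keeps the link from retraining until px_link_retrain clears it.
 */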
2660 void
2661 px_enable_detect_quiet(caddr_t csr_base)
2662 {
2663 	volatile uint64_t tlu_ctrl;
2664 
2665 	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
2666 	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
2667 	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
2668 }
2669