/*-
********************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include "al_hal_pcie.h"
#include "al_hal_pbs_regs.h"
#include "al_hal_unit_adapter_regs.h"

/**
 * Parameter definitions
 */
#define AL_PCIE_AXI_REGS_OFFSET			0x0

#define AL_PCIE_LTSSM_STATE_L0			0x11
#define AL_PCIE_LTSSM_STATE_L0S			0x12
#define AL_PCIE_DEVCTL_PAYLOAD_128B		0x00
#define AL_PCIE_DEVCTL_PAYLOAD_256B		0x20

#define AL_PCIE_SECBUS_DEFAULT			0x1
#define AL_PCIE_SUBBUS_DEFAULT			0x1
#define AL_PCIE_LINKUP_WAIT_INTERVAL		50	/* measured in usec */
#define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC	20

#define AL_PCIE_LINKUP_RETRIES			8

#define AL_PCIE_MAX_32_MEMORY_BAR_SIZE		(0x100000000ULL)
#define AL_PCIE_MIN_MEMORY_BAR_SIZE		(1 << 12)
#define AL_PCIE_MIN_IO_BAR_SIZE			(1 << 8)

/**
 * inbound header credits and outstanding outbound reads defaults
 */
/** RC - Revisions 1/2 */
#define AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT	(8)
#define AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT	(41)
#define AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT	(25)
#define AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT	(31)
/** EP - Revisions 1/2 */
#define AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT	(15)
#define AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT	(76)
#define AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT	(6)
#define AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT	(15)
/** RC - Revision 3 */
#define AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT	(32)
#define AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT	(161)
#define AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT	(38)
#define AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT	(60)
/** EP - Revision 3 */
#define AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT	(32)
#define AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT	(161)
#define AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT	(38)
#define AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT	(60)

/**
 * MACROS
 */
#define AL_PCIE_PARSE_LANES(v)		(((1 << (v)) - 1) << \
		PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
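
/*
 * Example (illustrative only): AL_PCIE_PARSE_LANES(4) evaluates to
 * ((1 << 4) - 1) << PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT,
 * i.e. the lane bitmask 'b1111 shifted into the NOF_ACT_LANES field,
 * matching the (4 ->'b1111, 2 ->'b11, 1 -> 'b1) convention used by
 * al_pcie_port_max_lanes_set() below.
 */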

#define AL_PCIE_FLR_DONE_INTERVAL		10

/**
 * Static functions
 */
static void
al_pcie_port_wr_to_ro_set(struct al_pcie_port *pcie_port, al_bool enable)
{
	/* when disabling writes to RO, make sure any previous writes to
	 * config space were committed
	 */
	if (enable == AL_FALSE)
		al_local_data_memory_barrier();

	al_reg_write32(&pcie_port->regs->port_regs->rd_only_wr_en,
		       (enable == AL_TRUE) ? 1 : 0);

	/* when enabling writes to RO, make sure it is committed before trying
	 * to write to RO config space
	 */
	if (enable == AL_TRUE)
		al_local_data_memory_barrier();
}

/** helper function to access dbi_cs2 registers */
static void
al_reg_write32_dbi_cs2(
	struct al_pcie_port	*pcie_port,
	uint32_t		*offset,
	uint32_t		val)
{
	uintptr_t cs2_bit =
		(pcie_port->rev_id == AL_PCIE_REV_ID_3) ? 0x4000 : 0x1000;

	al_reg_write32((uint32_t *)((uintptr_t)offset | cs2_bit), val);
}
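
/*
 * Illustrative note: the DBI "CS2" shadow space is selected purely by
 * address aliasing - OR-ing 0x1000 (rev 1/2) or 0x4000 (rev 3) into the
 * register address. A sketch, assuming a rev 1/2 port and 'bar0' pointing
 * at a BAR register in config space:
 *
 *	al_reg_write32_dbi_cs2(pcie_port, bar0, mask);
 *
 * writes to ((uintptr_t)bar0 | 0x1000), which the core routes to the BAR's
 * shadow (mask) register rather than the BAR itself.
 */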

static unsigned int
al_pcie_speed_gen_code(enum al_pcie_link_speed speed)
{
	if (speed == AL_PCIE_LINK_SPEED_GEN1)
		return 1;
	if (speed == AL_PCIE_LINK_SPEED_GEN2)
		return 2;
	if (speed == AL_PCIE_LINK_SPEED_GEN3)
		return 3;
	/* must not be reached */
	return 0;
}

static inline void
al_pcie_port_link_speed_ctrl_set(
	struct al_pcie_port *pcie_port,
	enum al_pcie_link_speed max_speed)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	if (max_speed != AL_PCIE_LINK_SPEED_DEFAULT) {
		uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(max_speed);
		al_reg_write32_masked(
			(uint32_t __iomem *)(regs->core_space[0].pcie_link_cap_base),
			0xF, max_speed_val);
		al_reg_write32_masked(
			(uint32_t __iomem *)(regs->core_space[0].pcie_cap_base
			+ (AL_PCI_EXP_LNKCTL2 >> 2)),
			0xF, max_speed_val);
	}

	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
}

static int
al_pcie_port_link_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_link_params *link_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint8_t max_lanes = pcie_port->max_lanes;

	if ((link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)	&&
	    (link_params->max_payload_size != AL_PCIE_MPS_128)		&&
	    (link_params->max_payload_size != AL_PCIE_MPS_256)) {
		al_err("PCIe %d: unsupported Max Payload Size (%u)\n",
		       pcie_port->port_id, link_params->max_payload_size);
		return -EINVAL;
	}

	al_pcie_port_link_speed_ctrl_set(pcie_port, link_params->max_speed);

	/* Change Max Payload Size, if needed.
	 * The Max Payload Size is only valid for PF0.
	 */
	if (link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)
		al_reg_write32_masked(regs->core_space[0].pcie_dev_ctrl_status,
				      PCIE_PORT_DEV_CTRL_STATUS_MPS_MASK,
				      link_params->max_payload_size <<
					PCIE_PORT_DEV_CTRL_STATUS_MPS_SHIFT);

	/** Snippet from the PCIe core spec:
	 * Link Mode Enable. Sets the number of lanes in the link that you want
	 * to connect to the link partner. When you have unused lanes in your
	 * system, then you must change the value in this register to reflect
	 * the number of lanes. You must also change the value in the
	 * "Predetermined Number of Lanes" field of the "Link Width and Speed
	 * Change Control Register".
	 * 000001: x1
	 * 000011: x2
	 * 000111: x4
	 * 001111: x8
	 * 011111: x16
	 * 111111: x32 (not supported)
	 */
	al_reg_write32_masked(&regs->port_regs->gen2_ctrl,
				PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_MASK,
				max_lanes << PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_SHIFT);
	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
				PCIE_PORT_LINK_CTRL_LINK_CAPABLE_MASK,
				(max_lanes + (max_lanes-1))
				<< PCIE_PORT_LINK_CTRL_LINK_CAPABLE_SHIFT);

	return 0;
}
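
/*
 * Worked example (illustrative): for max_lanes = 4, NUM_OF_LANES is written
 * with the plain count 4, while LINK_CAPABLE is written with
 * max_lanes + (max_lanes - 1) = 7, i.e. the 000111 "x4" encoding from the
 * table above; the same 2n-1 arithmetic yields 1, 3, 15 and 31 for x1, x2,
 * x8 and x16.
 */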

static void
al_pcie_port_ram_parity_int_config(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_reg_write32(&regs->app.parity->en_core,
		(enable == AL_TRUE) ? 0xffffffff : 0x0);

	al_reg_write32_masked(&regs->app.int_grp_b->mask,
	      PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE,
	      (enable != AL_TRUE) ?
	      PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE : 0);
}

static void
al_pcie_port_axi_parity_int_config(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t parity_enable_mask = 0xffffffff;

	/**
	 * Addressing RMN: 5603
	 *
	 * RMN description:
	 * u4_ram2p signal false parity error
	 *
	 * Software flow:
	 * Disable parity check for this memory
	 */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
		parity_enable_mask &= ~PCIE_AXI_PARITY_EN_AXI_U4_RAM2P;

	al_reg_write32(regs->axi.parity.en_axi,
		       (enable == AL_TRUE) ? parity_enable_mask : 0x0);

	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		al_reg_write32_masked(regs->axi.ctrl.global,
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
			(enable == AL_TRUE) ?
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
	} else {
		al_reg_write32_masked(regs->axi.ctrl.global,
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
			(enable == AL_TRUE) ?
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
	}

	al_reg_write32_masked(&regs->axi.int_grp_a->mask,
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI,
		(enable != AL_TRUE) ?
		(PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI) : 0);
}

static void
al_pcie_port_relaxed_pcie_ordering_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_relaxed_ordering_params *relaxed_ordering_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
	/**
	 * Default:
	 *  - RC: Rx relaxed ordering only
	 *  - EP: Tx relaxed ordering only
	 */
	al_bool tx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_FALSE : AL_TRUE);
	al_bool rx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_TRUE : AL_FALSE);

	if (relaxed_ordering_params) {
		tx_relaxed_ordering = relaxed_ordering_params->enable_tx_relaxed_ordering;
		rx_relaxed_ordering = relaxed_ordering_params->enable_rx_relaxed_ordering;
	}

	/** PCIe ordering:
	 *  - Root port: disable enforcement of the "outbound completion must
	 *    be stalled behind outbound writes" ordering rule
	 *  - Endpoint: disable "read completion on the master port pushes
	 *    slave writes"
	 */
	al_reg_write32_masked(
		regs->axi.ordering.pos_cntl,
		PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS |
		PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES,
		(tx_relaxed_ordering ?
		(PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
		PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES) : 0) |
		(rx_relaxed_ordering ?
		(PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS) : 0));
}

static int
al_pcie_rev_id_get(
	void __iomem *pbs_reg_base,
	void __iomem *pcie_reg_base)
{
	uint32_t chip_id;
	uint16_t chip_id_dev;
	uint8_t rev_id;
	struct al_pbs_regs *pbs_regs = pbs_reg_base;

	/* get revision ID from PBS' chip_id register */
	chip_id = al_reg_read32(&pbs_regs->unit.chip_id);
	chip_id_dev = AL_REG_FIELD_GET(chip_id,
				       PBS_UNIT_CHIP_ID_DEV_ID_MASK,
				       PBS_UNIT_CHIP_ID_DEV_ID_SHIFT);

	if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V1) {
		rev_id = AL_PCIE_REV_ID_1;
	} else if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE_V2) {
		struct al_pcie_revx_regs __iomem *regs =
			(struct al_pcie_revx_regs __iomem *)pcie_reg_base;
		uint32_t dev_id;

		dev_id = al_reg_read32(&regs->axi.device_id.device_rev_id) &
			PCIE_AXI_DEVICE_ID_REG_DEV_ID_MASK;
		if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X4) {
			rev_id = AL_PCIE_REV_ID_2;
		} else if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X8) {
			rev_id = AL_PCIE_REV_ID_3;
		} else {
			al_warn("%s: Revision ID is unknown\n",
				__func__);
			return -EINVAL;
		}
	} else {
		al_warn("%s: Revision ID is unknown\n",
			__func__);
		return -EINVAL;
	}
	return rev_id;
}

static int
al_pcie_port_lat_rply_timers_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_latency_replay_timers *lat_rply_timers)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t	reg = 0;

	AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit);
	AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit);

	al_reg_write32(&regs->port_regs->ack_lat_rply_timer, reg);
	return 0;
}

static void
al_pcie_ib_hcrd_os_ob_reads_config_default(
	struct al_pcie_port *pcie_port)
{
	struct al_pcie_ib_hcrd_os_ob_reads_config ib_hcrd_os_ob_reads_config;

	switch (al_pcie_operating_mode_get(pcie_port)) {
	case AL_PCIE_OPERATING_MODE_RC:
		if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT;
		} else {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT;
		}
		break;

	case AL_PCIE_OPERATING_MODE_EP:
		if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT;
		} else {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT;
		}
		break;

	default:
		al_err("PCIe %d: outstanding outbound transactions could not be configured - unknown operating mode\n",
			pcie_port->port_id);
		al_assert(0);
	}

	al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &ib_hcrd_os_ob_reads_config);
}

/** return AL_TRUE if link is up, AL_FALSE otherwise */
static al_bool
al_pcie_check_link(
	struct al_pcie_port *pcie_port,
	uint8_t *ltssm_ret)
{
	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
	uint32_t info_0;
	uint8_t	ltssm_state;

	info_0 = al_reg_read32(&regs->app.debug->info_0);

	ltssm_state = AL_REG_FIELD_GET(info_0,
			PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK,
			PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT);

	al_dbg("PCIe %d: Port Debug 0: 0x%08x. LTSSM state: 0x%x\n",
		pcie_port->port_id, info_0, ltssm_state);

	if (ltssm_ret)
		*ltssm_ret = ltssm_state;

	if ((ltssm_state == AL_PCIE_LTSSM_STATE_L0) ||
			(ltssm_state == AL_PCIE_LTSSM_STATE_L0S))
		return AL_TRUE;
	return AL_FALSE;
}
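
/*
 * Usage sketch (illustrative, not a verbatim caller): link-up paths poll
 * this helper using the wait constants defined above, with al_udelay()
 * assumed to be the platform-services microsecond delay:
 *
 *	int i;
 *
 *	for (i = 0; i < AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC; i++) {
 *		if (al_pcie_check_link(pcie_port, NULL) == AL_TRUE)
 *			break;
 *		al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
 *	}
 */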

static int
al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port,
				const struct al_pcie_gen2_params *gen2_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t gen2_ctrl;

	al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, Tx compliance receive %s, set Deemphasis %s\n",
	       pcie_port->port_id,
	       gen2_params->tx_swing_low ? "Low" : "Full",
	       gen2_params->tx_compliance_receive_enable ? "enable" : "disable",
	       gen2_params->set_deemphasis ? "enable" : "disable");
	gen2_ctrl = al_reg_read32(&regs->port_regs->gen2_ctrl);

	if (gen2_params->tx_swing_low)
		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
	else
		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);

	if (gen2_params->tx_compliance_receive_enable)
		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
	else
		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);

	if (gen2_params->set_deemphasis)
		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
	else
		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);

	al_reg_write32(&regs->port_regs->gen2_ctrl, gen2_ctrl);

	return 0;
}

static uint16_t
gen3_lane_eq_param_to_val(const struct al_pcie_gen3_lane_eq_params *eq_params)
{
	uint16_t eq_control = 0;

	eq_control = eq_params->downstream_port_transmitter_preset & 0xF;
	eq_control |= (eq_params->downstream_port_receiver_preset_hint & 0x7) << 4;
	eq_control |= (eq_params->upstream_port_transmitter_preset & 0xF) << 8;
	eq_control |= (eq_params->upstream_port_receiver_preset_hint & 0x7) << 12;

	return eq_control;
}
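
/*
 * Worked example (illustrative): the 16-bit lane EQ control word packs the
 * four preset fields as [3:0] downstream Tx preset, [6:4] downstream Rx
 * preset hint, [11:8] upstream Tx preset, [14:12] upstream Rx preset hint.
 * Presets {4, 2, 5, 3} therefore pack as:
 *
 *	(4 & 0xF) | ((2 & 0x7) << 4) | ((5 & 0xF) << 8) | ((3 & 0x7) << 12)
 *		== 0x3524
 */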

static int
al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
				const struct al_pcie_gen3_params *gen3_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t reg = 0;
	uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(regs->core_space[0].pcie_sec_ext_cap_base + (0xC >> 2));
	int i;

	al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n",
	       pcie_port->port_id,
	       gen3_params->perform_eq ? "enable" : "disable",
	       gen3_params->interrupt_enable_on_link_eq_request ? "enable" : "disable");

	if (gen3_params->perform_eq)
		AL_REG_BIT_SET(reg, 0);
	if (gen3_params->interrupt_enable_on_link_eq_request)
		AL_REG_BIT_SET(reg, 1);

	al_reg_write32(regs->core_space[0].pcie_sec_ext_cap_base + (4 >> 2),
		       reg);

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	for (i = 0; i < gen3_params->eq_params_elements; i += 2) {
		uint32_t eq_control =
			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) |
			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16;

		al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1);
		al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control);
	}

	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);

	reg = al_reg_read32(&regs->port_regs->gen3_ctrl);
	if (gen3_params->eq_disable)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);

	if (gen3_params->eq_phase2_3_disable)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);

	al_reg_write32(&regs->port_regs->gen3_ctrl, reg);

	reg = 0;
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK,
			 PCIE_PORT_GEN3_EQ_LF_SHIFT,
			 gen3_params->local_lf);
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK,
			 PCIE_PORT_GEN3_EQ_FS_SHIFT,
			 gen3_params->local_fs);

	al_reg_write32(&regs->port_regs->gen3_eq_fs_lf, reg);

	reg = 0;
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK,
			 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT,
			 gen3_params->local_lf);
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK,
			 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT,
			 gen3_params->local_fs);
	al_reg_write32(regs->axi.conf.zero_lane0, reg);
	al_reg_write32(regs->axi.conf.zero_lane1, reg);
	al_reg_write32(regs->axi.conf.zero_lane2, reg);
	al_reg_write32(regs->axi.conf.zero_lane3, reg);
	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		al_reg_write32(regs->axi.conf.zero_lane4, reg);
		al_reg_write32(regs->axi.conf.zero_lane5, reg);
		al_reg_write32(regs->axi.conf.zero_lane6, reg);
		al_reg_write32(regs->axi.conf.zero_lane7, reg);
	}

	/*
	 * Gen3 EQ Control Register:
	 * - Preset Request Vector - request 9
	 * - Behavior After 24 ms Timeout (when optimal settings are not
	 *   found): Recovery.Equalization.RcvrLock
	 * - Phase2_3 2 ms Timeout Disable
	 * - Feedback Mode - Figure Of Merit
	 */
	reg = 0x00020031;
	al_reg_write32(&regs->port_regs->gen3_eq_ctrl, reg);

	return 0;
}

static int
al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
			      const struct al_pcie_pf_config_params *pf_params)
{
	struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
	struct al_pcie_regs *regs = pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	int bar_idx;
	int ret;

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	/* Disable D1 and D3hot capabilities */
	if (pf_params->cap_d1_d3hot_dis)
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_pm_cap_base,
			AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);

	/* Set/Clear FLR bit */
	if (pf_params->cap_flr_dis)
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_dev_cap_base,
			AL_PCI_EXP_DEVCAP_FLR, 0);
	else
		al_reg_write32_masked(
			regs->core_space[pcie_pf->pf_num].pcie_dev_cap_base,
			AL_PCI_EXP_DEVCAP_FLR, AL_PCI_EXP_DEVCAP_FLR);

	/* Disable ASPM capability */
	if (pf_params->cap_aspm_dis) {
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
			AL_PCI_EXP_LNKCAP_ASPMS, 0);
	}

	if (!pf_params->bar_params_valid) {
		ret = 0;
		goto done;
	}

	for (bar_idx = 0; bar_idx < 6;) { /* bar_idx will be incremented depending on BAR type */
		const struct al_pcie_ep_bar_params *params = pf_params->bar_params + bar_idx;
		uint32_t mask = 0;
		uint32_t ctrl = 0;
		uint32_t __iomem *bar_addr = &regs->core_space[pf_num].config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];

		if (params->enable) {
			uint64_t size = params->size;

			if (params->memory_64_bit) {
				const struct al_pcie_ep_bar_params *next_params = params + 1;
				/* 64-bit BARs start at an even index (BAR0, BAR2 or BAR4) */
				if (bar_idx & 1) {
					ret = -EINVAL;
					goto done;
				}

				/* next BAR must be disabled */
				if (next_params->enable) {
					ret = -EINVAL;
					goto done;
				}

				/* a 64-bit BAR must be a memory BAR */
				if (!params->memory_space) {
					ret = -EINVAL;
					goto done;
				}
			} else {
				if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE) {
					/* exit via 'done' so writes to RO are re-disabled */
					ret = -EINVAL;
					goto done;
				}
				/* 32-bit space can't be prefetchable */
				if (params->memory_is_prefetchable) {
					ret = -EINVAL;
					goto done;
				}
			}

			if (params->memory_space) {
				if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
					al_err("PCIe %d: memory BAR %d: size (0x%jx) less than the minimal allowed value\n",
						pcie_port->port_id, bar_idx,
						(uintmax_t)size);
					ret = -EINVAL;
					goto done;
				}
			} else {
				/* IO can't be prefetchable */
				if (params->memory_is_prefetchable) {
					ret = -EINVAL;
					goto done;
				}

				if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
					al_err("PCIe %d: IO BAR %d: size (0x%jx) less than the minimal allowed value\n",
						pcie_port->port_id, bar_idx,
						(uintmax_t)size);
					ret = -EINVAL;
					goto done;
				}
			}

			/* size must be a power of 2 */
			if (size & (size - 1)) {
				al_err("PCIe %d: BAR %d: size (0x%jx) must be "
					"a power of 2\n",
					pcie_port->port_id, bar_idx, (uintmax_t)size);
				ret = -EINVAL;
				goto done;
			}

			/* If BAR is 64-bit, disable the next BAR before
			 * configuring this one
			 */
			if (params->memory_64_bit)
				al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, 0);

			mask = 1; /* enable bit */
			mask |= (params->size - 1) & 0xFFFFFFFF;

			al_reg_write32_dbi_cs2(pcie_port, bar_addr, mask);

			if (params->memory_space == AL_FALSE)
				ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
			if (params->memory_64_bit)
				ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
			if (params->memory_is_prefetchable)
				ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
			al_reg_write32(bar_addr, ctrl);

			if (params->memory_64_bit) {
				mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
				al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, mask);
			}

		} else {
			al_reg_write32_dbi_cs2(pcie_port, bar_addr, mask);
		}
		if (params->enable && params->memory_64_bit)
			bar_idx += 2;
		else
			bar_idx += 1;
	}

	if (pf_params->exp_bar_params.enable) {
		if (pcie_port->rev_id != AL_PCIE_REV_ID_3) {
			al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
			ret = -ENOSYS;
			goto done;
		} else {
			/* Enable exp ROM */
			uint32_t __iomem *exp_rom_bar_addr =
			&regs->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
			uint32_t mask = 1; /* enable bit */
			mask |= (pf_params->exp_bar_params.size - 1) & 0xFFFFFFFF;
			al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr, mask);
		}
	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		/* Disable exp ROM */
		uint32_t __iomem *exp_rom_bar_addr =
			&regs->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
		al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr, 0);
	}

	/* Open CPU generated msi and legacy interrupts in pcie wrapper logic */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_0, (1 << 21));
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_3, (1 << 18));
	} else {
		al_assert(0);
		ret = -ENOSYS;
		goto done;
	}

	/**
	 * Addressing RMN: 1547
	 *
	 * RMN description:
	 * 1. Whenever writing to 0x2xx offset, the write also happens to
	 * 0x3xx address, meaning two registers are written instead of one.
	 * 2. Read and write from 0x3xx work ok.
	 *
	 * Software flow:
	 * Backup the value of the app.int_grp_a.mask_a register, because
	 * app.int_grp_a.mask_clear_a gets overwritten during the write to
	 * app.soc.mask_msi_leg_0 register.
	 * Restore the original value after the write to app.soc.mask_msi_leg_0
	 * register.
	 */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_3, (1 << 19));
	} else {
		al_assert(0);
		ret = -ENOSYS;
		goto done;
	}

	ret = 0;

done:
	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);

	return ret;
}
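
/*
 * Worked example (illustrative): for a 64 KB 32-bit memory BAR, the CS2
 * shadow write above programs mask = 1 | (0x10000 - 1) = 0x0000FFFF (bit 0
 * doubles as the BAR-enable bit in the shadow space), and the subsequent
 * regular write sets only the type bits (I/O, 64-bit, prefetchable) in the
 * BAR register itself.
 */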

static int
al_pcie_port_sris_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_sris_params *sris_params,
	enum al_pcie_link_speed link_speed)
{
	int rc = 0;
	struct al_pcie_regs *regs = pcie_port->regs;

	if (sris_params->use_defaults) {
		sris_params->kp_counter_gen3 = (pcie_port->rev_id > AL_PCIE_REV_ID_1) ?
						PCIE_SRIS_KP_COUNTER_GEN3_DEFAULT_VAL : 0;
		sris_params->kp_counter_gen21 = PCIE_SRIS_KP_COUNTER_GEN21_DEFAULT_VAL;

		al_dbg("PCIe %d: configuring SRIS with default values kp_gen3[%d] kp_gen21[%d]\n",
			pcie_port->port_id,
			sris_params->kp_counter_gen3,
			sris_params->kp_counter_gen21);
	}

	switch (pcie_port->rev_id) {
	case AL_PCIE_REV_ID_3:
		al_reg_write32_masked(&regs->app.cfg_func_ext->cfg,
				PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE,
				PCIE_W_CFG_FUNC_EXT_CFG_APP_SRIS_MODE);
		/* fall through - rev 3 also needs the rev 2 KP counter setup */
	case AL_PCIE_REV_ID_2:
		al_reg_write32_masked(regs->app.global_ctrl.sris_kp_counter,
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_MASK |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_MASK |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN,
			(sris_params->kp_counter_gen3 <<
				PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_SHIFT) |
			(sris_params->kp_counter_gen21 <<
				PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_SHIFT) |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN);
		break;

	case AL_PCIE_REV_ID_1:
		if ((link_speed == AL_PCIE_LINK_SPEED_GEN3) && (sris_params->kp_counter_gen3)) {
			al_err("PCIe %d: cannot config Gen%d SRIS with rev_id[%d]\n",
				pcie_port->port_id, al_pcie_speed_gen_code(link_speed),
				pcie_port->rev_id);
			return -EINVAL;
		}

		al_reg_write32_masked(&regs->port_regs->filter_mask_reg_1,
			PCIE_FLT_MASK_SKP_INT_VAL_MASK,
			sris_params->kp_counter_gen21);
		break;

	default:
		al_err("PCIe %d: SRIS config is not supported in rev_id[%d]\n",
			pcie_port->port_id, pcie_port->rev_id);
		al_assert(0);
		return -EINVAL;
	}

	return rc;
}

static void
al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_reg_write32_masked(
		&regs->port_regs->vc0_posted_rcv_q_ctrl,
		RADM_PQ_HCRD_VC0_MASK,
		(pcie_port->ib_hcrd_config.nof_p_hdr - 1)
			<< RADM_PQ_HCRD_VC0_SHIFT);

	al_reg_write32_masked(
		&regs->port_regs->vc0_non_posted_rcv_q_ctrl,
		RADM_NPQ_HCRD_VC0_MASK,
		(pcie_port->ib_hcrd_config.nof_np_hdr - 1)
			<< RADM_NPQ_HCRD_VC0_SHIFT);
}
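
/*
 * Note (illustrative): the VC0 header-credit fields are programmed as
 * "value - 1"; e.g. with the rev 1/2 RC default of nof_p_hdr = 31, the
 * RADM_PQ_HCRD_VC0 field above is written with 30.
 */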

static unsigned int
al_pcie_port_max_num_of_pfs_get(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t max_func_num;
	uint32_t max_num_of_pfs;

	/**
	 * Only in REV3, when port is already enabled, max_num_of_pfs is already
	 * initialized, return it. Otherwise, return default: 1 PF
	 */
	if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
		&& al_pcie_port_is_enabled(pcie_port)) {
		max_func_num = al_reg_read32(&regs->port_regs->timer_ctrl_max_func_num);
		max_num_of_pfs = AL_REG_FIELD_GET(max_func_num, PCIE_PORT_GEN3_MAX_FUNC_NUM, 0) + 1;
		return max_num_of_pfs;
	}
	return 1;
}

/** Enable ecrc generation in outbound atu (Addressing RMN: 5119) */
static void al_pcie_ecrc_gen_ob_atu_enable(struct al_pcie_port *pcie_port, unsigned int pf_num)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	int max_ob_atu = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
		AL_PCIE_REV_3_ATU_NUM_OUTBOUND_REGIONS : AL_PCIE_REV_1_2_ATU_NUM_OUTBOUND_REGIONS;
	int i;

	for (i = 0; i < max_ob_atu; i++) {
		al_bool enable = 0;
		uint32_t reg = 0;
		unsigned int func_num;

		AL_REG_FIELD_SET(reg, 0xF, 0, i);
		AL_REG_BIT_VAL_SET(reg, 31, AL_PCIE_ATU_DIR_OUTBOUND);
		al_reg_write32(&regs->port_regs->iatu.index, reg);
		reg = al_reg_read32(&regs->port_regs->iatu.cr2);
		enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
		reg = al_reg_read32(&regs->port_regs->iatu.cr1);
		func_num = AL_REG_FIELD_GET(reg,
				PCIE_IATU_CR1_FUNC_NUM_MASK,
				PCIE_IATU_CR1_FUNC_NUM_SHIFT);
		if ((enable == AL_TRUE) && (pf_num == func_num)) {
			/* Set TD bit */
			AL_REG_BIT_SET(reg, 8);
			al_reg_write32(&regs->port_regs->iatu.cr1, reg);
		}
	}
}
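
/*
 * The loop above follows the iATU "viewport" pattern (sketch): select a
 * region by writing its index and direction to iatu.index, then access that
 * region's cr1/cr2 through the same window - here reading the enable bit
 * from cr2 and setting the TD (TLP digest, i.e. ECRC present) bit in cr1
 * for enabled regions owned by pf_num.
 */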

/******************************************************************************/
/***************************** API Implementation *****************************/
/******************************************************************************/

/*************************** PCIe Initialization API **************************/

/**
 * Initializes a PCIe port handle structure
 * Caution: this function should not read/write to any register except for
 * reading RO register (REV_ID for example)
 */
int
al_pcie_port_handle_init(
	struct al_pcie_port	*pcie_port,
	void __iomem		*pcie_reg_base,
	void __iomem		*pbs_reg_base,
	unsigned int		port_id)
{
	int i, ret;

	pcie_port->pcie_reg_base = pcie_reg_base;
	pcie_port->regs = &pcie_port->regs_ptrs;
	pcie_port->ex_regs = NULL;
	pcie_port->pbs_regs = pbs_reg_base;
	pcie_port->port_id = port_id;
	pcie_port->max_lanes = 0;

	ret = al_pcie_rev_id_get(pbs_reg_base, pcie_reg_base);
	if (ret < 0)
		return ret;

	pcie_port->rev_id = ret;

	/* Zero all regs */
	al_memset(pcie_port->regs, 0, sizeof(struct al_pcie_regs));

	if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		struct al_pcie_rev1_regs __iomem *regs =
			(struct al_pcie_rev1_regs __iomem *)pcie_reg_base;

		pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
		pcie_port->regs->axi.ctrl.master_rctl = &regs->axi.ctrl.master_rctl;
		pcie_port->regs->axi.ctrl.master_ctl = &regs->axi.ctrl.master_ctl;
		pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
		pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
		pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
		pcie_port->regs->axi.ob_ctrl.cfg_target_bus = &regs->axi.ob_ctrl.cfg_target_bus;
		pcie_port->regs->axi.ob_ctrl.cfg_control = &regs->axi.ob_ctrl.cfg_control;
		pcie_port->regs->axi.ob_ctrl.io_start_l = &regs->axi.ob_ctrl.io_start_l;
		pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
		pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
		pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
		pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
		pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
		pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
		pcie_port->regs->axi.conf.zero_lane2 = &regs->axi.conf.zero_lane2;
		pcie_port->regs->axi.conf.zero_lane3 = &regs->axi.conf.zero_lane3;
		pcie_port->regs->axi.status.lane[0] = &regs->axi.status.lane0;
		pcie_port->regs->axi.status.lane[1] = &regs->axi.status.lane1;
		pcie_port->regs->axi.status.lane[2] = &regs->axi.status.lane2;
		pcie_port->regs->axi.status.lane[3] = &regs->axi.status.lane3;
		pcie_port->regs->axi.parity.en_axi = &regs->axi.parity.en_axi;
		pcie_port->regs->axi.ordering.pos_cntl = &regs->axi.ordering.pos_cntl;
		pcie_port->regs->axi.pre_configuration.pcie_core_setup = &regs->axi.pre_configuration.pcie_core_setup;
		pcie_port->regs->axi.init_fc.cfg = &regs->axi.init_fc.cfg;
		pcie_port->regs->axi.int_grp_a = &regs->axi.int_grp_a;

		pcie_port->regs->app.global_ctrl.port_init = &regs->app.global_ctrl.port_init;
		pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
		pcie_port->regs->app.global_ctrl.events_gen[0] = &regs->app.global_ctrl.events_gen;
		pcie_port->regs->app.debug = &regs->app.debug;
		pcie_port->regs->app.soc_int[0].status_0 = &regs->app.soc_int.status_0;
		pcie_port->regs->app.soc_int[0].status_1 = &regs->app.soc_int.status_1;
		pcie_port->regs->app.soc_int[0].status_2 = &regs->app.soc_int.status_2;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = &regs->app.soc_int.mask_inta_leg_0;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_1 = &regs->app.soc_int.mask_inta_leg_1;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_2 = &regs->app.soc_int.mask_inta_leg_2;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = &regs->app.soc_int.mask_msi_leg_0;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_1 = &regs->app.soc_int.mask_msi_leg_1;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_2 = &regs->app.soc_int.mask_msi_leg_2;
		pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
		pcie_port->regs->app.parity = &regs->app.parity;
		pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
		pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
		pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
		pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;

		pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
		pcie_port->regs->core_space[0].pcie_pm_cap_base = &regs->core_space.pcie_pm_cap_base;
		pcie_port->regs->core_space[0].pcie_cap_base = &regs->core_space.pcie_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_cap_base = &regs->core_space.pcie_dev_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_ctrl_status = &regs->core_space.pcie_dev_ctrl_status;
		pcie_port->regs->core_space[0].pcie_link_cap_base = &regs->core_space.pcie_link_cap_base;
		pcie_port->regs->core_space[0].msix_cap_base = &regs->core_space.msix_cap_base;
		pcie_port->regs->core_space[0].aer = &regs->core_space.aer;
		pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = &regs->core_space.pcie_sec_ext_cap_base;

		pcie_port->regs->port_regs = &regs->core_space.port_regs;

	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_2) {
		struct al_pcie_rev2_regs __iomem *regs =
			(struct al_pcie_rev2_regs __iomem *)pcie_reg_base;

		pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
		pcie_port->regs->axi.ctrl.master_rctl = &regs->axi.ctrl.master_rctl;
		pcie_port->regs->axi.ctrl.master_ctl = &regs->axi.ctrl.master_ctl;
		pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
		pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
		pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
		pcie_port->regs->axi.ob_ctrl.cfg_target_bus = &regs->axi.ob_ctrl.cfg_target_bus;
		pcie_port->regs->axi.ob_ctrl.cfg_control = &regs->axi.ob_ctrl.cfg_control;
		pcie_port->regs->axi.ob_ctrl.io_start_l = &regs->axi.ob_ctrl.io_start_l;
		pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
		pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
		pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
		pcie_port->regs->axi.ob_ctrl.tgtid_reg_ovrd = &regs->axi.ob_ctrl.tgtid_reg_ovrd;
		pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_sel = &regs->axi.ob_ctrl.addr_high_reg_ovrd_sel;
		pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_value = &regs->axi.ob_ctrl.addr_high_reg_ovrd_value;
		pcie_port->regs->axi.ob_ctrl.addr_size_replace = &regs->axi.ob_ctrl.addr_size_replace;
		pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
		pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
		pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
		pcie_port->regs->axi.conf.zero_lane2 = &regs->axi.conf.zero_lane2;
		pcie_port->regs->axi.conf.zero_lane3 = &regs->axi.conf.zero_lane3;
		pcie_port->regs->axi.status.lane[0] = &regs->axi.status.lane0;
		pcie_port->regs->axi.status.lane[1] = &regs->axi.status.lane1;
		pcie_port->regs->axi.status.lane[2] = &regs->axi.status.lane2;
		pcie_port->regs->axi.status.lane[3] = &regs->axi.status.lane3;
		pcie_port->regs->axi.parity.en_axi = &regs->axi.parity.en_axi;
		pcie_port->regs->axi.ordering.pos_cntl = &regs->axi.ordering.pos_cntl;
		pcie_port->regs->axi.pre_configuration.pcie_core_setup = &regs->axi.pre_configuration.pcie_core_setup;
		pcie_port->regs->axi.init_fc.cfg = &regs->axi.init_fc.cfg;
		pcie_port->regs->axi.int_grp_a = &regs->axi.int_grp_a;

		pcie_port->regs->app.global_ctrl.port_init = &regs->app.global_ctrl.port_init;
		pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
		pcie_port->regs->app.global_ctrl.events_gen[0] = &regs->app.global_ctrl.events_gen;
		pcie_port->regs->app.global_ctrl.corr_err_sts_int = &regs->app.global_ctrl.pended_corr_err_sts_int;
		pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = &regs->app.global_ctrl.pended_uncorr_err_sts_int;
		pcie_port->regs->app.global_ctrl.sris_kp_counter = &regs->app.global_ctrl.sris_kp_counter_value;
		pcie_port->regs->app.debug = &regs->app.debug;
		pcie_port->regs->app.ap_user_send_msg = &regs->app.ap_user_send_msg;
		pcie_port->regs->app.soc_int[0].status_0 = &regs->app.soc_int.status_0;
		pcie_port->regs->app.soc_int[0].status_1 = &regs->app.soc_int.status_1;
		pcie_port->regs->app.soc_int[0].status_2 = &regs->app.soc_int.status_2;
		pcie_port->regs->app.soc_int[0].status_3 = &regs->app.soc_int.status_3;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = &regs->app.soc_int.mask_inta_leg_0;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_1 = &regs->app.soc_int.mask_inta_leg_1;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_2 = &regs->app.soc_int.mask_inta_leg_2;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_3 = &regs->app.soc_int.mask_inta_leg_3;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = &regs->app.soc_int.mask_msi_leg_0;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_1 = &regs->app.soc_int.mask_msi_leg_1;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_2 = &regs->app.soc_int.mask_msi_leg_2;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_3 = &regs->app.soc_int.mask_msi_leg_3;
		pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
		pcie_port->regs->app.parity = &regs->app.parity;
		pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
		pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
		pcie_port->regs->app.status_per_func[0] = &regs->app.status_per_func;
		pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
		pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;

		pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
		pcie_port->regs->core_space[0].pcie_pm_cap_base = &regs->core_space.pcie_pm_cap_base;
		pcie_port->regs->core_space[0].pcie_cap_base = &regs->core_space.pcie_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_cap_base = &regs->core_space.pcie_dev_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_ctrl_status = &regs->core_space.pcie_dev_ctrl_status;
		pcie_port->regs->core_space[0].pcie_link_cap_base = &regs->core_space.pcie_link_cap_base;
		pcie_port->regs->core_space[0].msix_cap_base = &regs->core_space.msix_cap_base;
		pcie_port->regs->core_space[0].aer = &regs->core_space.aer;
		pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = &regs->core_space.pcie_sec_ext_cap_base;

		pcie_port->regs->port_regs = &regs->core_space.port_regs;

	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		struct al_pcie_rev3_regs __iomem *regs =
			(struct al_pcie_rev3_regs __iomem *)pcie_reg_base;
		pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
		pcie_port->regs->axi.ctrl.master_rctl = &regs->axi.ctrl.master_rctl;
		pcie_port->regs->axi.ctrl.master_ctl = &regs->axi.ctrl.master_ctl;
		pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
		pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
		pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
		pcie_port->regs->axi.ob_ctrl.cfg_target_bus = &regs->axi.ob_ctrl.cfg_target_bus;
		pcie_port->regs->axi.ob_ctrl.cfg_control = &regs->axi.ob_ctrl.cfg_control;
		pcie_port->regs->axi.ob_ctrl.io_start_l = &regs->axi.ob_ctrl.io_start_l;
		pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
		pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
		pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
		pcie_port->regs->axi.ob_ctrl.io_addr_mask_h = &regs->axi.ob_ctrl.io_addr_mask_h;
		pcie_port->regs->axi.ob_ctrl.ar_msg_addr_mask_h = &regs->axi.ob_ctrl.ar_msg_addr_mask_h;
		pcie_port->regs->axi.ob_ctrl.aw_msg_addr_mask_h = &regs->axi.ob_ctrl.aw_msg_addr_mask_h;
		pcie_port->regs->axi.ob_ctrl.tgtid_reg_ovrd = &regs->axi.ob_ctrl.tgtid_reg_ovrd;
		pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_sel = &regs->axi.ob_ctrl.addr_high_reg_ovrd_sel;
		pcie_port->regs->axi.ob_ctrl.addr_high_reg_ovrd_value = &regs->axi.ob_ctrl.addr_high_reg_ovrd_value;
		pcie_port->regs->axi.ob_ctrl.addr_size_replace = &regs->axi.ob_ctrl.addr_size_replace;
		pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
		pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
		pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
		pcie_port->regs->axi.conf.zero_lane2 = &regs->axi.conf.zero_lane2;
		pcie_port->regs->axi.conf.zero_lane3 = &regs->axi.conf.zero_lane3;
		pcie_port->regs->axi.conf.zero_lane4 = &regs->axi.conf.zero_lane4;
		pcie_port->regs->axi.conf.zero_lane5 = &regs->axi.conf.zero_lane5;
		pcie_port->regs->axi.conf.zero_lane6 = &regs->axi.conf.zero_lane6;
		pcie_port->regs->axi.conf.zero_lane7 = &regs->axi.conf.zero_lane7;
		pcie_port->regs->axi.status.lane[0] = &regs->axi.status.lane0;
		pcie_port->regs->axi.status.lane[1] = &regs->axi.status.lane1;
		pcie_port->regs->axi.status.lane[2] = &regs->axi.status.lane2;
		pcie_port->regs->axi.status.lane[3] = &regs->axi.status.lane3;
		pcie_port->regs->axi.status.lane[4] = &regs->axi.status.lane4;
		pcie_port->regs->axi.status.lane[5] = &regs->axi.status.lane5;
		pcie_port->regs->axi.status.lane[6] = &regs->axi.status.lane6;
		pcie_port->regs->axi.status.lane[7] = &regs->axi.status.lane7;
		pcie_port->regs->axi.parity.en_axi = &regs->axi.parity.en_axi;
		pcie_port->regs->axi.ordering.pos_cntl = &regs->axi.ordering.pos_cntl;
		pcie_port->regs->axi.pre_configuration.pcie_core_setup = &regs->axi.pre_configuration.pcie_core_setup;
		pcie_port->regs->axi.init_fc.cfg = &regs->axi.init_fc.cfg;
		pcie_port->regs->axi.int_grp_a = &regs->axi.int_grp_a;
		pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_0 = &regs->axi.axi_attr_ovrd.write_msg_ctrl_0;
		pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_1 = &regs->axi.axi_attr_ovrd.write_msg_ctrl_1;
		pcie_port->regs->axi.axi_attr_ovrd.pf_sel = &regs->axi.axi_attr_ovrd.pf_sel;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_0 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_0;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_1 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_1;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_2 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_2;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_3 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_3;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_4 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_4;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_5 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_5;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_6 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_6;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_7 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_7;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_8 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_8;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_9 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_9;
		}

		pcie_port->regs->axi.msg_attr_axuser_table.entry_vec = &regs->axi.msg_attr_axuser_table.entry_vec;

		pcie_port->regs->app.global_ctrl.port_init = &regs->app.global_ctrl.port_init;
		pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
		pcie_port->regs->app.global_ctrl.corr_err_sts_int = &regs->app.global_ctrl.pended_corr_err_sts_int;
		pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = &regs->app.global_ctrl.pended_uncorr_err_sts_int;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->app.global_ctrl.events_gen[i] = &regs->app.events_gen_per_func[i].events_gen;
		}

		pcie_port->regs->app.global_ctrl.sris_kp_counter = &regs->app.global_ctrl.sris_kp_counter_value;
		pcie_port->regs->app.debug = &regs->app.debug;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->app.soc_int[i].status_0 = &regs->app.soc_int_per_func[i].status_0;
			pcie_port->regs->app.soc_int[i].status_1 = &regs->app.soc_int_per_func[i].status_1;
			pcie_port->regs->app.soc_int[i].status_2 = &regs->app.soc_int_per_func[i].status_2;
			pcie_port->regs->app.soc_int[i].status_3 = &regs->app.soc_int_per_func[i].status_3;
			pcie_port->regs->app.soc_int[i].mask_inta_leg_0 = &regs->app.soc_int_per_func[i].mask_inta_leg_0;
			pcie_port->regs->app.soc_int[i].mask_inta_leg_1 = &regs->app.soc_int_per_func[i].mask_inta_leg_1;
			pcie_port->regs->app.soc_int[i].mask_inta_leg_2 = &regs->app.soc_int_per_func[i].mask_inta_leg_2;
			pcie_port->regs->app.soc_int[i].mask_inta_leg_3 = &regs->app.soc_int_per_func[i].mask_inta_leg_3;
			pcie_port->regs->app.soc_int[i].mask_msi_leg_0 = &regs->app.soc_int_per_func[i].mask_msi_leg_0;
			pcie_port->regs->app.soc_int[i].mask_msi_leg_1 = &regs->app.soc_int_per_func[i].mask_msi_leg_1;
			pcie_port->regs->app.soc_int[i].mask_msi_leg_2 = &regs->app.soc_int_per_func[i].mask_msi_leg_2;
			pcie_port->regs->app.soc_int[i].mask_msi_leg_3 = &regs->app.soc_int_per_func[i].mask_msi_leg_3;
		}

		pcie_port->regs->app.ap_user_send_msg = &regs->app.ap_user_send_msg;
		pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
		pcie_port->regs->app.parity = &regs->app.parity;
		pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
		pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
		pcie_port->regs->app.cfg_func_ext = &regs->app.cfg_func_ext;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++)
			pcie_port->regs->app.status_per_func[i] = &regs->app.status_per_func[i];

		pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
		pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;
		pcie_port->regs->app.int_grp_c = &regs->app.int_grp_c;
		pcie_port->regs->app.int_grp_d = &regs->app.int_grp_d;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->core_space[i].config_header = regs->core_space.func[i].config_header;
			pcie_port->regs->core_space[i].pcie_pm_cap_base = &regs->core_space.func[i].pcie_pm_cap_base;
			pcie_port->regs->core_space[i].pcie_cap_base = &regs->core_space.func[i].pcie_cap_base;
			pcie_port->regs->core_space[i].pcie_dev_cap_base = &regs->core_space.func[i].pcie_dev_cap_base;
			pcie_port->regs->core_space[i].pcie_dev_ctrl_status = &regs->core_space.func[i].pcie_dev_ctrl_status;
			pcie_port->regs->core_space[i].pcie_link_cap_base = &regs->core_space.func[i].pcie_link_cap_base;
			pcie_port->regs->core_space[i].msix_cap_base = &regs->core_space.func[i].msix_cap_base;
			pcie_port->regs->core_space[i].aer = &regs->core_space.func[i].aer;
			pcie_port->regs->core_space[i].tph_cap_base = &regs->core_space.func[i].tph_cap_base;
		}

		/* secondary extension capability only for PF0 */
		pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = &regs->core_space.func[0].pcie_sec_ext_cap_base;

		pcie_port->regs->port_regs = &regs->core_space.func[0].port_regs;

	} else {
		al_warn("%s: Revision ID is unknown\n",
			__func__);
		return -EINVAL;
	}

	/* set maximum number of physical functions */
	pcie_port->max_num_of_pfs = al_pcie_port_max_num_of_pfs_get(pcie_port);

	/* Clear 'nof_p_hdr' & 'nof_np_hdr' to later know if they were changed by the user */
	pcie_port->ib_hcrd_config.nof_np_hdr = 0;
	pcie_port->ib_hcrd_config.nof_p_hdr = 0;

	al_dbg("pcie port handle initialized. port id: %d, rev_id %d, regs base %p\n",
	       port_id, pcie_port->rev_id, pcie_reg_base);
	return 0;
}
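
/*
 * Usage sketch (illustrative, assuming platform-provided register bases):
 *
 *	struct al_pcie_port port;
 *
 *	if (al_pcie_port_handle_init(&port, pcie_reg_base, pbs_reg_base, 0))
 *		return;
 *	al_pcie_port_operating_mode_config(&port, AL_PCIE_OPERATING_MODE_RC);
 *	al_pcie_port_max_lanes_set(&port, 4);
 *
 * Operating mode and lane count must be set before the port is enabled;
 * both functions below reject an already-enabled port.
 */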

/**
 * Initializes a PCIe Physical function handle structure
 * Caution: this function should not read/write to any register except for
 * reading RO register (REV_ID for example)
 */
int
al_pcie_pf_handle_init(
	struct al_pcie_pf *pcie_pf,
	struct al_pcie_port *pcie_port,
	unsigned int pf_num)
{
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
	al_assert(pf_num < pcie_port->max_num_of_pfs);

	if (op_mode != AL_PCIE_OPERATING_MODE_EP) {
		al_err("PCIe %d: can't init PF handle with operating mode [%d]\n",
			pcie_port->port_id, op_mode);
		return -EINVAL;
	}

	pcie_pf->pf_num = pf_num;
	pcie_pf->pcie_port = pcie_port;

	al_dbg("PCIe %d: pf handle initialized. pf number: %d, rev_id %d, regs %p\n",
	       pcie_port->port_id, pcie_pf->pf_num, pcie_port->rev_id,
	       pcie_port->regs);
	return 0;
}

/** Get port revision ID */
int al_pcie_port_rev_id_get(struct al_pcie_port *pcie_port)
{
	return pcie_port->rev_id;
}
1313 
1314 /************************** Pre PCIe Port Enable API **************************/
1315 
1316 /** configure pcie operating mode (root complex or endpoint) */
1317 int
1318 al_pcie_port_operating_mode_config(
1319 	struct al_pcie_port *pcie_port,
1320 	enum al_pcie_operating_mode mode)
1321 {
1322 	struct al_pcie_regs *regs = pcie_port->regs;
1323 	uint32_t reg, device_type, new_device_type;
1324 
1325 	if (al_pcie_port_is_enabled(pcie_port)) {
1326 		al_err("PCIe %d: already enabled, cannot set operating mode\n",
1327 			pcie_port->port_id);
1328 		return -EINVAL;
1329 	}
1330 
1331 	reg = al_reg_read32(regs->axi.pcie_global.conf);
1332 
1333 	device_type = AL_REG_FIELD_GET(reg,
1334 			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1335 			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1336 	if (mode == AL_PCIE_OPERATING_MODE_EP) {
1337 		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP;
1338 	} else if (mode == AL_PCIE_OPERATING_MODE_RC) {
1339 		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC;
1340 
1341 		if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1342 			/* config 1 PF in RC mode */
1343 			al_reg_write32_masked(regs->axi.axi_attr_ovrd.pf_sel,
1344 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_AXUSER |
1345 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
1346 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_ADDR_OFFSET_MASK |
1347 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT0_OVRD |
1348 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_AXUSER |
1349 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG |
1350 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_ADDR_OFFSET_MASK |
1351 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT1_OVRD,
1352 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
1353 				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG);
1354 		}
1355 	} else {
1356 		al_err("PCIe %d: unknown operating mode: %d\n", pcie_port->port_id, mode);
1357 		return -EINVAL;
1358 	}
1359 
1360 	if (new_device_type == device_type) {
1361 		al_dbg("PCIe %d: operating mode already set to %s\n",
1362 		       pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
1363 		       "EndPoint" : "Root Complex");
1364 		return 0;
1365 	}
1366 	al_dbg("PCIe %d: set operating mode to %s\n",
1367 		pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
1368 		"EndPoint" : "Root Complex");
1369 	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1370 			 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT,
1371 			 new_device_type);
1372 
1373 	al_reg_write32(regs->axi.pcie_global.conf, reg);
1374 
1375 	return 0;
1376 }
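
/*
 * Usage sketch (illustrative, not part of the HAL): selecting Root Complex
 * mode before the port is enabled. The handle `port` is hypothetical and
 * assumed to have been initialized via the port handle init function.
 *
 *	int err;
 *
 *	err = al_pcie_port_operating_mode_config(&port,
 *		AL_PCIE_OPERATING_MODE_RC);
 *	if (err)
 *		return err;
 */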
1377 
1378 int
1379 al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
1380 {
1381 	struct al_pcie_regs *regs = pcie_port->regs;
1382 	uint32_t active_lanes_val;
1383 
1384 	if (al_pcie_port_is_enabled(pcie_port)) {
1385 		al_err("PCIe %d: already enabled, cannot set max lanes\n",
1386 			pcie_port->port_id);
1387 		return -EINVAL;
1388 	}
1389 
1390 	/* convert to bitmask format (4 -> 'b1111, 2 -> 'b11, 1 -> 'b1) */
1391 	active_lanes_val = AL_PCIE_PARSE_LANES(lanes);
1392 
1393 	al_reg_write32_masked(regs->axi.pcie_global.conf,
1394 		(pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1395 		PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1396 		PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1397 		active_lanes_val);
1398 
1399 	pcie_port->max_lanes = lanes;
1400 	return 0;
1401 }
1402 
1403 int
1404 al_pcie_port_max_num_of_pfs_set(
1405 	struct al_pcie_port *pcie_port,
1406 	uint8_t max_num_of_pfs)
1407 {
1408 	struct al_pcie_regs *regs = pcie_port->regs;
1409 
1410 	if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
1411 		al_assert(max_num_of_pfs <= REV3_MAX_NUM_OF_PFS);
1412 	else
1413 		al_assert(max_num_of_pfs == REV1_2_MAX_NUM_OF_PFS);
1414 
1415 	pcie_port->max_num_of_pfs = max_num_of_pfs;
1416 
1417 	if (al_pcie_port_is_enabled(pcie_port) && (pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
1418 		enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
1419 
1420 		al_bool is_multi_pf =
1421 			((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1));
1422 
1423 		/* Set maximum physical function numbers */
1424 		al_reg_write32_masked(
1425 			&regs->port_regs->timer_ctrl_max_func_num,
1426 			PCIE_PORT_GEN3_MAX_FUNC_NUM,
1427 			pcie_port->max_num_of_pfs - 1);
1428 
1429 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1430 
1431 		/**
1432 		 * in EP mode, when we have more than 1 PF, we need to assert
1433 		 * multi-PF support so the host scans all PFs
1434 		 */
1435 		al_reg_write32_masked((uint32_t __iomem *)
1436 			(&regs->core_space[0].config_header[0] +
1437 			(PCIE_BIST_HEADER_TYPE_BASE >> 2)),
1438 			PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
1439 			is_multi_pf ? PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK : 0);
1440 
1441 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1442 	}
1443 
1444 	return 0;
1445 }
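
/*
 * Usage sketch (illustrative): pre-enable lane and PF setup, typically done
 * right after the operating mode is chosen. The values (4 lanes, 1 PF) are
 * arbitrary examples and must match the board's actual configuration.
 *
 *	al_pcie_port_max_lanes_set(&port, 4);
 *	al_pcie_port_max_num_of_pfs_set(&port, 1);
 */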
1446 
1447 /* Inbound header credits and outstanding outbound reads configuration */
1448 int
1449 al_pcie_port_ib_hcrd_os_ob_reads_config(
1450 	struct al_pcie_port *pcie_port,
1451 	struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config)
1452 {
1453 	struct al_pcie_regs *regs = pcie_port->regs;
1454 
1455 	if (al_pcie_port_is_enabled(pcie_port)) {
1456 		al_err("PCIe %d: already enabled, cannot configure IB credits and OB OS reads\n",
1457 			pcie_port->port_id);
1458 		return -EINVAL;
1459 	}
1460 
1461 	al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0);
1462 
1463 	al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0);
1464 
1465 	al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0);
1466 
1467 	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1468 		al_assert(
1469 			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
1470 			ib_hcrd_os_ob_reads_config->nof_np_hdr +
1471 			ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
1472 			AL_PCIE_REV3_IB_HCRD_SUM);
1473 
1474 		al_reg_write32_masked(
1475 			regs->axi.init_fc.cfg,
1476 			PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_MASK |
1477 			PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_MASK |
1478 			PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_MASK,
1479 			(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
1480 			 PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
1481 			(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
1482 			 PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
1483 			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
1484 			 PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
1485 	} else {
1486 		al_assert(
1487 			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
1488 			ib_hcrd_os_ob_reads_config->nof_np_hdr +
1489 			ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
1490 			AL_PCIE_REV_1_2_IB_HCRD_SUM);
1491 
1492 		al_reg_write32_masked(
1493 			regs->axi.init_fc.cfg,
1494 			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_MASK |
1495 			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_MASK |
1496 			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_MASK,
1497 			(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
1498 			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
1499 			(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
1500 			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
1501 			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
1502 			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
1503 	}
1504 
1505 	al_reg_write32_masked(
1506 		regs->axi.pre_configuration.pcie_core_setup,
1507 		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
1508 		ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads <<
1509 		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);
1510 
1511 	/* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */
1512 	pcie_port->ib_hcrd_config.nof_np_hdr =
1513 		ib_hcrd_os_ob_reads_config->nof_np_hdr;
1514 	pcie_port->ib_hcrd_config.nof_p_hdr =
1515 		ib_hcrd_os_ob_reads_config->nof_p_hdr;
1516 
1517 	return 0;
1518 }
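
/*
 * Usage sketch (illustrative): overriding the credit defaults before the
 * port is enabled. `ob_reads`, `p_hdr`, `np_hdr` and `cpl_hdr` are
 * hypothetical caller-chosen values; the three header counts must add up
 * to the revision's IB_HCRD sum, as asserted above.
 *
 *	struct al_pcie_ib_hcrd_os_ob_reads_config crd = {
 *		.nof_outstanding_ob_reads = ob_reads,
 *		.nof_p_hdr = p_hdr,
 *		.nof_np_hdr = np_hdr,
 *		.nof_cpl_hdr = cpl_hdr,
 *	};
 *
 *	al_pcie_port_ib_hcrd_os_ob_reads_config(&port, &crd);
 */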
1519 
1520 enum al_pcie_operating_mode
1521 al_pcie_operating_mode_get(
1522 	struct al_pcie_port *pcie_port)
1523 {
1524 	struct al_pcie_regs *regs = pcie_port->regs;
1525 	uint32_t reg, device_type;
1526 
1527 	al_assert(pcie_port);
1528 
1529 	reg = al_reg_read32(regs->axi.pcie_global.conf);
1530 
1531 	device_type = AL_REG_FIELD_GET(reg,
1532 			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1533 			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1534 
1535 	switch (device_type) {
1536 	case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP:
1537 		return AL_PCIE_OPERATING_MODE_EP;
1538 	case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC:
1539 		return AL_PCIE_OPERATING_MODE_RC;
1540 	default:
1541 		al_err("PCIe %d: unknown device type (%d) in global conf register.\n",
1542 			pcie_port->port_id, device_type);
1543 	}
1544 	return AL_PCIE_OPERATING_MODE_UNKNOWN;
1545 }
1546 
1547 /* PCIe AXI quality of service configuration */
1548 void al_pcie_axi_qos_config(
1549 	struct al_pcie_port	*pcie_port,
1550 	unsigned int		arqos,
1551 	unsigned int		awqos)
1552 {
1553 	struct al_pcie_regs *regs = pcie_port->regs;
1554 
1555 	al_assert(pcie_port);
1556 	al_assert(arqos <= PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_VAL_MAX);
1557 	al_assert(awqos <= PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_VAL_MAX);
1558 
1559 	al_reg_write32_masked(
1560 		regs->axi.ctrl.master_arctl,
1561 		PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_MASK,
1562 		arqos << PCIE_AXI_CTRL_MASTER_ARCTL_ARQOS_SHIFT);
1563 	al_reg_write32_masked(
1564 		regs->axi.ctrl.master_awctl,
1565 		PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_MASK,
1566 		awqos << PCIE_AXI_CTRL_MASTER_AWCTL_AWQOS_SHIFT);
1567 }
1568 
1569 /**************************** PCIe Port Enable API ****************************/
1570 
1571 /** Enable PCIe port (deassert reset) */
1572 int
1573 al_pcie_port_enable(struct al_pcie_port *pcie_port)
1574 {
1575 	struct al_pbs_regs *pbs_reg_base =
1576 				(struct al_pbs_regs *)pcie_port->pbs_regs;
1577 	struct al_pcie_regs *regs = pcie_port->regs;
1578 	unsigned int port_id = pcie_port->port_id;
1579 
1580 	/* pre-port-enable default functionality should be here */
1581 
1582 	/**
1583 	 * Set inbound header credit and outstanding outbound reads defaults
1584 	 * if the port initiator didn't set them.
1585 	 * Must be called before port enable (PCIE_EXIST)
1586 	 */
1587 	if ((pcie_port->ib_hcrd_config.nof_np_hdr == 0) ||
1588 			(pcie_port->ib_hcrd_config.nof_p_hdr == 0))
1589 		al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);
1590 
1591 	/*
1592 	 * Disable ATS capability
1593 	 * - must be done before core reset is deasserted
1594 	 * - rev_id 0 - no effect, but no harm
1595 	 */
1596 	if ((pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
1597 		(pcie_port->rev_id == AL_PCIE_REV_ID_2)) {
1598 		al_reg_write32_masked(
1599 			regs->axi.ordering.pos_cntl,
1600 			PCIE_AXI_CORE_SETUP_ATS_CAP_DIS,
1601 			PCIE_AXI_CORE_SETUP_ATS_CAP_DIS);
1602 	}
1603 
1604 	/* Deassert core reset */
1605 	al_reg_write32_masked(
1606 		&pbs_reg_base->unit.pcie_conf_1,
1607 		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1608 		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT));
1609 
1610 	return 0;
1611 }
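
/*
 * Usage sketch (illustrative): the expected call ordering around port
 * enable. Operating mode, lanes and credits are pre-enable settings; port
 * configuration and link start come after. `port` is hypothetical.
 *
 *	al_pcie_port_operating_mode_config(&port, AL_PCIE_OPERATING_MODE_RC);
 *	al_pcie_port_enable(&port);
 *	... al_pcie_port_config(), al_pcie_link_start(), ...
 */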
1612 
1613 /** Disable PCIe port (assert reset) */
1614 void
1615 al_pcie_port_disable(struct al_pcie_port *pcie_port)
1616 {
1617 	struct al_pbs_regs *pbs_reg_base =
1618 				(struct al_pbs_regs *)pcie_port->pbs_regs;
1619 	unsigned int port_id = pcie_port->port_id;
1620 
1621 	if (!al_pcie_port_is_enabled(pcie_port)) {
1622 		al_warn("PCIe %d: trying to disable a non-enabled port\n",
1623 			pcie_port->port_id);
1624 	}
1625 
1626 	/* Assert core reset */
1627 	al_reg_write32_masked(
1628 		&pbs_reg_base->unit.pcie_conf_1,
1629 		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1630 		0);
1631 }
1632 
1633 int
1634 al_pcie_port_memory_shutdown_set(
1635 	struct al_pcie_port	*pcie_port,
1636 	al_bool			enable)
1637 {
1638 	struct al_pcie_regs *regs = pcie_port->regs;
1639 	uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1640 		PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN :
1641 		PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
1642 
1643 	if (!al_pcie_port_is_enabled(pcie_port)) {
1644 		al_err("PCIe %d: not enabled, cannot shutdown memory\n",
1645 			pcie_port->port_id);
1646 		return -EINVAL;
1647 	}
1648 
1649 	al_reg_write32_masked(regs->axi.pcie_global.conf,
1650 		mask, enable == AL_TRUE ? mask : 0);
1651 
1652 	return 0;
1653 }
1654 
1655 al_bool
1656 al_pcie_port_is_enabled(struct al_pcie_port *pcie_port)
1657 {
1658 	struct al_pbs_regs *pbs_reg_base = (struct al_pbs_regs *)pcie_port->pbs_regs;
1659 	uint32_t pcie_exist = al_reg_read32(&pbs_reg_base->unit.pcie_conf_1);
1660 
1661 	uint32_t ports_enabled = AL_REG_FIELD_GET(pcie_exist,
1662 		PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK,
1663 		PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT);
1664 
1665 	return (AL_REG_FIELD_GET(ports_enabled, AL_BIT(pcie_port->port_id),
1666 		pcie_port->port_id) == 1);
1667 }
1668 
1669 /*************************** PCIe Configuration API ***************************/
1670 
1671 /** configure pcie port (link params, etc..) */
1672 int
1673 al_pcie_port_config(struct al_pcie_port *pcie_port,
1674 			const struct al_pcie_port_config_params *params)
1675 {
1676 	struct al_pcie_regs *regs = pcie_port->regs;
1677 	enum al_pcie_operating_mode op_mode;
1678 	int status = 0;
1679 	int i;
1680 
1681 	if (!al_pcie_port_is_enabled(pcie_port)) {
1682 		al_err("PCIe %d: port not enabled, cannot configure port\n",
1683 			pcie_port->port_id);
1684 		return -EINVAL;
1685 	}
1686 
1687 	if (al_pcie_is_link_started(pcie_port)) {
1688 		al_err("PCIe %d: link already started, cannot configure port\n",
1689 			pcie_port->port_id);
1690 		return -EINVAL;
1691 	}
1692 
1693 	al_assert(pcie_port);
1694 	al_assert(params);
1695 
1696 	al_dbg("PCIe %d: port config\n", pcie_port->port_id);
1697 
1698 	op_mode = al_pcie_operating_mode_get(pcie_port);
1699 
1700 	/* if max lanes not specified, read it from the register */
1701 	if (pcie_port->max_lanes == 0) {
1702 		uint32_t global_conf = al_reg_read32(regs->axi.pcie_global.conf);
1703 		uint32_t act_lanes = AL_REG_FIELD_GET(global_conf,
1704 			(pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1705 			PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1706 			PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1707 			PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT);
1708 
1709 		switch(act_lanes) {
1710 		case 0x1:
1711 			pcie_port->max_lanes = 1;
1712 			break;
1713 		case 0x3:
1714 			pcie_port->max_lanes = 2;
1715 			break;
1716 		case 0xf:
1717 			pcie_port->max_lanes = 4;
1718 			break;
1719 		case 0xff:
1720 			pcie_port->max_lanes = 8;
1721 			break;
1722 		default:
1723 			pcie_port->max_lanes = 0;
1724 			al_err("PCIe %d: invalid max lanes val (0x%x)\n", pcie_port->port_id, act_lanes);
1725 			break;
1726 		}
1727 	}
1728 
1729 	if (params->link_params)
1730 		status = al_pcie_port_link_config(pcie_port, params->link_params);
1731 	if (status)
1732 		goto done;
1733 
1734 	/* Change max read request size to 256 bytes
1735 	 * Max Payload Size remains untouched - it is the responsibility of
1736 	 * the host to change the MPS, if needed.
1737 	 */
1738 	for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1739 		al_reg_write32_masked(regs->core_space[i].pcie_dev_ctrl_status,
1740 			PCIE_PORT_DEV_CTRL_STATUS_MRRS_MASK,
1741 			PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_256);
1742 		if (pcie_port->rev_id != AL_PCIE_REV_ID_3)
1743 			break;
1744 	}
1745 
1746 	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1747 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1748 
1749 		/* Disable TPH next pointer */
1750 		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1751 			al_reg_write32_masked(regs->core_space[i].tph_cap_base,
1752 			PCIE_TPH_NEXT_POINTER, 0);
1753 		}
1754 
1755 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1756 	}
1757 
1758 
1759 	status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
1760 	if (status)
1761 		goto done;
1762 
1763 	al_pcie_port_max_num_of_pfs_set(pcie_port, pcie_port->max_num_of_pfs);
1764 
1765 	al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
1766 
1767 	al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
1768 
1769 	al_pcie_port_relaxed_pcie_ordering_config(pcie_port, params->relaxed_ordering_params);
1770 
1771 	if (params->lat_rply_timers)
1772 		status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
1773 	if (status)
1774 		goto done;
1775 
1776 	if (params->gen2_params)
1777 		status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
1778 	if (status)
1779 		goto done;
1780 
1781 	if (params->gen3_params)
1782 		status = al_pcie_port_gen3_params_config(pcie_port, params->gen3_params);
1783 	if (status)
1784 		goto done;
1785 
1786 	if (params->sris_params)
1787 		status = al_pcie_port_sris_config(pcie_port, params->sris_params,
1788 						params->link_params->max_speed);
1789 	if (status)
1790 		goto done;
1791 
1792 	al_pcie_port_ib_hcrd_config(pcie_port);
1793 
1794 	if (params->fast_link_mode) {
1795 		al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
1796 			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
1797 			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
1798 	}
1799 
1800 	if (params->enable_axi_slave_err_resp)
1801 		al_reg_write32_masked(&regs->port_regs->axi_slave_err_resp,
1802 				1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
1803 				1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
1804 
1805 	/**
1806 	 * Addressing RMN: 5477
1807 	 *
1808 	 * RMN description:
1809 	 * the address-decoder logic performs sub-target decoding even for
1810 	 * transactions which undergo target enforcement. Thus, if a transaction's
1811 	 * address is inside any ECAM BAR, the sub-target decoding will be set to
1812 	 * ECAM, which causes wrong handling by the PCIe unit
1813 	 *
1814 	 * Software flow:
1815 	 * in EP mode only, turning on the iATU-enable bit (with the relevant mask
1816 	 * below) allows the PCIe unit to discard the ECAM bit which was asserted
1817 	 * by mistake in the address decoder
1818 	 */
1819 	if (op_mode == AL_PCIE_OPERATING_MODE_EP) {
1820 		al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1821 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1822 			(0) << PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
1823 		al_reg_write32_masked(regs->axi.ob_ctrl.cfg_control,
1824 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN,
1825 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN);
1826 	}
1827 
1828 	if (op_mode == AL_PCIE_OPERATING_MODE_RC) {
1829 		/**
1830 		 * enable memory and I/O access from port when in RC mode
1831 		 * in RC mode, only core_space[0] is valid.
1832 		 */
1833 		al_reg_write16_masked(
1834 			(uint16_t __iomem *)(&regs->core_space[0].config_header[0] + (0x4 >> 2)),
1835 			0x7, /* Mem, MSE, IO */
1836 			0x7);
1837 
1838 		/* change the class code to match pci bridge */
1839 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1840 
1841 		al_reg_write32_masked(
1842 			(uint32_t __iomem *)(&regs->core_space[0].config_header[0]
1843 			+ (PCI_CLASS_REVISION >> 2)),
1844 			0xFFFFFF00,
1845 			0x06040000);
1846 
1847 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1848 
1849 		/**
1850 		 * Addressing RMN: 5702
1851 		 *
1852 		 * RMN description:
1853 		 * the target bus mask default value in HW is 0xFE; this enforces
1854 		 * setting the target bus for ports 1 and 3 when running in RC
1855 		 * mode, since bit[20] of the ECAM address is set in these cases
1856 		 *
1857 		 * Software flow:
1858 		 * in RC mode only, set the target-bus mask value to 0xFF to
1859 		 * prevent this enforcement
1860 		 */
1861 		al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1862 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1863 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK);
1864 	}
1865 done:
1866 	al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status ? "failed" : "done");
1867 
1868 	return status;
1869 }
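
/*
 * Usage sketch (illustrative): a minimal al_pcie_port_config() call. Every
 * optional sub-parameter pointer may be left NULL, in which case the
 * corresponding step is skipped; `my_link_params` is a hypothetical,
 * caller-provided structure.
 *
 *	struct al_pcie_port_config_params cfg = { 0 };
 *
 *	cfg.link_params = &my_link_params;
 *	if (al_pcie_port_config(&port, &cfg))
 *		return -1;
 */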
1870 
1871 int
1872 al_pcie_pf_config(
1873 	struct al_pcie_pf *pcie_pf,
1874 	const struct al_pcie_pf_config_params *params)
1875 {
1876 	struct al_pcie_port *pcie_port;
1877 	int status = 0;
1878 
1879 	al_assert(pcie_pf);
1880 	al_assert(params);
1881 
1882 	pcie_port = pcie_pf->pcie_port;
1883 
1884 	if (!al_pcie_port_is_enabled(pcie_port)) {
1885 		al_err("PCIe %d: port not enabled, cannot configure port\n", pcie_port->port_id);
1886 		return -EINVAL;
1887 	}
1888 
1889 	al_dbg("PCIe %d: pf %d config\n", pcie_port->port_id, pcie_pf->pf_num);
1890 
1891 	if (params)
1892 		status = al_pcie_port_pf_params_config(pcie_pf, params);
1893 	if (status)
1894 		goto done;
1895 
1896 done:
1897 	al_dbg("PCIe %d: pf %d config %s\n",
1898 		pcie_port->port_id, pcie_pf->pf_num, status ? "failed" : "done");
1899 
1900 	return status;
1901 }
1902 
1903 /************************** PCIe Link Operations API **************************/
1904 
1905 /* start pcie link */
1906 int
1907 al_pcie_link_start(struct al_pcie_port *pcie_port)
1908 {
1909 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1910 
1911 	if (!al_pcie_port_is_enabled(pcie_port)) {
1912 		al_err("PCIe %d: port not enabled, cannot start link\n",
1913 			pcie_port->port_id);
1914 		return -EINVAL;
1915 	}
1916 
1917 	al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id);
1918 
1919 	al_reg_write32_masked(
1920 			regs->app.global_ctrl.port_init,
1921 			PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1922 			PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1923 
1924 	return 0;
1925 }
1926 
1927 /* stop pcie link */
1928 int
1929 al_pcie_link_stop(struct al_pcie_port *pcie_port)
1930 {
1931 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1932 
1933 	if (!al_pcie_is_link_started(pcie_port)) {
1934 		al_warn("PCIe %d: trying to stop a non-started link\n",
1935 			pcie_port->port_id);
1936 	}
1937 
1938 	al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id);
1939 
1940 	al_reg_write32_masked(
1941 			regs->app.global_ctrl.port_init,
1942 			PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1943 			~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1944 
1945 	return 0;
1946 }
1947 
1948 /** return AL_TRUE if link is started (LTSSM enabled) and AL_FALSE otherwise */
1949 al_bool al_pcie_is_link_started(struct al_pcie_port *pcie_port)
1950 {
1951 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1952 
1953 	uint32_t port_init = al_reg_read32(regs->app.global_ctrl.port_init);
1954 	uint8_t ltssm_en = AL_REG_FIELD_GET(port_init,
1955 		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1956 		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);
1957 
1958 	return ltssm_en;
1959 }
1960 
1961 /* wait for link up indication */
1962 int
1963 al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
1964 {
1965 	int wait_count = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC;
1966 	while (wait_count-- > 0) {
1967 	while (wait_count-- > 0)	{
1968 		if (al_pcie_check_link(pcie_port, NULL)) {
1969 			al_dbg("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
1970 			return 0;
1971 		} else
1972 			al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
1973 				pcie_port->port_id, wait_count);
1974 
1975 		al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
1976 	}
1977 	al_dbg("PCIE_%d: link was not established in time\n",
1978 				pcie_port->port_id);
1979 
1980 	return ETIMEDOUT;
1981 }
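
/*
 * Usage sketch (illustrative): starting the link and polling for link-up
 * with a hypothetical 100 ms budget; a non-zero return from
 * al_pcie_link_up_wait() means the link did not come up in time.
 *
 *	al_pcie_link_start(&port);
 *	if (al_pcie_link_up_wait(&port, 100))
 *		al_err("PCIe: no link\n");
 */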
1982 
1983 /** get link status */
1984 int
1985 al_pcie_link_status(struct al_pcie_port *pcie_port,
1986 			struct al_pcie_link_status *status)
1987 {
1988 	struct al_pcie_regs *regs = pcie_port->regs;
1989 	uint16_t	pcie_lnksta;
1990 
1991 	al_assert(status);
1992 
1993 	if (!al_pcie_port_is_enabled(pcie_port)) {
1994 		al_dbg("PCIe %d: port not enabled, no link.\n", pcie_port->port_id);
1995 		status->link_up = AL_FALSE;
1996 		status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
1997 		status->lanes = 0;
1998 		status->ltssm_state = 0;
1999 		return 0;
2000 	}
2001 
2002 	status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
2003 
2004 	if (!status->link_up) {
2005 		status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
2006 		status->lanes = 0;
2007 		return 0;
2008 	}
2009 
2010 	pcie_lnksta = al_reg_read16((uint16_t __iomem *)regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1));
2011 
2012 	switch(pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) {
2013 		case AL_PCI_EXP_LNKSTA_CLS_2_5GB:
2014 			status->speed = AL_PCIE_LINK_SPEED_GEN1;
2015 			break;
2016 		case AL_PCI_EXP_LNKSTA_CLS_5_0GB:
2017 			status->speed = AL_PCIE_LINK_SPEED_GEN2;
2018 			break;
2019 		case AL_PCI_EXP_LNKSTA_CLS_8_0GB:
2020 			status->speed = AL_PCIE_LINK_SPEED_GEN3;
2021 			break;
2022 		default:
2023 			status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
2024 			al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n",
2025 				pcie_port->port_id, pcie_lnksta);
2026 	}
2027 	status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
2028 	al_dbg("PCIe %d: Link up. speed gen%d negotiated width %d\n",
2029 		pcie_port->port_id, status->speed, status->lanes);
2030 
2031 	return 0;
2032 }
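
/*
 * Usage sketch (illustrative): querying the negotiated link parameters
 * once the link is expected to be up. `port` is hypothetical.
 *
 *	struct al_pcie_link_status st;
 *
 *	al_pcie_link_status(&port, &st);
 *	if (st.link_up)
 *		al_dbg("link up: gen%d x%d\n", st.speed, st.lanes);
 */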
2033 
2034 /** get lane status */
2035 void
2036 al_pcie_lane_status_get(
2037 	struct al_pcie_port		*pcie_port,
2038 	unsigned int			lane,
2039 	struct al_pcie_lane_status	*status)
2040 {
2041 	struct al_pcie_regs *regs = pcie_port->regs;
2042 	uint32_t lane_status;
2043 	uint32_t *reg_ptr;
2044 
2045 	al_assert(pcie_port);
2046 	al_assert(status);
2047 	al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_1) || (lane < REV1_2_MAX_NUM_LANES));
2048 	al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_2) || (lane < REV1_2_MAX_NUM_LANES));
2049 	al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_3) || (lane < REV3_MAX_NUM_LANES));
2050 
2051 	reg_ptr = regs->axi.status.lane[lane];
2052 
2053 	/* Reset field is valid only when the same value is read twice */
2054 	do {
2055 		lane_status = al_reg_read32(reg_ptr);
2056 		status->is_reset = !!(lane_status & PCIE_AXI_STATUS_LANE_IS_RESET);
2057 	} while (status->is_reset != (!!(al_reg_read32(reg_ptr) & PCIE_AXI_STATUS_LANE_IS_RESET)));
2058 
2059 	status->requested_speed =
2060 		(lane_status & PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_MASK) >>
2061 		PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_SHIFT;
2062 }
2063 
2064 /** trigger hot reset */
2065 int
2066 al_pcie_link_hot_reset(struct al_pcie_port *pcie_port, al_bool enable)
2067 {
2068 	struct al_pcie_regs *regs = pcie_port->regs;
2069 	uint32_t events_gen;
2070 	al_bool app_reset_state;
2071 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2072 
2073 	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2074 		al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2075 		return -EINVAL;
2076 	}
2077 
2078 	if (!al_pcie_is_link_started(pcie_port)) {
2079 		al_err("PCIe %d: link not started, cannot trigger hot-reset\n", pcie_port->port_id);
2080 		return -EINVAL;
2081 	}
2082 
2083 	events_gen = al_reg_read32(regs->app.global_ctrl.events_gen[0]);
2084 	app_reset_state = events_gen & PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT;
2085 
2086 	if (enable && app_reset_state) {
2087 		al_err("PCIe %d: link is already in hot-reset state\n", pcie_port->port_id);
2088 		return -EINVAL;
2089 	} else if ((!enable) && (!(app_reset_state))) {
2090 		al_err("PCIe %d: link is already in non-hot-reset state\n", pcie_port->port_id);
2091 		return -EINVAL;
2092 	} else {
2093 		al_dbg("PCIe %d: %s hot-reset\n", pcie_port->port_id,
2094 			(enable ? "enabling" : "disabling"));
2095 		/* hot-reset functionality is implemented only for function 0 */
2096 		al_reg_write32_masked(regs->app.global_ctrl.events_gen[0],
2097 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT,
2098 			(enable ? PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT
2099 				: ~PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT));
2100 		return 0;
2101 	}
2102 }
2103 
2104 /** disable port link */
2105 int
2106 al_pcie_link_disable(struct al_pcie_port *pcie_port, al_bool disable)
2107 {
2108 	struct al_pcie_regs *regs = pcie_port->regs;
2109 	uint32_t pcie_lnkctl;
2110 	al_bool link_disable_state;
2111 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2112 
2113 	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2114 		al_err("PCIe %d: link-disable is applicable only for RC mode\n", pcie_port->port_id);
2115 		return -EINVAL;
2116 	}
2117 
2118 	if (!al_pcie_is_link_started(pcie_port)) {
2119 		al_err("PCIe %d: link not started, cannot disable link\n", pcie_port->port_id);
2120 		return -EINVAL;
2121 	}
2122 
2123 	pcie_lnkctl = al_reg_read32(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1));
2124 	link_disable_state = pcie_lnkctl & AL_PCI_EXP_LNKCTL_LNK_DIS;
2125 
2126 	if (disable && link_disable_state) {
2127 		al_err("PCIe %d: link is already in disable state\n", pcie_port->port_id);
2128 		return -EINVAL;
2129 	} else if ((!disable) && (!(link_disable_state))) {
2130 		al_err("PCIe %d: link is already in enable state\n", pcie_port->port_id);
2131 		return -EINVAL;
2132 	}
2133 
2134 	al_dbg("PCIe %d: %s port\n", pcie_port->port_id, (disable ? "disabling" : "enabling"));
2135 	al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2136 		AL_PCI_EXP_LNKCTL_LNK_DIS,
2137 		(disable ? AL_PCI_EXP_LNKCTL_LNK_DIS : ~AL_PCI_EXP_LNKCTL_LNK_DIS));
2138 	return 0;
2139 }
2140 
2141 /** retrain link */
2142 int
2143 al_pcie_link_retrain(struct al_pcie_port *pcie_port)
2144 {
2145 	struct al_pcie_regs *regs = pcie_port->regs;
2146 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2147 
2148 	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2149 		al_err("PCIe %d: link-retrain is applicable only for RC mode\n",
2150 			pcie_port->port_id);
2151 		return -EINVAL;
2152 	}
2153 
2154 	if (!al_pcie_is_link_started(pcie_port)) {
2155 		al_err("PCIe %d: link not started, cannot link-retrain\n", pcie_port->port_id);
2156 		return -EINVAL;
2157 	}
2158 
2159 	al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2160 	AL_PCI_EXP_LNKCTL_LNK_RTRN, AL_PCI_EXP_LNKCTL_LNK_RTRN);
2161 
2162 	return 0;
2163 }
2164 
2165 /* trigger speed change */
2166 int
2167 al_pcie_link_change_speed(struct al_pcie_port *pcie_port,
2168 			      enum al_pcie_link_speed new_speed)
2169 {
2170 	struct al_pcie_regs *regs = pcie_port->regs;
2171 
2172 	if (!al_pcie_is_link_started(pcie_port)) {
2173 		al_err("PCIe %d: link not started, cannot change speed\n", pcie_port->port_id);
2174 		return -EINVAL;
2175 	}
2176 
2177 	al_dbg("PCIe %d: changing speed to %d\n", pcie_port->port_id, new_speed);
2178 
2179 	al_pcie_port_link_speed_ctrl_set(pcie_port, new_speed);
2180 
2181 	al_reg_write32_masked(&regs->port_regs->gen2_ctrl,
2182 		PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE,
2183 		PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE);
2184 
2185 	return 0;
2186 }
2187 
2188 /* TODO: check if this function is needed */
2189 int
2190 al_pcie_link_change_width(struct al_pcie_port *pcie_port,
2191 			      uint8_t width __attribute__((__unused__)))
2192 {
2193 	al_err("PCIe %d: link change width not implemented\n",
2194 		pcie_port->port_id);
2195 
2196 	return -ENOSYS;
2197 }
2198 
2199 /**************************** Post Link Start API *****************************/
2200 
2201 /************************** Snoop Configuration API ***************************/
2202 
2203 int
2204 al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop)
2205 {
2206 	struct al_pcie_regs *regs = pcie_port->regs;
2207 
2208 	/* Set snoop mode */
2209 	al_dbg("PCIE_%d: snoop mode %s\n",
2210 			pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");
2211 
2212 	if (enable_axi_snoop) {
2213 		al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2214 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2215 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP);
2216 
2217 		al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2218 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2219 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP);
2220 	} else {
2221 		al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2222 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2223 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP);
2224 
2225 		al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2226 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2227 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP);
2228 	}
2229 	return 0;
2230 }
2231 
2232 /************************** Configuration Space API ***************************/
2233 
2234 /** get base address of pci configuration space header */
2235 int
2236 al_pcie_config_space_get(struct al_pcie_pf *pcie_pf,
2237 			     uint8_t __iomem **addr)
2238 {
2239 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2240 
2241 	*addr = (uint8_t __iomem *)&regs->core_space[pcie_pf->pf_num].config_header[0];
2242 	return 0;
2243 }
2244 
2245 /* Read data from the local configuration space */
2246 uint32_t
2247 al_pcie_local_cfg_space_read(
2248 	struct al_pcie_pf	*pcie_pf,
2249 	unsigned int		reg_offset)
2250 {
2251 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2252 	uint32_t data;
2253 
2254 	data = al_reg_read32(&regs->core_space[pcie_pf->pf_num].config_header[reg_offset]);
2255 
2256 	return data;
2257 }
2258 
2259 /* Write data to the local configuration space */
2260 void
2261 al_pcie_local_cfg_space_write(
2262 	struct al_pcie_pf	*pcie_pf,
2263 	unsigned int		reg_offset,
2264 	uint32_t		data,
2265 	al_bool			cs2,
2266 	al_bool			allow_ro_wr)
2267 {
2268 	struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
2269 	struct al_pcie_regs *regs = pcie_port->regs;
2270 	unsigned int pf_num = pcie_pf->pf_num;
2271 	uint32_t *offset = &regs->core_space[pf_num].config_header[reg_offset];
2272 
2273 	if (allow_ro_wr)
2274 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
2275 
2276 	if (cs2 == AL_FALSE)
2277 		al_reg_write32(offset, data);
2278 	else
2279 		al_reg_write32_dbi_cs2(pcie_port, offset, data);
2280 
2281 	if (allow_ro_wr)
2282 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
2283 }
2284 
2285 /** set target_bus and mask_target_bus */
2286 int
2287 al_pcie_target_bus_set(
2288 	struct al_pcie_port *pcie_port,
2289 	uint8_t target_bus,
2290 	uint8_t mask_target_bus)
2291 {
2292 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2293 	uint32_t reg;
2294 
2295 	reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2296 	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2297 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT,
2298 			mask_target_bus);
2299 	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2300 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT,
2301 			target_bus);
2302 	al_reg_write32(regs->axi.ob_ctrl.cfg_target_bus, reg);
2303 	return 0;
2304 }
2305 
2306 /** get target_bus and mask_target_bus */
2307 int
2308 al_pcie_target_bus_get(
2309 	struct al_pcie_port *pcie_port,
2310 	uint8_t *target_bus,
2311 	uint8_t *mask_target_bus)
2312 {
2313 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2314 	uint32_t reg;
2315 
2316 	al_assert(target_bus);
2317 	al_assert(mask_target_bus);
2318 
2319 	reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2320 
2321 	*mask_target_bus = AL_REG_FIELD_GET(reg,
2322 				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2323 				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
2324 	*target_bus = AL_REG_FIELD_GET(reg,
2325 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2326 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT);
2327 	return 0;
2328 }
2329 
2330 /** Set secondary bus number */
2331 int
2332 al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus)
2333 {
2334 	struct al_pcie_regs *regs = pcie_port->regs;
2335 
2336 	uint32_t secbus_val = (secbus <<
2337 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT);
2338 
2339 	al_reg_write32_masked(
2340 		regs->axi.ob_ctrl.cfg_control,
2341 		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK,
2342 		secbus_val);
2343 	return 0;
2344 }
2345 
2346 /** Set subordinate bus number */
2347 int
2348 al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus)
2349 {
2350 	struct al_pcie_regs *regs = pcie_port->regs;
2351 
2352 	uint32_t subbus_val = (subbus <<
2353 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT);
2354 
2355 	al_reg_write32_masked(
2356 		regs->axi.ob_ctrl.cfg_control,
2357 		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK,
2358 		subbus_val);
2359 	return 0;
2360 }
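
/*
 * Usage sketch (illustrative): a hypothetical RC bus-number setup for
 * routing outbound configuration requests. The bus numbers are examples
 * only; 0xFF masks all target-bus bits.
 *
 *	al_pcie_secondary_bus_set(&port, 1);
 *	al_pcie_subordinary_bus_set(&port, 1);
 *	al_pcie_target_bus_set(&port, 1, 0xFF);
 */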
2361 
2362 /* Enable/disable deferring incoming configuration requests */
2363 void
2364 al_pcie_app_req_retry_set(
2365 	struct al_pcie_port	*pcie_port,
2366 	al_bool			en)
2367 {
2368 	struct al_pcie_regs *regs = pcie_port->regs;
2369 	uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
2370 		PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
2371 		PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
2372 
2373 	al_reg_write32_masked(regs->app.global_ctrl.pm_control,
2374 		mask, (en == AL_TRUE) ? mask : 0);
2375 }
2376 
2377 /* Check if deferring incoming configuration requests is enabled or not */
2378 al_bool al_pcie_app_req_retry_get_status(struct al_pcie_port	*pcie_port)
2379 {
2380 	struct al_pcie_regs *regs = pcie_port->regs;
2381 	uint32_t pm_control;
2382 	uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
2383 		PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
2384 		PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
2385 
2386 	pm_control = al_reg_read32(regs->app.global_ctrl.pm_control);
2387 	return (pm_control & mask) ? AL_TRUE : AL_FALSE;
2388 }
2389 
2390 /*************** Internal Address Translation Unit (ATU) API ******************/
2391 
2392 /** program internal ATU region entry */
2393 int
2394 al_pcie_atu_region_set(
2395 	struct al_pcie_port *pcie_port,
2396 	struct al_pcie_atu_region *atu_region)
2397 {
2398 	struct al_pcie_regs *regs = pcie_port->regs;
2399 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2400 	uint32_t reg = 0;
2401 
2402 	/**
2403 	 * Addressing RMN: 5384
2404 	 *
2405 	 * RMN description:
2406 	 * From SNPS (also included in the data book), "Dynamic iATU Programming
2407 	 * With AHB/AXI Bridge Module": when the bridge slave interface clock
2408 	 * (hresetn or slv_aclk) is asynchronous to the PCIe native core clock
2409 	 * (core_clk), you must not update the iATU registers while operations
2410 	 * are in progress on the AHB/AXI bridge slave interface. The iATU
2411 	 * registers are in the core_clk clock domain. The register outputs are
2412 	 * used in the AHB/AXI bridge slave interface clock domain. There is no
2413 	 * synchronization logic between these registers and the AHB/AXI bridge
2414 	 * slave interface.
2415 	 *
2416 	 * Software flow:
2417 	 * Do not allow configuring Outbound iATU after link is started
2418 	 */
2419 	if ((atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)
2420 		&& (al_pcie_is_link_started(pcie_port))) {
2421 		if (!atu_region->enforce_ob_atu_region_set) {
2422 			al_err("PCIe %d: setting OB iATU after link is started is not allowed\n",
2423 				pcie_port->port_id);
2424 			al_assert(AL_FALSE);
2425 			return -EINVAL;
2426 		} else {
2427 			al_info("PCIe %d: setting OB iATU even after link is started\n",
2428 				pcie_port->port_id);
2429 		}
2430 	}
2431 
2432 	/* TODO: add sanity check */
2433 	AL_REG_FIELD_SET(reg, 0xF, 0, atu_region->index);
2434 	AL_REG_BIT_VAL_SET(reg, 31, atu_region->direction);
2435 	al_reg_write32(&regs->port_regs->iatu.index, reg);
2436 
2437 	al_reg_write32(&regs->port_regs->iatu.lower_base_addr,
2438 			(uint32_t)(atu_region->base_addr & 0xFFFFFFFF));
2439 	al_reg_write32(&regs->port_regs->iatu.upper_base_addr,
2440 			(uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF));
2441 	al_reg_write32(&regs->port_regs->iatu.lower_target_addr,
2442 			(uint32_t)(atu_region->target_addr & 0xFFFFFFFF));
2443 	al_reg_write32(&regs->port_regs->iatu.upper_target_addr,
2444 			(uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF));
2445 
2446 	/* configure the limit, not needed when working in BAR match mode */
2447 	if (atu_region->match_mode == 0) {
2448 		uint32_t limit_reg_val;
2449 		uint32_t *limit_ext_reg =
2450 			(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
2451 			&regs->app.atu.out_mask_pair[atu_region->index / 2] :
2452 			&regs->app.atu.in_mask_pair[atu_region->index / 2];
2453 		uint32_t limit_ext_reg_mask =
2454 			(atu_region->index % 2) ?
2455 			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
2456 			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
2457 		unsigned int limit_ext_reg_shift =
2458 			(atu_region->index % 2) ?
2459 			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
2460 			PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
2461 		uint64_t limit_sz_msk =
2462 			atu_region->limit - atu_region->base_addr;
2463 		uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
2464 					32) & 0xFFFFFFFF);
2465 
2466 		if (limit_ext_reg_val) {
2467 			limit_reg_val =	(uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
2468 			al_assert(limit_reg_val == 0xFFFFFFFF);
2469 		} else {
2470 			limit_reg_val = (uint32_t)(atu_region->limit &
2471 					0xFFFFFFFF);
2472 		}
2473 
2474 		al_reg_write32_masked(
2475 				limit_ext_reg,
2476 				limit_ext_reg_mask,
2477 				limit_ext_reg_val << limit_ext_reg_shift);
2478 
2479 		al_reg_write32(&regs->port_regs->iatu.limit_addr,
2480 				limit_reg_val);
2481 	}
2482 
2483 
2484 	/**
2485 	* Addressing RMN: 3186
2486 	*
2487 	* RMN description:
2488 	* Bug in SNPS IP (versions 4.21, 4.10a-ea02):
2489 	* in a CFG request created via outbound ATU (shift mode), bits [27:12] go
2490 	* to [31:16]. The shifting is correct; however, the ATU leaves bits [15:12]
2491 	* at their original values, which are then transmitted in the TLP.
2492 	* Those bits are currently reserved, but might be non-reserved in future
2493 	* generations.
2494 	*
2495 	* Software flow:
2496 	* Enable the HW fix:
2497 	* rev=REV1,REV2: set bit 15 in the corresponding app_reg.atu.out_mask
2498 	* rev>REV2: set the corresponding bit in app_reg.atu.reg_out_mask
2498 	*/
2499 	if ((atu_region->cfg_shift_mode == AL_TRUE) &&
2500 		(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)) {
2501 		if (pcie_port->rev_id > AL_PCIE_REV_ID_2) {
2502 			al_reg_write32_masked(regs->app.atu.reg_out_mask,
2503 			1 << (atu_region->index) ,
2504 			1 << (atu_region->index));
2505 		} else {
2506 			uint32_t *limit_ext_reg =
2507 				(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
2508 				&regs->app.atu.out_mask_pair[atu_region->index / 2] :
2509 				&regs->app.atu.in_mask_pair[atu_region->index / 2];
2510 			uint32_t limit_ext_reg_mask =
2511 				(atu_region->index % 2) ?
2512 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
2513 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
2514 			unsigned int limit_ext_reg_shift =
2515 				(atu_region->index % 2) ?
2516 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
2517 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
2518 
2519 			al_reg_write32_masked(
2520 				limit_ext_reg,
2521 				limit_ext_reg_mask,
2522 				(AL_BIT(15)) << limit_ext_reg_shift);
2523 		}
2524 	}
2525 
2526 	reg = 0;
2527 	AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type);
2528 	AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr);
2529 
2530 
2531 	if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
2532 		&& (op_mode == AL_PCIE_OPERATING_MODE_EP)
2533 		&& (atu_region->function_match_bypass_mode)) {
2534 		AL_REG_FIELD_SET(reg,
2535 			PCIE_IATU_CR1_FUNC_NUM_MASK,
2536 			PCIE_IATU_CR1_FUNC_NUM_SHIFT,
2537 			atu_region->function_match_bypass_mode_number);
2538 	}
2539 
2540 	al_reg_write32(&regs->port_regs->iatu.cr1, reg);
2541 
2542 	/* cr2: matching controls and region enable/disable */
2543 	reg = 0;
2544 	AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code);
2545 	AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number);
2546 	AL_REG_FIELD_SET(reg, 0x3 << 24, 24, atu_region->response);
2547 	AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE);
2548 	AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE);
2549 	AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE);
2550 	AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE);
2551 	if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO)
2552 		AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode);
2553 	AL_REG_BIT_VAL_SET(reg, 31, !!atu_region->enable);
2554 
2555 	/* In outbound, enable function bypass
2556 	 * In inbound, enable function match mode
2557 	 * Note: this is the same bit; it has different meanings in OB/IB ATUs
2558 	 */
2559 	if (op_mode == AL_PCIE_OPERATING_MODE_EP)
2560 		AL_REG_FIELD_SET(reg,
2561 			PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_MASK,
2562 			PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_SHIFT,
2563 			atu_region->function_match_bypass_mode ? 0x1 : 0x0);
2564 
2565 	al_reg_write32(&regs->port_regs->iatu.cr2, reg);
2566 
2567 	return 0;
2568 }
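
/*
 * Usage sketch (illustrative): programming one outbound memory window. All
 * addresses are hypothetical; fields not listed stay zero (address match
 * mode, no shift/bypass), and `enable` is what activates the region.
 *
 *	struct al_pcie_atu_region win = { 0 };
 *
 *	win.direction   = AL_PCIE_ATU_DIR_OUTBOUND;
 *	win.index       = 0;
 *	win.tlp_type    = AL_PCIE_TLP_TYPE_MEM;
 *	win.base_addr   = 0xf8000000UL;
 *	win.limit       = 0xf8ffffffUL;
 *	win.target_addr = 0;
 *	win.enable      = AL_TRUE;
 *
 *	al_pcie_atu_region_set(&port, &win);
 */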
2569 
2570 /** obtains internal ATU region base/target addresses */
2571 void
2572 al_pcie_atu_region_get_fields(
2573 	struct al_pcie_port *pcie_port,
2574 	enum al_pcie_atu_dir direction, uint8_t index,
2575 	al_bool *enable, uint64_t *base_addr, uint64_t *target_addr)
2576 {
2577 	struct al_pcie_regs *regs = pcie_port->regs;
2578 	uint64_t high_addr;
2579 	uint32_t reg = 0;
2580 
2581 	AL_REG_FIELD_SET(reg, 0xF, 0, index);
2582 	AL_REG_BIT_VAL_SET(reg, 31, direction);
2583 	al_reg_write32(&regs->port_regs->iatu.index, reg);
2584 
2585 	*base_addr = al_reg_read32(&regs->port_regs->iatu.lower_base_addr);
2586 	high_addr = al_reg_read32(&regs->port_regs->iatu.upper_base_addr);
2587 	high_addr <<= 32;
2588 	*base_addr |= high_addr;
2589 
2590 	*target_addr = al_reg_read32(&regs->port_regs->iatu.lower_target_addr);
2591 	high_addr = al_reg_read32(&regs->port_regs->iatu.upper_target_addr);
2592 	high_addr <<= 32;
2593 	*target_addr |= high_addr;
2594 
2595 	reg = al_reg_read32(&regs->port_regs->iatu.cr1);
2596 	*enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
2597 }
2598 
2599 void
2600 al_pcie_axi_io_config(
2601 	struct al_pcie_port *pcie_port,
2602 	al_phys_addr_t start,
2603 	al_phys_addr_t end)
2604 {
2605 	struct al_pcie_regs *regs = pcie_port->regs;
2606 
2607 	al_reg_write32(regs->axi.ob_ctrl.io_start_h,
2608 			(uint32_t)((start >> 32) & 0xFFFFFFFF));
2609 
2610 	al_reg_write32(regs->axi.ob_ctrl.io_start_l,
2611 			(uint32_t)(start & 0xFFFFFFFF));
2612 
2613 	al_reg_write32(regs->axi.ob_ctrl.io_limit_h,
2614 			(uint32_t)((end >> 32) & 0xFFFFFFFF));
2615 
2616 	al_reg_write32(regs->axi.ob_ctrl.io_limit_l,
2617 			(uint32_t)(end & 0xFFFFFFFF));
2618 
2619 	al_reg_write32_masked(regs->axi.ctrl.slv_ctl,
2620 			      PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN,
2621 			      PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
2622 }
2623 
2624 /************** Interrupt and Event generation (Endpoint mode Only) API *****************/
2625 
2626 int al_pcie_pf_flr_done_gen(struct al_pcie_pf		*pcie_pf)
2627 {
2628 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2629 	unsigned int pf_num = pcie_pf->pf_num;
2630 
2631 	al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
2632 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE,
2633 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE);
2634 	al_udelay(AL_PCIE_FLR_DONE_INTERVAL);
2635 	al_reg_write32_masked(regs->app.global_ctrl.events_gen[pf_num],
2636 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_FLR_PF_DONE, 0);
2637 	return 0;
2638 }
2639 
2640 
2641 /** generate INTx Assert/DeAssert Message */
2642 int
2643 al_pcie_legacy_int_gen(
2644 	struct al_pcie_pf		*pcie_pf,
2645 	al_bool				assert,
2646 	enum al_pcie_legacy_int_type	type)
2647 {
2648 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2649 	unsigned int pf_num = pcie_pf->pf_num;
2650 	uint32_t reg;
2651 
2652 	al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */
2653 	reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2654 	AL_REG_BIT_VAL_SET(reg, 3, !!assert);
2655 	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2656 
2657 	return 0;
2658 }
2659 
2660 /** generate MSI interrupt */
2661 int
2662 al_pcie_msi_int_gen(struct al_pcie_pf *pcie_pf, uint8_t vector)
2663 {
2664 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2665 	unsigned int pf_num = pcie_pf->pf_num;
2666 	uint32_t reg;
2667 
2668 	/* set msi vector and clear MSI request */
2669 	reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2670 	AL_REG_BIT_CLEAR(reg, 4);
2671 	AL_REG_FIELD_SET(reg,
2672 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK,
2673 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT,
2674 			vector);
2675 	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2676 	/* set MSI request */
2677 	AL_REG_BIT_SET(reg, 4);
2678 	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2679 
2680 	return 0;
2681 }
2682 
2683 /** configure MSIX capability */
2684 int
2685 al_pcie_msix_config(
2686 	struct al_pcie_pf *pcie_pf,
2687 	struct al_pcie_msix_params *msix_params)
2688 {
2689 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2690 	unsigned int pf_num = pcie_pf->pf_num;
2691 	uint32_t msix_reg0;
2692 
2693 	al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_TRUE);
2694 
2695 	msix_reg0 = al_reg_read32(regs->core_space[pf_num].msix_cap_base);
2696 
2697 	msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT);
2698 	msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) <<
2699 			AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT;
2700 	al_reg_write32(regs->core_space[pf_num].msix_cap_base, msix_reg0);
2701 
2702 	/* Table offset & BAR */
2703 	al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_TABLE >> 2),
2704 		       (msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) |
2705 			       (msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR));
2706 	/* PBA offset & BAR */
2707 	al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_PBA >> 2),
2708 		       (msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) |
2709 			       (msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR));
2710 
2711 	al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_FALSE);
2712 
2713 	return 0;
2714 }
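
/*
 * Usage sketch (illustrative): exposing a 32-entry MSI-X table in BAR 0.
 * The offsets are hypothetical and must match the BAR layout the endpoint
 * actually implements; `pf` is a hypothetical al_pcie_pf handle.
 *
 *	struct al_pcie_msix_params msix = { 0 };
 *
 *	msix.table_size   = 32;
 *	msix.table_bar    = 0;
 *	msix.table_offset = 0;
 *	msix.pba_bar      = 0;
 *	msix.pba_offset   = 0x1000;
 *
 *	al_pcie_msix_config(&pf, &msix);
 */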
2715 
2716 /** check whether MSIX is enabled */
2717 al_bool
2718 al_pcie_msix_enabled(struct al_pcie_pf	*pcie_pf)
2719 {
2720 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2721 	uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2722 
2723 	if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_EN)
2724 		return AL_TRUE;
2725 	return AL_FALSE;
2726 }
2727 
2728 /** check whether MSIX is masked */
2729 al_bool
2730 al_pcie_msix_masked(struct al_pcie_pf *pcie_pf)
2731 {
2732 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2733 	uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2734 
2735 	if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_MASK)
2736 		return AL_TRUE;
2737 	return AL_FALSE;
2738 }
2739 
2740 /******************** Advanced Error Reporting (AER) API **********************/
2741 /************************* Auxiliary functions ********************************/
2742 /* configure AER capability */
2743 static int
2744 al_pcie_aer_config_aux(
2745 		struct al_pcie_port		*pcie_port,
2746 		unsigned int	pf_num,
2747 		struct al_pcie_aer_params	*params)
2748 {
2749 	struct al_pcie_regs *regs = pcie_port->regs;
2750 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2751 	uint32_t reg_val;
2752 
2753 	reg_val = al_reg_read32(&aer_regs->header);
2754 
2755 	if (((reg_val & PCIE_AER_CAP_ID_MASK) >> PCIE_AER_CAP_ID_SHIFT) !=
2756 		PCIE_AER_CAP_ID_VAL)
2757 		return -EIO;
2758 
2759 	if (((reg_val & PCIE_AER_CAP_VER_MASK) >> PCIE_AER_CAP_VER_SHIFT) !=
2760 		PCIE_AER_CAP_VER_VAL)
2761 		return -EIO;
2762 
2763 	al_reg_write32(&aer_regs->corr_err_mask, ~params->enabled_corr_err);
2764 
2765 	al_reg_write32(&aer_regs->uncorr_err_mask,
2766 		(~params->enabled_uncorr_non_fatal_err) |
2767 		(~params->enabled_uncorr_fatal_err));
2768 
2769 	al_reg_write32(&aer_regs->uncorr_err_severity,
2770 		params->enabled_uncorr_fatal_err);
2771 
2772 	al_reg_write32(&aer_regs->cap_and_ctrl,
2773 		(params->ecrc_gen_en ? PCIE_AER_CTRL_STAT_ECRC_GEN_EN : 0) |
2774 		(params->ecrc_chk_en ? PCIE_AER_CTRL_STAT_ECRC_CHK_EN : 0));
2775 
2776 	/**
2777 	 * Addressing RMN: 5119
2778 	 *
2779 	 * RMN description:
2780 	 * ECRC generation for outbound requests translated by the iATU is
2781 	 * affected by the iATU setting instead of the ecrc_gen_bit in AER
2782 	 *
2783 	 * Software flow:
2784 	 * When enabling ECRC generation, set the outbound iATU to generate ECRC
2785 	 */
2786 	if (params->ecrc_gen_en == AL_TRUE) {
2787 		al_pcie_ecrc_gen_ob_atu_enable(pcie_port, pf_num);
2788 	}
2789 
2790 	al_reg_write32_masked(
2791 		regs->core_space[pf_num].pcie_dev_ctrl_status,
2792 		PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN |
2793 		PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN |
2794 		PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN |
2795 		PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN,
2796 		(params->enabled_corr_err ?
2797 		 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN : 0) |
2798 		(params->enabled_uncorr_non_fatal_err ?
2799 		 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN : 0) |
2800 		(params->enabled_uncorr_fatal_err ?
2801 		 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN : 0) |
2802 		((params->enabled_uncorr_non_fatal_err &
2803 		  AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2804 		 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0) |
2805 		((params->enabled_uncorr_fatal_err &
2806 		  AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2807 		 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0));
2808 
2809 	return 0;
2810 }
2811 
2812 /** AER uncorrectable errors get and clear */
2813 static unsigned int
2814 al_pcie_aer_uncorr_get_and_clear_aux(
2815 		struct al_pcie_port		*pcie_port,
2816 		unsigned int	pf_num)
2817 {
2818 	struct al_pcie_regs *regs = pcie_port->regs;
2819 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2820 	uint32_t reg_val;
2821 
2822 	reg_val = al_reg_read32(&aer_regs->uncorr_err_stat);
2823 	al_reg_write32(&aer_regs->uncorr_err_stat, reg_val);
2824 
2825 	return reg_val;
2826 }
2827 
2828 /** AER correctable errors get and clear */
2829 static unsigned int
2830 al_pcie_aer_corr_get_and_clear_aux(
2831 		struct al_pcie_port		*pcie_port,
2832 		unsigned int	pf_num)
2833 {
2834 	struct al_pcie_regs *regs = pcie_port->regs;
2835 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2836 	uint32_t reg_val;
2837 
2838 	reg_val = al_reg_read32(&aer_regs->corr_err_stat);
2839 	al_reg_write32(&aer_regs->corr_err_stat, reg_val);
2840 
2841 	return reg_val;
2842 }
2843 
2844 #if (AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS != 4)
2845 #error Wrong assumption!
2846 #endif
2847 
2848 /** AER get the header for the TLP corresponding to a detected error */
2849 static void
2850 al_pcie_aer_err_tlp_hdr_get_aux(
2851 		struct al_pcie_port		*pcie_port,
2852 		unsigned int	pf_num,
2853 	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2854 {
2855 	struct al_pcie_regs *regs = pcie_port->regs;
2856 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pf_num].aer;
2857 	int i;
2858 
2859 	for (i = 0; i < AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS; i++)
2860 		hdr[i] = al_reg_read32(&aer_regs->header_log[i]);
2861 }
2862 
2863 /******************** EP AER functions **********************/
2864 /** configure EP physical function AER capability */
2865 int al_pcie_aer_config(
2866 		struct al_pcie_pf *pcie_pf,
2867 		struct al_pcie_aer_params	*params)
2868 {
2869 	al_assert(pcie_pf);
2870 	al_assert(params);
2871 
2872 	return al_pcie_aer_config_aux(
2873 			pcie_pf->pcie_port, pcie_pf->pf_num, params);
2874 }
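
/*
 * Usage sketch (illustrative): enabling AER reporting with ECRC on a PF.
 * The all-ones masks below unmask every correctable and uncorrectable
 * error and mark the uncorrectable ones fatal; real callers would select
 * specific AL_PCIE_AER_* bits instead.
 *
 *	struct al_pcie_aer_params aer = { 0 };
 *
 *	aer.enabled_corr_err = ~0;
 *	aer.enabled_uncorr_non_fatal_err = ~0;
 *	aer.enabled_uncorr_fatal_err = ~0;
 *	aer.ecrc_gen_en = AL_TRUE;
 *	aer.ecrc_chk_en = AL_TRUE;
 *
 *	al_pcie_aer_config(&pf, &aer);
 */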
2875 
2876 /** EP physical function AER uncorrectable errors get and clear */
2877 unsigned int al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf *pcie_pf)
2878 {
2879 	al_assert(pcie_pf);
2880 
2881 	return al_pcie_aer_uncorr_get_and_clear_aux(
2882 			pcie_pf->pcie_port, pcie_pf->pf_num);
2883 }
2884 
2885 /** EP physical function AER correctable errors get and clear */
2886 unsigned int al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
2887 {
2888 	al_assert(pcie_pf);
2889 
2890 	return al_pcie_aer_corr_get_and_clear_aux(
2891 			pcie_pf->pcie_port, pcie_pf->pf_num);
2892 }
2893 
2894 /**
2895  * EP physical function AER: get the header of
2896  * the TLP corresponding to a detected error
2897  */
2898 void al_pcie_aer_err_tlp_hdr_get(
2899 		struct al_pcie_pf *pcie_pf,
2900 		uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2901 {
2902 	al_assert(pcie_pf);
2903 	al_assert(hdr);
2904 
2905 	al_pcie_aer_err_tlp_hdr_get_aux(
2906 			pcie_pf->pcie_port, pcie_pf->pf_num, hdr);
2907 }
2908 
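/*
 * Illustrative sketch (not part of the HAL): one way a caller could service
 * an EP AER event using the wrappers above. The function name
 * my_ep_aer_service() is hypothetical; al_err() is the HAL logging macro
 * used elsewhere in this file.
 */
#if 0
static void my_ep_aer_service(struct al_pcie_pf *pcie_pf)
{
	uint32_t uncorr;
	uint32_t corr;
	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS];

	/* Read-and-clear both status registers so the next error is logged */
	uncorr = al_pcie_aer_uncorr_get_and_clear(pcie_pf);
	corr = al_pcie_aer_corr_get_and_clear(pcie_pf);

	/* The header log is only meaningful when an error was captured */
	if (uncorr) {
		al_pcie_aer_err_tlp_hdr_get(pcie_pf, hdr);
		al_err("AER uncorr 0x%08x hdr %08x %08x %08x %08x\n",
			uncorr, hdr[0], hdr[1], hdr[2], hdr[3]);
	}
	if (corr)
		al_err("AER corr 0x%08x\n", corr);
}
#endif
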
2909 /******************** RC AER functions **********************/
2910 /** configure RC port AER capability */
2911 int al_pcie_port_aer_config(
2912 		struct al_pcie_port		*pcie_port,
2913 		struct al_pcie_aer_params	*params)
2914 {
2915 	al_assert(pcie_port);
2916 	al_assert(params);
2917 
2918 	/*
2919 	 * In RC mode there are no PFs (nor PF handles),
2920 	 * therefore PF #0 is used.
2921 	 */
2922 	return al_pcie_aer_config_aux(pcie_port, 0, params);
2923 }
2924 
2925 /** RC port AER uncorrectable errors get and clear */
2926 unsigned int al_pcie_port_aer_uncorr_get_and_clear(
2927 		struct al_pcie_port		*pcie_port)
2928 {
2929 	al_assert(pcie_port);
2930 
2931 	/*
2932 	 * In RC mode there are no PFs (nor PF handles),
2933 	 * therefore PF #0 is used.
2934 	 */
2935 	return al_pcie_aer_uncorr_get_and_clear_aux(pcie_port, 0);
2936 }
2937 
2938 /** RC port AER correctable errors get and clear */
2939 unsigned int al_pcie_port_aer_corr_get_and_clear(
2940 		struct al_pcie_port		*pcie_port)
2941 {
2942 	al_assert(pcie_port);
2943 
2944 	/*
2945 	 * In RC mode there are no PFs (nor PF handles),
2946 	 * therefore PF #0 is used.
2947 	 */
2948 	return al_pcie_aer_corr_get_and_clear_aux(pcie_port, 0);
2949 }
2950 
2951 /** RC port AER get the header for the TLP corresponding to a detected error */
2952 void al_pcie_port_aer_err_tlp_hdr_get(
2953 		struct al_pcie_port		*pcie_port,
2954 		uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2955 {
2956 	al_assert(pcie_port);
2957 	al_assert(hdr);
2958 
2959 	/*
2960 	 * In RC mode there are no PFs (nor PF handles),
2961 	 * therefore PF #0 is used.
2962 	 */
2963 	al_pcie_aer_err_tlp_hdr_get_aux(pcie_port, 0, hdr);
2964 }
2965 
2966 /********************** Loopback mode (RC and Endpoint modes) ************/
2967 
2968 /** enter local pipe loopback mode */
2969 int
2970 al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port)
2971 {
2972 	struct al_pcie_regs *regs = pcie_port->regs;
2973 
2974 	al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode", pcie_port->port_id);
2975 
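	/*
	 * Loop TX back to RX at the PHY's PIPE interface, then set the
	 * port's loopback-enable bit so the LTSSM enters the Loopback state.
	 */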
2976 	al_reg_write32_masked(&regs->port_regs->pipe_loopback_ctrl,
2977 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2978 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
2979 
2980 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
2981 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2982 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);
2983 
2984 	return 0;
2985 }
2986 
2987 /**
2988  * @brief exit local pipe loopback mode
2989  *
2990  * @param pcie_port	pcie port handle
2991  * @return		0 if no error found
2992  */
2993 int
2994 al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port)
2995 {
2996 	struct al_pcie_regs *regs = pcie_port->regs;
2997 
2998 	al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode", pcie_port->port_id);
2999 
3000 	al_reg_write32_masked(&regs->port_regs->pipe_loopback_ctrl,
3001 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
3002 			      0);
3003 
3004 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
3005 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
3006 			      0);
3007 	return 0;
3008 }
3009 
3010 /** enter remote loopback mode */
3011 int
3012 al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port)
3013 {
3014 	struct al_pcie_regs *regs = pcie_port->regs;
3015 
3016 	al_dbg("PCIe %d: Enter REMOTE Loopback mode", pcie_port->port_id);
3017 
3018 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
3019 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
3020 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);
3021 
3022 	return 0;
3023 }
3024 
3025 /**
3026  * @brief   exit remote loopback mode
3027  *
3028  * @param   pcie_port pcie port handle
3029  * @return  0 if no error found
3030  */
3031 int
3032 al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port)
3033 {
3034 	struct al_pcie_regs *regs = pcie_port->regs;
3035 
3036 	al_dbg("PCIe %d: Exit REMOTE Loopback mode", pcie_port->port_id);
3037 
3038 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
3039 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
3040 			      0);
3041 	return 0;
3042 }
3043
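/*
 * Illustrative sketch (not part of the HAL): wrapping a PHY test in local
 * PIPE loopback. my_loopback_test() and my_run_phy_prbs() are hypothetical
 * test hooks, not HAL API.
 */
#if 0
static int my_loopback_test(struct al_pcie_port *pcie_port)
{
	int rc;

	rc = al_pcie_local_pipe_loopback_enter(pcie_port);
	if (rc)
		return rc;

	rc = my_run_phy_prbs(pcie_port);	/* hypothetical PRBS/BIST run */

	/* Always leave loopback, even when the test fails */
	al_pcie_local_pipe_loopback_exit(pcie_port);

	return rc;
}
#endif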