/*-
********************************************************************************
Copyright (C) 2015 Annapurna Labs Ltd.

This file may be licensed under the terms of the Annapurna Labs Commercial
License Agreement.

Alternatively, this file can be distributed under the terms of the GNU General
Public License V2 as published by the Free Software Foundation and can be
found at http://www.gnu.org/licenses/gpl-2.0.html

Alternatively, redistribution and use in source and binary forms, with or
without modification, are permitted provided that the following conditions are
met:

    *     Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

    *     Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include "al_hal_pcie.h"
#include "al_hal_pbs_regs.h"
#include "al_hal_unit_adapter_regs.h"

/**
 * Parameter definitions
 */
#define AL_PCIE_AXI_REGS_OFFSET			0x0

#define AL_PCIE_LTSSM_STATE_L0			0x11
#define AL_PCIE_LTSSM_STATE_L0S			0x12
#define AL_PCIE_DEVCTL_PAYLOAD_128B		0x00
#define AL_PCIE_DEVCTL_PAYLOAD_256B		0x20

#define AL_PCIE_SECBUS_DEFAULT			0x1
#define AL_PCIE_SUBBUS_DEFAULT			0x1
#define AL_PCIE_LINKUP_WAIT_INTERVAL		50	/* measured in usec */
#define AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC	20

#define AL_PCIE_LINKUP_RETRIES			8

#define AL_PCIE_MAX_32_MEMORY_BAR_SIZE		(0x100000000ULL)
#define AL_PCIE_MIN_MEMORY_BAR_SIZE		(1 << 12)
#define AL_PCIE_MIN_IO_BAR_SIZE			(1 << 8)

/**
 * inbound header credits and outstanding outbound reads defaults
 */
/** RC - Revisions 1/2 */
#define AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT	(8)
#define AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT	(41)
#define AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT	(25)
#define AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT	(31)
/** EP - Revisions 1/2 */
#define AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT	(15)
#define AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT	(76)
#define AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT	(6)
#define AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT	(15)
/** RC - Revision 3 */
#define AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT	(32)
#define AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT	(161)
#define AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT	(38)
#define AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT	(60)
/** EP - Revision 3 */
#define AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT	(32)
#define AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT	(161)
#define AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT	(38)
#define AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT	(60)
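/*
 * Note (illustrative observation, not from the original sources): within each
 * revision, the RC and EP header credit defaults sum to the same total
 * (41 + 25 + 31 = 76 + 6 + 15 = 97 for revisions 1/2; 161 + 38 + 60 = 259 for
 * revision 3), matching the per-revision credit-sum assertion performed in
 * al_pcie_port_ib_hcrd_os_ob_reads_config() below.
 */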

/**
 * MACROS
 */
#define AL_PCIE_PARSE_LANES(v)		(((1 << v) - 1) << \
		PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT)
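
/*
 * For example, AL_PCIE_PARSE_LANES(4) evaluates to 0xF (one bit per active
 * lane) shifted into the NOF_ACT_LANES field.
 */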

/**
 * Static functions
 */
static void
al_pcie_port_wr_to_ro_set(struct al_pcie_port *pcie_port, al_bool enable)
{
	/* when disabling writes to RO, make sure any previous writes to
	 * config space were committed
	 */
	if (enable == AL_FALSE)
		al_local_data_memory_barrier();

	al_reg_write32(&pcie_port->regs->port_regs->rd_only_wr_en,
		       (enable == AL_TRUE) ? 1 : 0);

	/* when enabling writes to RO, make sure it is committed before trying
	 * to write to RO config space
	 */
	if (enable == AL_TRUE)
		al_local_data_memory_barrier();
}
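
/*
 * Typical usage (as done throughout this file): bracket writes to read-only
 * config space with an enable/disable pair, e.g.:
 *
 *	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
 *	... write to RO config space registers ...
 *	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
 */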

/** helper function to access dbi_cs2 registers */
static void
al_reg_write32_dbi_cs2(
	struct al_pcie_port	*pcie_port,
	uint32_t		*offset,
	uint32_t		val)
{
	uintptr_t cs2_bit =
		(pcie_port->rev_id == AL_PCIE_REV_ID_3) ? 0x4000 : 0x1000;

	al_reg_write32((uint32_t *)((uintptr_t)offset | cs2_bit), val);
}
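
/*
 * Note: ORing the CS2 bit into the register address selects the core's
 * dbi_cs2 register space; this file uses it to program write-sensitive
 * registers such as the BAR size masks in al_pcie_port_pf_params_config().
 */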

static unsigned int
al_pcie_speed_gen_code(enum al_pcie_link_speed speed)
{
	if (speed == AL_PCIE_LINK_SPEED_GEN1)
		return 1;
	if (speed == AL_PCIE_LINK_SPEED_GEN2)
		return 2;
	if (speed == AL_PCIE_LINK_SPEED_GEN3)
		return 3;
	/* must not be reached */
	return 0;
}

static inline void
al_pcie_port_link_speed_ctrl_set(
	struct al_pcie_port *pcie_port,
	enum al_pcie_link_speed max_speed)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	if (max_speed != AL_PCIE_LINK_SPEED_DEFAULT) {
		uint16_t max_speed_val = (uint16_t)al_pcie_speed_gen_code(max_speed);
		al_reg_write32_masked(
			(uint32_t __iomem *)(regs->core_space[0].pcie_link_cap_base),
			0xF, max_speed_val);
		al_reg_write32_masked(
			(uint32_t __iomem *)(regs->core_space[0].pcie_cap_base
			+ (AL_PCI_EXP_LNKCTL2 >> 2)),
			0xF, max_speed_val);
	}

	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
}

static int
al_pcie_port_link_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_link_params *link_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint8_t max_lanes = pcie_port->max_lanes;

	if ((link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)	&&
	    (link_params->max_payload_size != AL_PCIE_MPS_128)		&&
	    (link_params->max_payload_size != AL_PCIE_MPS_256)) {
		al_err("PCIe %d: unsupported Max Payload Size (%u)\n",
		       pcie_port->port_id, link_params->max_payload_size);
		return -EINVAL;
	}

	al_dbg("PCIe %d: link config: max speed gen %d, max lanes %d, reversal %s\n",
	       pcie_port->port_id, link_params->max_speed,
	       pcie_port->max_lanes, link_params->enable_reversal ? "enable" : "disable");

	al_pcie_port_link_speed_ctrl_set(pcie_port, link_params->max_speed);

	/* Change Max Payload Size, if needed.
	 * The Max Payload Size is only valid for PF0.
	 */
	if (link_params->max_payload_size != AL_PCIE_MPS_DEFAULT)
		al_reg_write32_masked(regs->core_space[0].pcie_dev_ctrl_status,
				      PCIE_PORT_DEV_CTRL_STATUS_MPS_MASK,
				      link_params->max_payload_size <<
					PCIE_PORT_DEV_CTRL_STATUS_MPS_SHIFT);

	/** Excerpt from the PCIe core spec:
	 * Link Mode Enable. Sets the number of lanes in the link that you want
	 * to connect to the link partner. When you have unused lanes in your
	 * system, then you must change the value in this register to reflect
	 * the number of lanes. You must also change the value in the
	 * "Predetermined Number of Lanes" field of the "Link Width and Speed
	 * Change Control Register".
	 * 000001: x1
	 * 000011: x2
	 * 000111: x4
	 * 001111: x8
	 * 011111: x16
	 * 111111: x32 (not supported)
	 */
	al_reg_write32_masked(&regs->port_regs->gen2_ctrl,
				PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_MASK,
				max_lanes << PCIE_PORT_GEN2_CTRL_NUM_OF_LANES_SHIFT);
	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
				PCIE_PORT_LINK_CTRL_LINK_CAPABLE_MASK,
				(max_lanes + (max_lanes-1))
				<< PCIE_PORT_LINK_CTRL_LINK_CAPABLE_SHIFT);

	/* TODO: add support for reversal mode */
	if (link_params->enable_reversal) {
		al_err("PCIe %d: enabling reversal mode not implemented\n",
			pcie_port->port_id);
		return -ENOSYS;
	}
	return 0;
}

static void
al_pcie_port_ram_parity_int_config(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_reg_write32(&regs->app.parity->en_core,
		(enable == AL_TRUE) ? 0xffffffff : 0x0);

	al_reg_write32_masked(&regs->app.int_grp_b->mask,
	      PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE,
	      (enable != AL_TRUE) ?
	      PCIE_W_INT_GRP_B_CAUSE_B_PARITY_ERROR_CORE : 0);
}

static void
al_pcie_port_axi_parity_int_config(
	struct al_pcie_port *pcie_port,
	al_bool enable)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t parity_enable_mask = 0xffffffff;

	/**
	 * Addressing RMN: 5603
	 *
	 * RMN description:
	 * u4_ram2p signal false parity error
	 *
	 * Software flow:
	 * Disable parity check for this memory
	 */
	if (pcie_port->rev_id >= AL_PCIE_REV_ID_3)
		parity_enable_mask &= ~PCIE_AXI_PARITY_EN_AXI_U4_RAM2P;

	al_reg_write32(regs->axi.parity.en_axi,
		       (enable == AL_TRUE) ? parity_enable_mask : 0x0);

	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		al_reg_write32_masked(regs->axi.ctrl.global,
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
			(enable == AL_TRUE) ?
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
			PCIE_REV3_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
	} else {
		al_reg_write32_masked(regs->axi.ctrl.global,
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR,
			(enable == AL_TRUE) ?
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_MSTR |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_RD |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV |
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_ERR_EN_WR :
			PCIE_REV1_2_AXI_CTRL_GLOBAL_PARITY_CALC_EN_SLV);
	}

	al_reg_write32_masked(&regs->axi.int_grp_a->mask,
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI,
		(enable != AL_TRUE) ?
		(PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_DATA_PATH_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_RD |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_ADDR_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERR_OUT_DATA_WR |
		PCIE_AXI_INT_GRP_A_CAUSE_PARITY_ERROR_AXI) : 0);
}

static void
al_pcie_port_relaxed_pcie_ordering_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_relaxed_ordering_params *relaxed_ordering_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
	/**
	 * Default:
	 *  - RC: Rx relaxed ordering only
	 *  - EP: Tx relaxed ordering only
	 */
	al_bool tx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_FALSE : AL_TRUE);
	al_bool rx_relaxed_ordering = (op_mode == AL_PCIE_OPERATING_MODE_RC ? AL_TRUE : AL_FALSE);

	if (relaxed_ordering_params) {
		tx_relaxed_ordering = relaxed_ordering_params->enable_tx_relaxed_ordering;
		rx_relaxed_ordering = relaxed_ordering_params->enable_rx_relaxed_ordering;
	}

	/** PCIe ordering:
	 *  - disable enforcement of the "outbound completion must be stalled
	 *    behind outbound write" ordering rule for the root port
	 *  - disable "read completion on the master port pushes slave writes"
	 *    for the endpoint
	 */
	al_reg_write32_masked(
		regs->axi.ordering.pos_cntl,
		PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS |
		PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES,
		(tx_relaxed_ordering ?
		(PCIE_AXI_POS_ORDER_BYPASS_CMPL_AFTER_WR_FIX |
		PCIE_AXI_POS_ORDER_SEGMENT_BUFFER_DONT_WAIT_FOR_P_WRITES) : 0) |
		(rx_relaxed_ordering ?
		(PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_DIS |
		PCIE_AXI_POS_ORDER_EP_CMPL_AFTER_WR_SUPPORT_INTERLV_DIS) : 0));
}

static int
al_pcie_rev_id_get(
	void __iomem *pbs_reg_base,
	void __iomem *pcie_reg_base)
{
	uint32_t chip_id;
	uint16_t chip_id_dev;
	uint8_t rev_id;
	struct al_pbs_regs *pbs_regs = pbs_reg_base;

	/* get revision ID from PBS' chip_id register */
	chip_id = al_reg_read32(&pbs_regs->unit.chip_id);
	chip_id_dev = AL_REG_FIELD_GET(chip_id,
				       PBS_UNIT_CHIP_ID_DEV_ID_MASK,
				       PBS_UNIT_CHIP_ID_DEV_ID_SHIFT);

	if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_ALPINE) {
		rev_id = AL_REG_FIELD_GET(
						chip_id,
						PBS_UNIT_CHIP_ID_DEV_REV_ID_MASK,
						PBS_UNIT_CHIP_ID_DEV_REV_ID_SHIFT);
	} else if (chip_id_dev == PBS_UNIT_CHIP_ID_DEV_ID_PEAKROCK) {
		struct al_pcie_revx_regs __iomem *regs =
			(struct al_pcie_revx_regs __iomem *)pcie_reg_base;
		uint32_t dev_id;

		dev_id = al_reg_read32(&regs->axi.device_id.device_rev_id) &
			PCIE_AXI_DEVICE_ID_REG_DEV_ID_MASK;
		if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X4) {
			rev_id = AL_PCIE_REV_ID_2;
		} else if (dev_id == PCIE_AXI_DEVICE_ID_REG_DEV_ID_X8) {
			rev_id = AL_PCIE_REV_ID_3;
		} else {
			al_warn("%s: Revision ID is unknown\n",
				__func__);
			return -EINVAL;
		}
	} else {
		al_warn("%s: Revision ID is unknown\n",
			__func__);
		return -EINVAL;
	}
	return rev_id;
}

static int
al_pcie_port_lat_rply_timers_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_latency_replay_timers *lat_rply_timers)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t	reg = 0;

	AL_REG_FIELD_SET(reg, 0xFFFF, 0, lat_rply_timers->round_trip_lat_limit);
	AL_REG_FIELD_SET(reg, 0xFFFF0000, 16, lat_rply_timers->replay_timer_limit);

	al_reg_write32(&regs->port_regs->ack_lat_rply_timer, reg);
	return 0;
}

static void
al_pcie_ib_hcrd_os_ob_reads_config_default(
	struct al_pcie_port *pcie_port)
{
	struct al_pcie_ib_hcrd_os_ob_reads_config ib_hcrd_os_ob_reads_config;

	switch (al_pcie_operating_mode_get(pcie_port)) {
	case AL_PCIE_OPERATING_MODE_RC:
		if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_3_RC_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_3_RC_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_3_RC_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_3_RC_NOF_P_HDR_DEFAULT;
		} else {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_1_2_RC_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_1_2_RC_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_1_2_RC_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_1_2_RC_NOF_P_HDR_DEFAULT;
		}
		break;

	case AL_PCIE_OPERATING_MODE_EP:
		if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_3_EP_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_3_EP_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_3_EP_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_3_EP_NOF_P_HDR_DEFAULT;
		} else {
			ib_hcrd_os_ob_reads_config.nof_outstanding_ob_reads =
				AL_PCIE_REV_1_2_EP_OB_OS_READS_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_cpl_hdr =
				AL_PCIE_REV_1_2_EP_NOF_CPL_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_np_hdr =
				AL_PCIE_REV_1_2_EP_NOF_NP_HDR_DEFAULT;
			ib_hcrd_os_ob_reads_config.nof_p_hdr =
				AL_PCIE_REV_1_2_EP_NOF_P_HDR_DEFAULT;
		}
		break;

	default:
		al_err("PCIe %d: outstanding outbound transactions could not be configured - unknown operating mode\n",
			pcie_port->port_id);
		al_assert(0);
	}

	al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &ib_hcrd_os_ob_reads_config);
}

/** return AL_TRUE if the link has been started (LTSSM enabled), AL_FALSE otherwise */
static al_bool
al_pcie_is_link_started(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;

	uint32_t port_init = al_reg_read32(regs->app.global_ctrl.port_init);
	uint8_t ltssm_en = AL_REG_FIELD_GET(port_init,
		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
		PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_SHIFT);

	return ltssm_en;
}

/** return AL_TRUE if link is up, AL_FALSE otherwise */
static al_bool
al_pcie_check_link(
	struct al_pcie_port *pcie_port,
	uint8_t *ltssm_ret)
{
	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
	uint32_t info_0;
	uint8_t	ltssm_state;

	info_0 = al_reg_read32(&regs->app.debug->info_0);

	ltssm_state = AL_REG_FIELD_GET(info_0,
			PCIE_W_DEBUG_INFO_0_LTSSM_STATE_MASK,
			PCIE_W_DEBUG_INFO_0_LTSSM_STATE_SHIFT);

	al_dbg("PCIe %d: Port Debug 0: 0x%08x. LTSSM state: 0x%x\n",
		pcie_port->port_id, info_0, ltssm_state);

	if (ltssm_ret)
		*ltssm_ret = ltssm_state;

	if ((ltssm_state == AL_PCIE_LTSSM_STATE_L0) ||
			(ltssm_state == AL_PCIE_LTSSM_STATE_L0S))
		return AL_TRUE;
	return AL_FALSE;
}

static int
al_pcie_port_gen2_params_config(struct al_pcie_port *pcie_port,
				const struct al_pcie_gen2_params *gen2_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t gen2_ctrl;

	al_dbg("PCIe %d: Gen2 params config: Tx Swing %s, interrupt on link Eq %s, set Deemphasis %s\n",
	       pcie_port->port_id,
	       gen2_params->tx_swing_low ? "Low" : "Full",
	       gen2_params->tx_compliance_receive_enable ? "enable" : "disable",
	       gen2_params->set_deemphasis ? "enable" : "disable");

	gen2_ctrl = al_reg_read32(&regs->port_regs->gen2_ctrl);

	if (gen2_params->tx_swing_low)
		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);
	else
		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_SWING_LOW_SHIFT);

	if (gen2_params->tx_compliance_receive_enable)
		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);
	else
		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_TX_COMPLIANCE_RCV_SHIFT);

	if (gen2_params->set_deemphasis)
		AL_REG_BIT_SET(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);
	else
		AL_REG_BIT_CLEAR(gen2_ctrl, PCIE_PORT_GEN2_CTRL_DEEMPHASIS_SET_SHIFT);

	al_reg_write32(&regs->port_regs->gen2_ctrl, gen2_ctrl);

	return 0;
}

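/*
 * Pack one lane's Gen3 equalization parameters into the 16-bit layout used
 * by the lane equalization control entries (layout as implemented below):
 *	[3:0]	downstream port transmitter preset
 *	[6:4]	downstream port receiver preset hint
 *	[11:8]	upstream port transmitter preset
 *	[14:12]	upstream port receiver preset hint
 */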
static uint16_t
gen3_lane_eq_param_to_val(const struct al_pcie_gen3_lane_eq_params *eq_params)
{
	uint16_t eq_control = 0;

	eq_control = eq_params->downstream_port_transmitter_preset & 0xF;
	eq_control |= (eq_params->downstream_port_receiver_preset_hint & 0x7) << 4;
	eq_control |= (eq_params->upstream_port_transmitter_preset & 0xF) << 8;
	eq_control |= (eq_params->upstream_port_receiver_preset_hint & 0x7) << 12;

	return eq_control;
}

static int
al_pcie_port_gen3_params_config(struct al_pcie_port *pcie_port,
				const struct al_pcie_gen3_params *gen3_params)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t reg = 0;
	uint16_t __iomem *lanes_eq_base = (uint16_t __iomem *)(regs->core_space[0].pcie_sec_ext_cap_base + (0xC >> 2));
	int i;

	al_dbg("PCIe %d: Gen3 params config: Equalization %s, interrupt on link Eq %s\n",
	       pcie_port->port_id,
	       gen3_params->perform_eq ? "enable" : "disable",
	       gen3_params->interrupt_enable_on_link_eq_request ? "enable" : "disable");

	if (gen3_params->perform_eq)
		AL_REG_BIT_SET(reg, 0);
	if (gen3_params->interrupt_enable_on_link_eq_request)
		AL_REG_BIT_SET(reg, 1);

	al_reg_write32(regs->core_space[0].pcie_sec_ext_cap_base + (4 >> 2),
		       reg);

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	for (i = 0; i < gen3_params->eq_params_elements; i += 2) {
		uint32_t eq_control =
			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i) |
			(uint32_t)gen3_lane_eq_param_to_val(gen3_params->eq_params + i + 1) << 16;

		al_dbg("PCIe %d: Set EQ (0x%08x) for lane %d, %d\n", pcie_port->port_id, eq_control, i, i + 1);
		al_reg_write32((uint32_t *)(lanes_eq_base + i), eq_control);
	}

	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);

	reg = al_reg_read32(&regs->port_regs->gen3_ctrl);
	if (gen3_params->eq_disable)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_DISABLE_SHIFT);

	if (gen3_params->eq_phase2_3_disable)
		AL_REG_BIT_SET(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);
	else
		AL_REG_BIT_CLEAR(reg, PCIE_PORT_GEN3_CTRL_EQ_PHASE_2_3_DISABLE_SHIFT);

	al_reg_write32(&regs->port_regs->gen3_ctrl, reg);

	reg = 0;
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_LF_MASK,
			 PCIE_PORT_GEN3_EQ_LF_SHIFT,
			 gen3_params->local_lf);
	AL_REG_FIELD_SET(reg, PCIE_PORT_GEN3_EQ_FS_MASK,
			 PCIE_PORT_GEN3_EQ_FS_SHIFT,
			 gen3_params->local_fs);

	al_reg_write32(&regs->port_regs->gen3_eq_fs_lf, reg);

	reg = 0;
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_MASK,
			 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_LF_SHIFT,
			 gen3_params->local_lf);
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_MASK,
			 PCIE_AXI_MISC_ZERO_LANEX_PHY_MAC_LOCAL_FS_SHIFT,
			 gen3_params->local_fs);
	al_reg_write32(regs->axi.conf.zero_lane0, reg);
	al_reg_write32(regs->axi.conf.zero_lane1, reg);
	al_reg_write32(regs->axi.conf.zero_lane2, reg);
	al_reg_write32(regs->axi.conf.zero_lane3, reg);
	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		al_reg_write32(regs->axi.conf.zero_lane4, reg);
		al_reg_write32(regs->axi.conf.zero_lane5, reg);
		al_reg_write32(regs->axi.conf.zero_lane6, reg);
		al_reg_write32(regs->axi.conf.zero_lane7, reg);
	}

	/*
	 * Gen3 EQ Control Register:
	 * - Preset Request Vector - request 9
	 * - Behavior After 24 ms Timeout (when optimal settings are not
	 *   found): Recovery.Equalization.RcvrLock
	 * - Phase2_3 2 ms Timeout Disable
	 * - Feedback Mode - Figure Of Merit
	 */
	reg = 0x00020031;
	al_reg_write32(&regs->port_regs->gen3_eq_ctrl, reg);

	return 0;
}

static int
al_pcie_port_tl_credits_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_tl_credits_params *tl_credits __attribute__((__unused__)))
{
	al_err("PCIe %d: transport layer credits config not implemented\n",
		pcie_port->port_id);

	return -ENOSYS;
}

static int
al_pcie_port_pf_params_config(struct al_pcie_pf *pcie_pf,
			      const struct al_pcie_pf_config_params *pf_params)
{
	struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
	struct al_pcie_regs *regs = pcie_port->regs;
	unsigned int pf_num = pcie_pf->pf_num;
	int bar_idx;
	int ret;

	al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);

	/* Disable D1 and D3hot capabilities */
	if (pf_params->cap_d1_d3hot_dis)
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_pm_cap_base,
			AL_FIELD_MASK(26, 25) | AL_FIELD_MASK(31, 28), 0);

	/* Disable FLR capability */
	if (pf_params->cap_flr_dis)
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_dev_cap_base,
			AL_BIT(28), 0);

	/* Disable ASPM capability */
	if (pf_params->cap_aspm_dis) {
		al_reg_write32_masked(
			regs->core_space[pf_num].pcie_cap_base + (AL_PCI_EXP_LNKCAP >> 2),
			AL_PCI_EXP_LNKCAP_ASPMS, 0);
	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
		al_warn("%s: ASPM support is enabled, please disable it\n",
			__func__);
		ret = -EINVAL;
		goto done;
	}

	if (!pf_params->bar_params_valid) {
		ret = 0;
		goto done;
	}

	for (bar_idx = 0; bar_idx < 6;) { /* bar_idx is incremented below depending on the BAR type */
		const struct al_pcie_ep_bar_params *params = pf_params->bar_params + bar_idx;
		uint32_t mask = 0;
		uint32_t ctrl = 0;
		uint32_t __iomem *bar_addr = &regs->core_space[pf_num].config_header[(AL_PCI_BASE_ADDRESS_0 >> 2) + bar_idx];

		if (params->enable) {
			uint64_t size = params->size;

			if (params->memory_64_bit) {
				const struct al_pcie_ep_bar_params *next_params = params + 1;
				/* 64-bit BARs must start at an even index (BAR0, BAR2 or BAR4) */
				if (bar_idx & 1) {
					ret = -EINVAL;
					goto done;
				}

				/* next BAR must be disabled */
				if (next_params->enable) {
					ret = -EINVAL;
					goto done;
				}

				/* a 64-bit BAR must be a memory BAR */
				if (!params->memory_space) {
					ret = -EINVAL;
					goto done;
				}
			} else {
				if (size > AL_PCIE_MAX_32_MEMORY_BAR_SIZE) {
					ret = -EINVAL;
					goto done;
				}
				/* a 32-bit BAR can't be prefetchable */
				if (params->memory_is_prefetchable) {
					ret = -EINVAL;
					goto done;
				}
			}

			if (params->memory_space) {
				if (size < AL_PCIE_MIN_MEMORY_BAR_SIZE) {
					al_err("PCIe %d: memory BAR %d: size (0x%llx) less than the minimal allowed value\n",
						pcie_port->port_id, bar_idx, size);
					ret = -EINVAL;
					goto done;
				}
			} else {
				/* IO BARs can't be prefetchable */
				if (params->memory_is_prefetchable) {
					ret = -EINVAL;
					goto done;
				}

				if (size < AL_PCIE_MIN_IO_BAR_SIZE) {
					al_err("PCIe %d: IO BAR %d: size (0x%llx) less than the minimal allowed value\n",
						pcie_port->port_id, bar_idx, size);
					ret = -EINVAL;
					goto done;
				}
			}

			/* size must be power of 2 */
			if (size & (size - 1)) {
				al_err("PCIe %d: BAR %d: size (0x%llx) must be "
					"power of 2\n",
					pcie_port->port_id, bar_idx, size);
				ret = -EINVAL;
				goto done;
			}

			/* If BAR is 64-bit, disable the next BAR before
			 * configuring this one
			 */
			if (params->memory_64_bit)
				al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, 0);

			mask = 1; /* enable bit */
			mask |= (params->size - 1) & 0xFFFFFFFF;

			al_reg_write32_dbi_cs2(pcie_port, bar_addr, mask);

			if (params->memory_space == AL_FALSE)
				ctrl = AL_PCI_BASE_ADDRESS_SPACE_IO;
			if (params->memory_64_bit)
				ctrl |= AL_PCI_BASE_ADDRESS_MEM_TYPE_64;
			if (params->memory_is_prefetchable)
				ctrl |= AL_PCI_BASE_ADDRESS_MEM_PREFETCH;
			al_reg_write32(bar_addr, ctrl);

			if (params->memory_64_bit) {
				mask = ((params->size - 1) >> 32) & 0xFFFFFFFF;
				al_reg_write32_dbi_cs2(pcie_port, bar_addr + 1, mask);
			}

		} else {
			al_reg_write32_dbi_cs2(pcie_port, bar_addr, mask);
		}
		if (params->enable && params->memory_64_bit)
			bar_idx += 2;
		else
			bar_idx += 1;
	}

	if (pf_params->exp_bar_params.enable) {
		if (pcie_port->rev_id != AL_PCIE_REV_ID_3) {
			al_err("PCIe %d: Expansion BAR enable not supported\n", pcie_port->port_id);
			ret = -ENOSYS;
			goto done;
		} else {
			/* Enable expansion ROM */
			uint32_t __iomem *exp_rom_bar_addr =
			&regs->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
			uint32_t mask = 1; /* enable bit */
			mask |= (pf_params->exp_bar_params.size - 1) & 0xFFFFFFFF;
			al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr, mask);
		}
	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		/* Disable expansion ROM */
		uint32_t __iomem *exp_rom_bar_addr =
			&regs->core_space[pf_num].config_header[AL_PCI_EXP_ROM_BASE_ADDRESS >> 2];
		al_reg_write32_dbi_cs2(pcie_port, exp_rom_bar_addr, 0);
	}

	/* Open CPU generated MSI and legacy interrupts in PCIe wrapper logic */
	if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_0, (1 << 21));
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_inta_leg_3, (1 << 18));
	} else {
		al_assert(0);
		ret = -ENOSYS;
		goto done;
	}

	/**
	 * Addressing RMN: 1547
	 *
	 * RMN description:
	 * 1. Whenever writing to 0x2xx offset, the write also happens to
	 * 0x3xx address, meaning two registers are written instead of one.
	 * 2. Read and write from 0x3xx work ok.
	 *
	 * Software flow:
	 * Backup the value of the app.int_grp_a.mask_a register, because
	 * app.int_grp_a.mask_clear_a gets overwritten during the write to
	 * app.soc.mask_msi_leg_0 register.
	 * Restore the original value after the write to app.soc.mask_msi_leg_0
	 * register.
	 */
	if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
		uint32_t backup;

		backup = al_reg_read32(&regs->app.int_grp_a->mask);
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
		al_reg_write32(&regs->app.int_grp_a->mask, backup);
	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_1) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_0, (1 << 22));
	} else if ((pcie_port->rev_id == AL_PCIE_REV_ID_2) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_3)) {
		al_reg_write32(regs->app.soc_int[pf_num].mask_msi_leg_3, (1 << 19));
	} else {
		al_assert(0);
		ret = -ENOSYS;
		goto done;
	}

	ret = 0;

done:
	al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);

	return ret;
}

static void
al_pcie_port_features_config(
	struct al_pcie_port *pcie_port,
	const struct al_pcie_features *features)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_assert(pcie_port->rev_id > AL_PCIE_REV_ID_0);

	al_reg_write32_masked(
		&regs->app.ctrl_gen->features,
		PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX,
		features->sata_ep_msi_fix ?
		PCIE_W_CTRL_GEN_FEATURES_SATA_EP_MSI_FIX : 0);
}

static int
al_pcie_port_sris_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_sris_params *sris_params,
	enum al_pcie_link_speed link_speed)
{
	int rc = 0;
	struct al_pcie_regs *regs = pcie_port->regs;

	if (sris_params->use_defaults) {
		sris_params->kp_counter_gen3 = (pcie_port->rev_id > AL_PCIE_REV_ID_1) ?
						PCIE_SRIS_KP_COUNTER_GEN3_DEFAULT_VAL : 0;
		sris_params->kp_counter_gen21 = PCIE_SRIS_KP_COUNTER_GEN21_DEFAULT_VAL;

		al_dbg("PCIe %d: configuring SRIS with default values kp_gen3[%d] kp_gen21[%d]\n",
			pcie_port->port_id,
			sris_params->kp_counter_gen3,
			sris_params->kp_counter_gen21);
	}

	switch (pcie_port->rev_id) {
	case AL_PCIE_REV_ID_3:
	case AL_PCIE_REV_ID_2:
		al_reg_write32_masked(regs->app.global_ctrl.sris_kp_counter,
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_MASK |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_MASK |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN,
			(sris_params->kp_counter_gen3 <<
				PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN3_SRIS_SHIFT) |
			(sris_params->kp_counter_gen21 <<
				PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_GEN21_SRIS_SHIFT) |
			PCIE_W_GLOBAL_CTRL_SRIS_KP_COUNTER_VALUE_PCIE_X4_SRIS_EN);
		break;

	case AL_PCIE_REV_ID_1:
		if ((link_speed == AL_PCIE_LINK_SPEED_GEN3) && (sris_params->kp_counter_gen3)) {
			al_err("PCIe %d: cannot config Gen%d SRIS with rev_id[%d]\n",
				pcie_port->port_id, al_pcie_speed_gen_code(link_speed),
				pcie_port->rev_id);
			return -EINVAL;
		}

		al_reg_write32_masked(&regs->port_regs->filter_mask_reg_1,
			PCIE_FLT_MASK_SKP_INT_VAL_MASK,
			sris_params->kp_counter_gen21);
		break;

	default:
		al_err("PCIe %d: SRIS config is not supported in rev_id[%d]\n",
			pcie_port->port_id, pcie_port->rev_id);
		al_assert(0);
		return -EINVAL;
	}

	return rc;
}

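/*
 * Apply the inbound posted/non-posted header credits previously stored in
 * pcie_port->ib_hcrd_config (see al_pcie_port_ib_hcrd_os_ob_reads_config());
 * the hardware queue control fields hold the credit count minus one.
 */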
static void
al_pcie_port_ib_hcrd_config(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	al_reg_write32_masked(
		&regs->port_regs->vc0_posted_rcv_q_ctrl,
		RADM_PQ_HCRD_VC0_MASK,
		(pcie_port->ib_hcrd_config.nof_p_hdr - 1)
			<< RADM_PQ_HCRD_VC0_SHIFT);

	al_reg_write32_masked(
		&regs->port_regs->vc0_non_posted_rcv_q_ctrl,
		RADM_NPQ_HCRD_VC0_MASK,
		(pcie_port->ib_hcrd_config.nof_np_hdr - 1)
			<< RADM_NPQ_HCRD_VC0_SHIFT);
}

static unsigned int
al_pcie_port_max_num_of_pfs_get(struct al_pcie_port *pcie_port)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t max_func_num;
	uint32_t max_num_of_pfs;

	/**
	 * Only in REV3, and only when the port is already enabled, is
	 * max_num_of_pfs already initialized in hardware; in that case return
	 * the hardware value. Otherwise, return the default: 1 PF.
	 */
	if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
		&& al_pcie_port_is_enabled(pcie_port)) {
		max_func_num = al_reg_read32(&regs->port_regs->timer_ctrl_max_func_num);
		max_num_of_pfs = AL_REG_FIELD_GET(max_func_num, PCIE_PORT_GEN3_MAX_FUNC_NUM, 0) + 1;
		return max_num_of_pfs;
	}
	return 1;
}

/******************************************************************************/
/***************************** API Implementation *****************************/
/******************************************************************************/

/*************************** PCIe Initialization API **************************/

/**
 * Initializes a PCIe port handle structure
 * Caution: this function must not read or write any register except for
 * reading RO registers (REV_ID for example)
 */
int
al_pcie_port_handle_init(
	struct al_pcie_port	*pcie_port,
	void __iomem		*pcie_reg_base,
	void __iomem		*pbs_reg_base,
	unsigned int		port_id)
{
	int i, ret;

	pcie_port->pcie_reg_base = pcie_reg_base;
	pcie_port->regs = &pcie_port->regs_ptrs;
	pcie_port->ex_regs = NULL;
	pcie_port->pbs_regs = pbs_reg_base;
	pcie_port->port_id = port_id;
	pcie_port->max_lanes = 0;

	ret = al_pcie_rev_id_get(pbs_reg_base, pcie_reg_base);
	if (ret < 0)
		return ret;

	pcie_port->rev_id = ret;

	/* Zero all regs */
	al_memset(pcie_port->regs, 0, sizeof(struct al_pcie_regs));

	if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
		(pcie_port->rev_id == AL_PCIE_REV_ID_1)) {
		struct al_pcie_rev1_regs __iomem *regs =
			(struct al_pcie_rev1_regs __iomem *)pcie_reg_base;

		pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
		pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
		pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
		pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
		pcie_port->regs->axi.ob_ctrl.cfg_target_bus = &regs->axi.ob_ctrl.cfg_target_bus;
		pcie_port->regs->axi.ob_ctrl.cfg_control = &regs->axi.ob_ctrl.cfg_control;
		pcie_port->regs->axi.ob_ctrl.io_start_l = &regs->axi.ob_ctrl.io_start_l;
		pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
		pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
		pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
		pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
		pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
		pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
		pcie_port->regs->axi.conf.zero_lane2 = &regs->axi.conf.zero_lane2;
		pcie_port->regs->axi.conf.zero_lane3 = &regs->axi.conf.zero_lane3;
		pcie_port->regs->axi.status.lane[0] = &regs->axi.status.lane0;
		pcie_port->regs->axi.status.lane[1] = &regs->axi.status.lane1;
		pcie_port->regs->axi.status.lane[2] = &regs->axi.status.lane2;
		pcie_port->regs->axi.status.lane[3] = &regs->axi.status.lane3;
		pcie_port->regs->axi.parity.en_axi = &regs->axi.parity.en_axi;
		pcie_port->regs->axi.ordering.pos_cntl = &regs->axi.ordering.pos_cntl;
		pcie_port->regs->axi.pre_configuration.pcie_core_setup = &regs->axi.pre_configuration.pcie_core_setup;
		pcie_port->regs->axi.init_fc.cfg = &regs->axi.init_fc.cfg;
		pcie_port->regs->axi.int_grp_a = &regs->axi.int_grp_a;

		pcie_port->regs->app.global_ctrl.port_init = &regs->app.global_ctrl.port_init;
		pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
		pcie_port->regs->app.global_ctrl.events_gen[0] = &regs->app.global_ctrl.events_gen;
		pcie_port->regs->app.debug = &regs->app.debug;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = &regs->app.soc_int.mask_inta_leg_0;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = &regs->app.soc_int.mask_msi_leg_0;
		pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
		pcie_port->regs->app.parity = &regs->app.parity;
		pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
		pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;

		if (pcie_port->rev_id == AL_PCIE_REV_ID_0) {
			pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a_m0;
			pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b_m0;
		} else {
			pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
			pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;
		}

		pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
		pcie_port->regs->core_space[0].pcie_pm_cap_base = &regs->core_space.pcie_pm_cap_base;
		pcie_port->regs->core_space[0].pcie_cap_base = &regs->core_space.pcie_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_cap_base = &regs->core_space.pcie_dev_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_ctrl_status = &regs->core_space.pcie_dev_ctrl_status;
		pcie_port->regs->core_space[0].pcie_link_cap_base = &regs->core_space.pcie_link_cap_base;
		pcie_port->regs->core_space[0].msix_cap_base = &regs->core_space.msix_cap_base;
		pcie_port->regs->core_space[0].aer = &regs->core_space.aer;
		pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = &regs->core_space.pcie_sec_ext_cap_base;

		pcie_port->regs->port_regs = &regs->core_space.port_regs;

	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_2) {
		struct al_pcie_rev2_regs __iomem *regs =
			(struct al_pcie_rev2_regs __iomem *)pcie_reg_base;

		pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
		pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
		pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
		pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
		pcie_port->regs->axi.ob_ctrl.cfg_target_bus = &regs->axi.ob_ctrl.cfg_target_bus;
		pcie_port->regs->axi.ob_ctrl.cfg_control = &regs->axi.ob_ctrl.cfg_control;
		pcie_port->regs->axi.ob_ctrl.io_start_l = &regs->axi.ob_ctrl.io_start_l;
		pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
		pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
		pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
		pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
		pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
		pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
		pcie_port->regs->axi.conf.zero_lane2 = &regs->axi.conf.zero_lane2;
		pcie_port->regs->axi.conf.zero_lane3 = &regs->axi.conf.zero_lane3;
		pcie_port->regs->axi.status.lane[0] = &regs->axi.status.lane0;
		pcie_port->regs->axi.status.lane[1] = &regs->axi.status.lane1;
		pcie_port->regs->axi.status.lane[2] = &regs->axi.status.lane2;
		pcie_port->regs->axi.status.lane[3] = &regs->axi.status.lane3;
		pcie_port->regs->axi.parity.en_axi = &regs->axi.parity.en_axi;
		pcie_port->regs->axi.ordering.pos_cntl = &regs->axi.ordering.pos_cntl;
		pcie_port->regs->axi.pre_configuration.pcie_core_setup = &regs->axi.pre_configuration.pcie_core_setup;
		pcie_port->regs->axi.init_fc.cfg = &regs->axi.init_fc.cfg;
		pcie_port->regs->axi.int_grp_a = &regs->axi.int_grp_a;

		pcie_port->regs->app.global_ctrl.port_init = &regs->app.global_ctrl.port_init;
		pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
		pcie_port->regs->app.global_ctrl.events_gen[0] = &regs->app.global_ctrl.events_gen;
		pcie_port->regs->app.global_ctrl.corr_err_sts_int = &regs->app.global_ctrl.pended_corr_err_sts_int;
		pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = &regs->app.global_ctrl.pended_uncorr_err_sts_int;
		pcie_port->regs->app.debug = &regs->app.debug;
		pcie_port->regs->app.ap_user_send_msg = &regs->app.ap_user_send_msg;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_0 = &regs->app.soc_int.mask_inta_leg_0;
		pcie_port->regs->app.soc_int[0].mask_inta_leg_3 = &regs->app.soc_int.mask_inta_leg_3;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_0 = &regs->app.soc_int.mask_msi_leg_0;
		pcie_port->regs->app.soc_int[0].mask_msi_leg_3 = &regs->app.soc_int.mask_msi_leg_3;
		pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
		pcie_port->regs->app.parity = &regs->app.parity;
		pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
		pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;
		pcie_port->regs->app.status_per_func[0] = &regs->app.status_per_func;
		pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
		pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;

		pcie_port->regs->core_space[0].config_header = regs->core_space.config_header;
		pcie_port->regs->core_space[0].pcie_pm_cap_base = &regs->core_space.pcie_pm_cap_base;
		pcie_port->regs->core_space[0].pcie_cap_base = &regs->core_space.pcie_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_cap_base = &regs->core_space.pcie_dev_cap_base;
		pcie_port->regs->core_space[0].pcie_dev_ctrl_status = &regs->core_space.pcie_dev_ctrl_status;
		pcie_port->regs->core_space[0].pcie_link_cap_base = &regs->core_space.pcie_link_cap_base;
		pcie_port->regs->core_space[0].msix_cap_base = &regs->core_space.msix_cap_base;
		pcie_port->regs->core_space[0].aer = &regs->core_space.aer;
		pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = &regs->core_space.pcie_sec_ext_cap_base;

		pcie_port->regs->port_regs = &regs->core_space.port_regs;

	} else if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		struct al_pcie_rev3_regs __iomem *regs =
			(struct al_pcie_rev3_regs __iomem *)pcie_reg_base;
		pcie_port->regs->axi.ctrl.global = &regs->axi.ctrl.global;
		pcie_port->regs->axi.ctrl.master_arctl = &regs->axi.ctrl.master_arctl;
		pcie_port->regs->axi.ctrl.master_awctl = &regs->axi.ctrl.master_awctl;
		pcie_port->regs->axi.ctrl.slv_ctl = &regs->axi.ctrl.slv_ctl;
		pcie_port->regs->axi.ob_ctrl.cfg_target_bus = &regs->axi.ob_ctrl.cfg_target_bus;
		pcie_port->regs->axi.ob_ctrl.cfg_control = &regs->axi.ob_ctrl.cfg_control;
		pcie_port->regs->axi.ob_ctrl.io_start_l = &regs->axi.ob_ctrl.io_start_l;
		pcie_port->regs->axi.ob_ctrl.io_start_h = &regs->axi.ob_ctrl.io_start_h;
		pcie_port->regs->axi.ob_ctrl.io_limit_l = &regs->axi.ob_ctrl.io_limit_l;
		pcie_port->regs->axi.ob_ctrl.io_limit_h = &regs->axi.ob_ctrl.io_limit_h;
		pcie_port->regs->axi.pcie_global.conf = &regs->axi.pcie_global.conf;
		pcie_port->regs->axi.conf.zero_lane0 = &regs->axi.conf.zero_lane0;
		pcie_port->regs->axi.conf.zero_lane1 = &regs->axi.conf.zero_lane1;
		pcie_port->regs->axi.conf.zero_lane2 = &regs->axi.conf.zero_lane2;
		pcie_port->regs->axi.conf.zero_lane3 = &regs->axi.conf.zero_lane3;
		pcie_port->regs->axi.conf.zero_lane4 = &regs->axi.conf.zero_lane4;
		pcie_port->regs->axi.conf.zero_lane5 = &regs->axi.conf.zero_lane5;
		pcie_port->regs->axi.conf.zero_lane6 = &regs->axi.conf.zero_lane6;
		pcie_port->regs->axi.conf.zero_lane7 = &regs->axi.conf.zero_lane7;
		pcie_port->regs->axi.status.lane[0] = &regs->axi.status.lane0;
		pcie_port->regs->axi.status.lane[1] = &regs->axi.status.lane1;
		pcie_port->regs->axi.status.lane[2] = &regs->axi.status.lane2;
		pcie_port->regs->axi.status.lane[3] = &regs->axi.status.lane3;
		pcie_port->regs->axi.status.lane[4] = &regs->axi.status.lane4;
		pcie_port->regs->axi.status.lane[5] = &regs->axi.status.lane5;
		pcie_port->regs->axi.status.lane[6] = &regs->axi.status.lane6;
		pcie_port->regs->axi.status.lane[7] = &regs->axi.status.lane7;
		pcie_port->regs->axi.parity.en_axi = &regs->axi.parity.en_axi;
		pcie_port->regs->axi.ordering.pos_cntl = &regs->axi.ordering.pos_cntl;
		pcie_port->regs->axi.pre_configuration.pcie_core_setup = &regs->axi.pre_configuration.pcie_core_setup;
		pcie_port->regs->axi.init_fc.cfg = &regs->axi.init_fc.cfg;
		pcie_port->regs->axi.int_grp_a = &regs->axi.int_grp_a;
		pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_0 = &regs->axi.axi_attr_ovrd.write_msg_ctrl_0;
		pcie_port->regs->axi.axi_attr_ovrd.write_msg_ctrl_1 = &regs->axi.axi_attr_ovrd.write_msg_ctrl_1;
		pcie_port->regs->axi.axi_attr_ovrd.pf_sel = &regs->axi.axi_attr_ovrd.pf_sel;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_0 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_0;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_1 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_1;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_2 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_2;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_3 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_3;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_4 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_4;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_5 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_5;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_6 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_6;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_7 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_7;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_8 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_8;
			pcie_port->regs->axi.pf_axi_attr_ovrd[i].func_ctrl_9 = &regs->axi.pf_axi_attr_ovrd[i].func_ctrl_9;
		}

		pcie_port->regs->axi.msg_attr_axuser_table.entry_vec = &regs->axi.msg_attr_axuser_table.entry_vec;

		pcie_port->regs->app.global_ctrl.port_init = &regs->app.global_ctrl.port_init;
		pcie_port->regs->app.global_ctrl.pm_control = &regs->app.global_ctrl.pm_control;
		pcie_port->regs->app.global_ctrl.corr_err_sts_int = &regs->app.global_ctrl.pended_corr_err_sts_int;
		pcie_port->regs->app.global_ctrl.uncorr_err_sts_int = &regs->app.global_ctrl.pended_uncorr_err_sts_int;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->app.global_ctrl.events_gen[i] = &regs->app.events_gen_per_func[i].events_gen;
		}

		pcie_port->regs->app.global_ctrl.sris_kp_counter = &regs->app.global_ctrl.sris_kp_counter_value;
		pcie_port->regs->app.debug = &regs->app.debug;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->app.soc_int[i].mask_inta_leg_0 = &regs->app.soc_int_per_func[i].mask_inta_leg_0;
			pcie_port->regs->app.soc_int[i].mask_inta_leg_3 = &regs->app.soc_int_per_func[i].mask_inta_leg_3;
			pcie_port->regs->app.soc_int[i].mask_msi_leg_0 = &regs->app.soc_int_per_func[i].mask_msi_leg_0;
			pcie_port->regs->app.soc_int[i].mask_msi_leg_3 = &regs->app.soc_int_per_func[i].mask_msi_leg_3;
		}

		pcie_port->regs->app.ap_user_send_msg = &regs->app.ap_user_send_msg;
		pcie_port->regs->app.ctrl_gen = &regs->app.ctrl_gen;
		pcie_port->regs->app.parity = &regs->app.parity;
		pcie_port->regs->app.atu.in_mask_pair = regs->app.atu.in_mask_pair;
		pcie_port->regs->app.atu.out_mask_pair = regs->app.atu.out_mask_pair;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++)
			pcie_port->regs->app.status_per_func[i] = &regs->app.status_per_func[i];

		pcie_port->regs->app.int_grp_a = &regs->app.int_grp_a;
		pcie_port->regs->app.int_grp_b = &regs->app.int_grp_b;
		pcie_port->regs->app.int_grp_c = &regs->app.int_grp_c;
		pcie_port->regs->app.int_grp_d = &regs->app.int_grp_d;

		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
			pcie_port->regs->core_space[i].config_header = regs->core_space.func[i].config_header;
			pcie_port->regs->core_space[i].pcie_pm_cap_base = &regs->core_space.func[i].pcie_pm_cap_base;
			pcie_port->regs->core_space[i].pcie_cap_base = &regs->core_space.func[i].pcie_cap_base;
			pcie_port->regs->core_space[i].pcie_dev_cap_base = &regs->core_space.func[i].pcie_dev_cap_base;
			pcie_port->regs->core_space[i].pcie_dev_ctrl_status = &regs->core_space.func[i].pcie_dev_ctrl_status;
			pcie_port->regs->core_space[i].pcie_link_cap_base = &regs->core_space.func[i].pcie_link_cap_base;
			pcie_port->regs->core_space[i].msix_cap_base = &regs->core_space.func[i].msix_cap_base;
			pcie_port->regs->core_space[i].aer = &regs->core_space.func[i].aer;
			pcie_port->regs->core_space[i].tph_cap_base = &regs->core_space.func[i].tph_cap_base;
		}

		/* secondary extension capability only for PF0 */
		pcie_port->regs->core_space[0].pcie_sec_ext_cap_base = &regs->core_space.func[0].pcie_sec_ext_cap_base;

		pcie_port->regs->port_regs = &regs->core_space.func[0].port_regs;

	} else {
		al_warn("%s: Revision ID is unknown\n",
			__func__);
		return -EINVAL;
	}

	/* set maximum number of physical functions */
	pcie_port->max_num_of_pfs = al_pcie_port_max_num_of_pfs_get(pcie_port);

	al_dbg("pcie port handle initialized. port id: %d, rev_id %d, regs base %p\n",
	       port_id, pcie_port->rev_id, pcie_reg_base);
	return 0;
}
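
/*
 * Illustrative usage sketch (not taken from the original sources; the
 * register bases and port id below are platform-specific placeholders):
 *
 *	struct al_pcie_port port;
 *	int err;
 *
 *	err = al_pcie_port_handle_init(&port, pcie_reg_base, pbs_reg_base, 0);
 *	if (err)
 *		return err;
 */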

/**
 * Initializes a PCIe Physical function handle structure
 * Caution: this function must not read or write any register except for
 * reading RO registers (REV_ID for example)
 */
int
al_pcie_pf_handle_init(
	struct al_pcie_pf *pcie_pf,
	struct al_pcie_port *pcie_port,
	unsigned int pf_num)
{
	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);

	al_assert(pf_num < pcie_port->max_num_of_pfs);

	if (op_mode != AL_PCIE_OPERATING_MODE_EP) {
		al_err("PCIe %d: can't init PF handle with operating mode [%d]\n",
			pcie_port->port_id, op_mode);
		return -EINVAL;
	}

	pcie_pf->pf_num = pf_num;
	pcie_pf->pcie_port = pcie_port;

	al_dbg("PCIe %d: pf handle initialized. pf number: %d, rev_id %d, regs %p\n",
	       pcie_port->port_id, pcie_pf->pf_num, pcie_port->rev_id,
	       pcie_port->regs);
	return 0;
}

/************************** Pre PCIe Port Enable API **************************/

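/*
 * The configuration functions below must be called while the port is still
 * disabled (each one rejects an already-enabled port). An illustrative, not
 * mandated, ordering ('port' and 'ib_cfg' are placeholders):
 *
 *	al_pcie_port_operating_mode_config(&port, AL_PCIE_OPERATING_MODE_RC);
 *	al_pcie_port_max_lanes_set(&port, 4);
 *	al_pcie_port_ib_hcrd_os_ob_reads_config(&port, &ib_cfg);
 */
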
/** configure pcie operating mode (root complex or endpoint) */
int
al_pcie_port_operating_mode_config(
	struct al_pcie_port *pcie_port,
	enum al_pcie_operating_mode mode)
{
	struct al_pcie_regs *regs = pcie_port->regs;
	uint32_t reg, device_type, new_device_type;

	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot set operating mode\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	reg = al_reg_read32(regs->axi.pcie_global.conf);

	device_type = AL_REG_FIELD_GET(reg,
			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
	if (mode == AL_PCIE_OPERATING_MODE_EP) {
		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP;
	} else if (mode == AL_PCIE_OPERATING_MODE_RC) {
		new_device_type = PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC;

		if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
			/* config 1 PF in RC mode */
			al_reg_write32_masked(regs->axi.axi_attr_ovrd.pf_sel,
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_AXUSER |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_ADDR_OFFSET_MASK |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT0_OVRD |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_AXUSER |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_ADDR_OFFSET_MASK |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_CFG_PF_BIT1_OVRD,
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT0_OVRD_FROM_REG |
				PCIE_AXI_AXI_ATTR_OVRD_PF_SEL_PF_BIT1_OVRD_FROM_REG);
		}
	} else {
		al_err("PCIe %d: unknown operating mode: %d\n", pcie_port->port_id, mode);
		return -EINVAL;
	}

	if (new_device_type == device_type) {
		al_dbg("PCIe %d: operating mode already set to %s\n",
		       pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
		       "EndPoint" : "Root Complex");
		return 0;
	}
	al_info("PCIe %d: set operating mode to %s\n",
		pcie_port->port_id, (mode == AL_PCIE_OPERATING_MODE_EP) ?
		"EndPoint" : "Root Complex");
	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
			 PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT,
			 new_device_type);

	al_reg_write32(regs->axi.pcie_global.conf, reg);

	return 0;
}

int
al_pcie_port_max_lanes_set(struct al_pcie_port *pcie_port, uint8_t lanes)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot set max lanes\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	/* convert to bitmask format (4 -> 'b1111, 2 -> 'b11, 1 -> 'b1) */
	uint32_t active_lanes_val = AL_PCIE_PARSE_LANES(lanes);

	al_reg_write32_masked(regs->axi.pcie_global.conf,
		(pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
		PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
		PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
		active_lanes_val);

	pcie_port->max_lanes = lanes;
	return 0;
}

int
al_pcie_port_max_num_of_pfs_set(
	struct al_pcie_port *pcie_port,
	uint8_t max_num_of_pfs)
{
	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot set max num of PFs\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	if (pcie_port->rev_id == AL_PCIE_REV_ID_3)
		al_assert(max_num_of_pfs <= REV3_MAX_NUM_OF_PFS);
	else
		al_assert(max_num_of_pfs == REV1_2_MAX_NUM_OF_PFS);

	pcie_port->max_num_of_pfs = max_num_of_pfs;

	return 0;
}

/* Inbound header credits and outstanding outbound reads configuration */
int
al_pcie_port_ib_hcrd_os_ob_reads_config(
	struct al_pcie_port *pcie_port,
	struct al_pcie_ib_hcrd_os_ob_reads_config *ib_hcrd_os_ob_reads_config)
{
	struct al_pcie_regs *regs = pcie_port->regs;

	if (al_pcie_port_is_enabled(pcie_port)) {
		al_err("PCIe %d: already enabled, cannot configure IB credits and OB OS reads\n",
			pcie_port->port_id);
		return -EINVAL;
	}

	al_assert(ib_hcrd_os_ob_reads_config->nof_np_hdr > 0);
	al_assert(ib_hcrd_os_ob_reads_config->nof_p_hdr > 0);
	al_assert(ib_hcrd_os_ob_reads_config->nof_cpl_hdr > 0);

	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
		al_assert(
			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
			ib_hcrd_os_ob_reads_config->nof_np_hdr +
			ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
			AL_PCIE_REV3_IB_HCRD_SUM);

		al_reg_write32_masked(
			regs->axi.init_fc.cfg,
			PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_MASK |
			PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_MASK |
			PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_MASK,
			(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
			 PCIE_AXI_REV3_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
			 PCIE_AXI_REV3_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
			 PCIE_AXI_REV3_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
	} else {
		al_assert(
			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr +
			ib_hcrd_os_ob_reads_config->nof_np_hdr +
			ib_hcrd_os_ob_reads_config->nof_p_hdr) ==
			AL_PCIE_REV_1_2_IB_HCRD_SUM);

		al_reg_write32_masked(
			regs->axi.init_fc.cfg,
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_MASK |
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_MASK |
			PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_MASK,
			(ib_hcrd_os_ob_reads_config->nof_p_hdr <<
			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_P_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_np_hdr <<
			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_NP_HDR_SHIFT) |
			(ib_hcrd_os_ob_reads_config->nof_cpl_hdr <<
			 PCIE_AXI_REV1_2_INIT_FC_CFG_NOF_CPL_HDR_SHIFT));
	}

	al_reg_write32_masked(
		regs->axi.pre_configuration.pcie_core_setup,
		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_MASK,
		ib_hcrd_os_ob_reads_config->nof_outstanding_ob_reads <<
		PCIE_AXI_CORE_SETUP_NOF_READS_ONSLAVE_INTRF_PCIE_CORE_SHIFT);

	/* Store 'nof_p_hdr' and 'nof_np_hdr' to be set in the core later */
1471 	pcie_port->ib_hcrd_config.nof_np_hdr =
1472 		ib_hcrd_os_ob_reads_config->nof_np_hdr;
1473 	pcie_port->ib_hcrd_config.nof_p_hdr =
1474 		ib_hcrd_os_ob_reads_config->nof_p_hdr;
1475 
1476 	return 0;
1477 }
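
/*
 * Usage sketch (editor's addition): the three inbound header-credit fields
 * must sum to the revision-specific total checked by the asserts above
 * (AL_PCIE_REV3_IB_HCRD_SUM or AL_PCIE_REV_1_2_IB_HCRD_SUM). The split below
 * is an arbitrary illustration, not a recommended tuning.
 *
 *	struct al_pcie_ib_hcrd_os_ob_reads_config cfg = {
 *		.nof_outstanding_ob_reads = 8,
 *		.nof_cpl_hdr = 41,	// example split; the three header
 *		.nof_np_hdr = 25,	// counts must add up to the IB_HCRD
 *		.nof_p_hdr = 31,	// sum of the port revision
 *	};
 *
 *	rc = al_pcie_port_ib_hcrd_os_ob_reads_config(pcie_port, &cfg);
 */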
1478 
1479 enum al_pcie_operating_mode
1480 al_pcie_operating_mode_get(
1481 	struct al_pcie_port *pcie_port)
1482 {
1483 	struct al_pcie_regs *regs = pcie_port->regs;
1484 	uint32_t reg, device_type;
1485 
1486 	al_assert(pcie_port);
1487 
1488 	reg = al_reg_read32(regs->axi.pcie_global.conf);
1489 
1490 	device_type = AL_REG_FIELD_GET(reg,
1491 			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_MASK,
1492 			PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_SHIFT);
1493 
1494 	switch (device_type) {
1495 	case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_EP:
1496 		return AL_PCIE_OPERATING_MODE_EP;
1497 	case PCIE_AXI_MISC_PCIE_GLOBAL_CONF_DEV_TYPE_RC:
1498 		return AL_PCIE_OPERATING_MODE_RC;
1499 	default:
1500 		al_err("PCIe %d: unknown device type (%d) in global conf register.\n",
1501 			pcie_port->port_id, device_type);
1502 	}
1503 	return AL_PCIE_OPERATING_MODE_UNKNOWN;
1504 }
1505 
1506 /**************************** PCIe Port Enable API ****************************/
1507 
1508 /** Enable PCIe port (deassert reset) */
1509 int
1510 al_pcie_port_enable(struct al_pcie_port *pcie_port)
1511 {
1512 	struct al_pbs_regs *pbs_reg_base =
1513 				(struct al_pbs_regs *)pcie_port->pbs_regs;
1514 	struct al_pcie_regs *regs = pcie_port->regs;
1515 	unsigned int port_id = pcie_port->port_id;
1516 
1517 	/* pre-port-enable default functionality should be here */
1518 
1519 	/**
1520 	 * Set inbound header credit and outstanding outbound reads defaults
1521 	 * Must be called before port enable (PCIE_EXIST)
1522 	 */
1523 	al_pcie_ib_hcrd_os_ob_reads_config_default(pcie_port);
1524 
1525 	/*
1526 	 * Disable ATS capability
1527 	 * - must be done before core reset deasserted
1528 	 * - rev_id 0 - no effect, but no harm
1529 	 */
1530 	if ((pcie_port->rev_id == AL_PCIE_REV_ID_0) ||
1531 		(pcie_port->rev_id == AL_PCIE_REV_ID_1) ||
1532 		(pcie_port->rev_id == AL_PCIE_REV_ID_2)) {
1533 		al_reg_write32_masked(
1534 			regs->axi.ordering.pos_cntl,
1535 			PCIE_AXI_CORE_SETUP_ATS_CAP_DIS,
1536 			PCIE_AXI_CORE_SETUP_ATS_CAP_DIS);
1537 	}
1538 
1539 	/* Deassert core reset */
1540 	al_reg_write32_masked(
1541 		&pbs_reg_base->unit.pcie_conf_1,
1542 		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1543 		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT));
1544 
1545 	return 0;
1546 }
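
/*
 * Bring-up sketch (editor's addition): the ordering implied by the checks in
 * this file is pre-enable configuration first, then al_pcie_port_enable()
 * (which deasserts core reset via the PCIE_EXIST bit), then port
 * configuration and link start. Error handling is omitted for brevity.
 *
 *	al_pcie_port_operating_mode_config(pcie_port, AL_PCIE_OPERATING_MODE_RC);
 *	al_pcie_port_max_lanes_set(pcie_port, 4);
 *	al_pcie_port_enable(pcie_port);
 *	// ... al_pcie_port_config() and al_pcie_link_start() follow ...
 */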
1547 
1548 /** Disable PCIe port (assert reset) */
1549 void
1550 al_pcie_port_disable(struct al_pcie_port *pcie_port)
1551 {
1552 	struct al_pbs_regs *pbs_reg_base =
1553 				(struct al_pbs_regs *)pcie_port->pbs_regs;
1554 	unsigned int port_id = pcie_port->port_id;
1555 
1556 	if (!al_pcie_port_is_enabled(pcie_port)) {
1557 		al_warn("PCIe %d: trying to disable a non-enabled port\n",
1558 			pcie_port->port_id);
1559 	}
1560 
1561 	/* Assert core reset */
1562 	al_reg_write32_masked(
1563 		&pbs_reg_base->unit.pcie_conf_1,
1564 		1 << (port_id + PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT),
1565 		0);
1566 }
1567 
1568 int
1569 al_pcie_port_memory_shutdown_set(
1570 	struct al_pcie_port	*pcie_port,
1571 	al_bool			enable)
1572 {
1573 	struct al_pcie_regs *regs = pcie_port->regs;
1574 	uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1575 		PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN :
1576 		PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_MEM_SHUTDOWN;
1577 
1578 	if (!al_pcie_port_is_enabled(pcie_port)) {
1579 		al_err("PCIe %d: not enabled, cannot shutdown memory\n",
1580 			pcie_port->port_id);
1581 		return -EINVAL;
1582 	}
1583 
1584 	al_reg_write32_masked(regs->axi.pcie_global.conf,
1585 		mask, enable == AL_TRUE ? mask : 0);
1586 
1587 	return 0;
1588 }
1589 
1590 al_bool
1591 al_pcie_port_is_enabled(struct al_pcie_port *pcie_port)
1592 {
1593 	struct al_pbs_regs *pbs_reg_base = (struct al_pbs_regs *)pcie_port->pbs_regs;
1594 	uint32_t pcie_exist = al_reg_read32(&pbs_reg_base->unit.pcie_conf_1);
1595 
1596 	uint32_t ports_enabled = AL_REG_FIELD_GET(pcie_exist,
1597 		PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_MASK,
1598 		PBS_UNIT_PCIE_CONF_1_PCIE_EXIST_SHIFT);
1599 
1600 	return (AL_REG_FIELD_GET(ports_enabled, AL_BIT(pcie_port->port_id),
1601 		pcie_port->port_id) == 1);
1602 }
1603 
1604 /*************************** PCIe Configuration API ***************************/
1605 
1606 /** configure pcie port (link params, etc..) */
1607 int
1608 al_pcie_port_config(struct al_pcie_port *pcie_port,
1609 			const struct al_pcie_port_config_params *params)
1610 {
	struct al_pcie_regs *regs;
	enum al_pcie_operating_mode op_mode;
	int status = 0;
	int i;

	al_assert(pcie_port);
	al_assert(params);

	regs = pcie_port->regs;

1616 	if (!al_pcie_port_is_enabled(pcie_port)) {
1617 		al_err("PCIe %d: port not enabled, cannot configure port\n",
1618 			pcie_port->port_id);
1619 		return -EINVAL;
1620 	}
1621 
1622 	if (al_pcie_is_link_started(pcie_port)) {
1623 		al_err("PCIe %d: link already started, cannot configure port\n",
1624 			pcie_port->port_id);
1625 		return -EINVAL;
1626 	}
1627 
1631 	al_dbg("PCIe %d: port config\n", pcie_port->port_id);
1632 
1633 	op_mode = al_pcie_operating_mode_get(pcie_port);
1634 
	/* if max lanes not specified, read it from the register */
1636 	if (pcie_port->max_lanes == 0) {
1637 		uint32_t global_conf = al_reg_read32(regs->axi.pcie_global.conf);
1638 		uint32_t act_lanes = AL_REG_FIELD_GET(global_conf,
1639 			(pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
1640 			PCIE_REV3_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK :
1641 			PCIE_REV1_2_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_MASK,
1642 			PCIE_REVX_AXI_MISC_PCIE_GLOBAL_CONF_NOF_ACT_LANES_SHIFT);
1643 
		switch (act_lanes) {
1645 		case 0x1:
1646 			pcie_port->max_lanes = 1;
1647 			break;
1648 		case 0x3:
1649 			pcie_port->max_lanes = 2;
1650 			break;
1651 		case 0xf:
1652 			pcie_port->max_lanes = 4;
1653 			break;
1654 		case 0xff:
1655 			pcie_port->max_lanes = 8;
1656 			break;
1657 		default:
1658 			pcie_port->max_lanes = 0;
1659 			al_err("PCIe %d: invalid max lanes val (0x%x)\n", pcie_port->port_id, act_lanes);
1660 			break;
1661 		}
1662 	}
1663 
1664 	if (params->link_params)
1665 		status = al_pcie_port_link_config(pcie_port, params->link_params);
1666 	if (status)
1667 		goto done;
1668 
	/* Change max read request size to 256 bytes.
	 * Max Payload Size remains untouched - it is the responsibility of
	 * the host to change the MPS, if needed.
	 */
1673 	for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1674 		al_reg_write32_masked(regs->core_space[i].pcie_dev_ctrl_status,
1675 			PCIE_PORT_DEV_CTRL_STATUS_MRRS_MASK,
1676 			PCIE_PORT_DEV_CTRL_STATUS_MRRS_VAL_256);
1677 		if (pcie_port->rev_id != AL_PCIE_REV_ID_3)
1678 			break;
1679 	}
1680 
1681 	if (pcie_port->rev_id == AL_PCIE_REV_ID_3) {
1682 		/* Set maximum physical function numbers */
1683 		al_reg_write32_masked(
1684 			&regs->port_regs->timer_ctrl_max_func_num,
1685 			PCIE_PORT_GEN3_MAX_FUNC_NUM,
1686 			pcie_port->max_num_of_pfs - 1);
1687 
1688 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1689 
1690 		/**
1691 		 * in EP mode, when we have more than 1 PF we need to assert
		 * multi-pf support so the host scans all PFs
1693 		 */
1694 		if ((op_mode == AL_PCIE_OPERATING_MODE_EP) && (pcie_port->max_num_of_pfs > 1)) {
1695 			al_reg_write32_masked((uint32_t __iomem *)
1696 				(&regs->core_space[0].config_header[0] +
1697 				(PCIE_BIST_HEADER_TYPE_BASE >> 2)),
1698 				PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK,
1699 				PCIE_BIST_HEADER_TYPE_MULTI_FUNC_MASK);
1700 		}
1701 
1702 		/* Disable TPH next pointer */
1703 		for (i = 0; i < AL_MAX_NUM_OF_PFS; i++) {
1704 			al_reg_write32_masked(regs->core_space[i].tph_cap_base,
1705 			PCIE_TPH_NEXT_POINTER, 0);
1706 		}
1707 
1708 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1709 	}
1710 
1711 
1712 	status = al_pcie_port_snoop_config(pcie_port, params->enable_axi_snoop);
1713 	if (status)
1714 		goto done;
1715 
1716 	al_pcie_port_ram_parity_int_config(pcie_port, params->enable_ram_parity_int);
1717 
1718 	al_pcie_port_axi_parity_int_config(pcie_port, params->enable_axi_parity_int);
1719 
1720 	al_pcie_port_relaxed_pcie_ordering_config(pcie_port, params->relaxed_ordering_params);
1721 
1722 	if (params->lat_rply_timers)
1723 		status = al_pcie_port_lat_rply_timers_config(pcie_port, params->lat_rply_timers);
1724 	if (status)
1725 		goto done;
1726 
1727 	if (params->gen2_params)
1728 		status = al_pcie_port_gen2_params_config(pcie_port, params->gen2_params);
1729 	if (status)
1730 		goto done;
1731 
1732 	if (params->gen3_params)
1733 		status = al_pcie_port_gen3_params_config(pcie_port, params->gen3_params);
1734 	if (status)
1735 		goto done;
1736 
1737 	if (params->tl_credits)
1738 		status = al_pcie_port_tl_credits_config(pcie_port, params->tl_credits);
1739 	if (status)
1740 		goto done;
1741 
1742 	if (params->features)
1743 		al_pcie_port_features_config(pcie_port, params->features);
1744 
	if (params->sris_params) {
		/* SRIS config consumes the link max speed; link_params must
		 * be provided together with sris_params */
		al_assert(params->link_params);
		status = al_pcie_port_sris_config(pcie_port, params->sris_params,
						params->link_params->max_speed);
	}
1748 	if (status)
1749 		goto done;
1750 
1751 	al_pcie_port_ib_hcrd_config(pcie_port);
1752 
1753 	if (params->fast_link_mode) {
1754 		al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
1755 			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT,
1756 			      1 << PCIE_PORT_LINK_CTRL_FAST_LINK_EN_SHIFT);
1757 	}
1758 
1759 	if (params->enable_axi_slave_err_resp)
1760 		al_reg_write32_masked(&regs->port_regs->axi_slave_err_resp,
1761 				1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT,
1762 				1 << PCIE_PORT_AXI_SLAVE_ERR_RESP_ALL_MAPPING_SHIFT);
1763 
1764 	/**
1765 	 * Addressing RMN: 5477
1766 	 *
1767 	 * RMN description:
1768 	 * address-decoder logic performs sub-target decoding even for transactions
1769 	 * which undergo target enforcement. thus, in case transaction's address is
1770 	 * inside any ECAM bar, the sub-target decoding will be set to ECAM, which
1771 	 * causes wrong handling by PCIe unit
1772 	 *
1773 	 * Software flow:
1774 	 * on EP mode only, turning on the iATU-enable bit (with the relevant mask
1775 	 * below) allows the PCIe unit to discard the ECAM bit which was asserted
1776 	 * by-mistake in the address-decoder
1777 	 */
1778 	if (op_mode == AL_PCIE_OPERATING_MODE_EP) {
1779 		al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1780 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1781 			(0) << PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
1782 		al_reg_write32_masked(regs->axi.ob_ctrl.cfg_control,
1783 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN,
1784 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_IATU_EN);
1785 	}
1786 
1787 	if (op_mode == AL_PCIE_OPERATING_MODE_RC) {
1788 		/**
1789 		 * enable memory and I/O access from port when in RC mode
1790 		 * in RC mode, only core_space[0] is valid.
1791 		 */
1792 		al_reg_write16_masked(
1793 			(uint16_t __iomem *)(&regs->core_space[0].config_header[0] + (0x4 >> 2)),
1794 			0x7, /* Mem, MSE, IO */
1795 			0x7);
1796 
1797 		/* change the class code to match pci bridge */
1798 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
1799 
1800 		al_reg_write32_masked(
1801 			(uint32_t __iomem *)(&regs->core_space[0].config_header[0]
1802 			+ (PCI_CLASS_REVISION >> 2)),
1803 			0xFFFFFF00,
1804 			0x06040000);
1805 
1806 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
1807 
1808 		/**
1809 		 * Addressing RMN: 5702
1810 		 *
1811 		 * RMN description:
1812 		 * target bus mask default value in HW is: 0xFE, this enforces
1813 		 * setting the target bus for ports 1 and 3 when running on RC
1814 		 * mode since bit[20] in ECAM address in these cases is set
1815 		 *
1816 		 * Software flow:
1817 		 * on RC mode only, set target-bus value to 0xFF to prevent this
1818 		 * enforcement
1819 		 */
1820 		al_reg_write32_masked(regs->axi.ob_ctrl.cfg_target_bus,
1821 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
1822 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK);
1823 	}
1824 done:
1825 	al_dbg("PCIe %d: port config %s\n", pcie_port->port_id, status? "failed": "done");
1826 
1827 	return status;
1828 }
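
/*
 * Usage sketch (editor's addition): a minimal configuration call. Every
 * sub-structure pointer left NULL is skipped by the function above, so
 * zero-initialized params only trigger the unconditional steps (MRRS,
 * snoop, parity and ordering setup).
 *
 *	struct al_pcie_port_config_params params = { 0 };
 *
 *	rc = al_pcie_port_config(pcie_port, &params);
 */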
1829 
1830 int
1831 al_pcie_pf_config(
1832 	struct al_pcie_pf *pcie_pf,
1833 	const struct al_pcie_pf_config_params *params)
1834 {
1835 	struct al_pcie_port *pcie_port;
1836 	int status = 0;
1837 
1838 	al_assert(pcie_pf);
1839 	al_assert(params);
1840 
1841 	pcie_port = pcie_pf->pcie_port;
1842 
1843 	if (!al_pcie_port_is_enabled(pcie_port)) {
1844 		al_err("PCIe %d: port not enabled, cannot configure port\n", pcie_port->port_id);
1845 		return -EINVAL;
1846 	}
1847 
1848 	al_dbg("PCIe %d: pf %d config\n", pcie_port->port_id, pcie_pf->pf_num);
1849 
1850 	if (params)
1851 		status = al_pcie_port_pf_params_config(pcie_pf, params);
1852 	if (status)
1853 		goto done;
1854 
1855 done:
1856 	al_dbg("PCIe %d: pf %d config %s\n",
1857 		pcie_port->port_id, pcie_pf->pf_num, status ? "failed" : "done");
1858 
1859 	return status;
1860 }
1861 
1862 /************************** PCIe Link Operations API **************************/
1863 
1864 /* start pcie link */
1865 int
1866 al_pcie_link_start(struct al_pcie_port *pcie_port)
1867 {
1868 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1869 
1870 	if (!al_pcie_port_is_enabled(pcie_port)) {
1871 		al_err("PCIe %d: port not enabled, cannot start link\n",
1872 			pcie_port->port_id);
1873 		return -EINVAL;
1874 	}
1875 
1876 	al_dbg("PCIe_%d: start port link.\n", pcie_port->port_id);
1877 
1878 	al_reg_write32_masked(
1879 			regs->app.global_ctrl.port_init,
1880 			PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1881 			PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1882 
1883 	return 0;
1884 }
1885 
1886 /* stop pcie link */
1887 int
1888 al_pcie_link_stop(struct al_pcie_port *pcie_port)
1889 {
1890 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
1891 
1892 	if (!al_pcie_is_link_started(pcie_port)) {
1893 		al_warn("PCIe %d: trying to stop a non-started link\n",
1894 			pcie_port->port_id);
1895 	}
1896 
1897 	al_dbg("PCIe_%d: stop port link.\n", pcie_port->port_id);
1898 
1899 	al_reg_write32_masked(
1900 			regs->app.global_ctrl.port_init,
1901 			PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK,
1902 			~PCIE_W_GLOBAL_CTRL_PORT_INIT_APP_LTSSM_EN_MASK);
1903 
1904 	return 0;
1905 }
1906 
1907 /* wait for link up indication */
1908 int
1909 al_pcie_link_up_wait(struct al_pcie_port *pcie_port, uint32_t timeout_ms)
1910 {
1911 	int wait_count = timeout_ms * AL_PCIE_LINKUP_WAIT_INTERVALS_PER_SEC;
1912 
1913 	while (wait_count-- > 0)	{
1914 		if (al_pcie_check_link(pcie_port, NULL)) {
1915 			al_info("PCIe_%d: <<<<<<<<< Link up >>>>>>>>>\n", pcie_port->port_id);
1916 			return 0;
1917 		} else
1918 			al_dbg("PCIe_%d: No link up, %d attempts remaining\n",
1919 				pcie_port->port_id, wait_count);
1920 
1921 		al_udelay(AL_PCIE_LINKUP_WAIT_INTERVAL);
1922 	}
1923 	al_info("PCIE_%d: link is not established in time\n",
1924 				pcie_port->port_id);
1925 
1926 	return ETIMEDOUT;
1927 }
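
/*
 * Usage sketch (editor's addition): starting the link and polling for the
 * link-up indication. The 100 ms budget is illustrative; note that
 * al_pcie_link_up_wait() returns ETIMEDOUT (positive) on timeout.
 *
 *	rc = al_pcie_link_start(pcie_port);
 *	if (!rc)
 *		rc = al_pcie_link_up_wait(pcie_port, 100);
 *	if (rc)
 *		al_pcie_link_stop(pcie_port);	// one possible fallback
 */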
1928 
1929 /** get link status */
1930 int
1931 al_pcie_link_status(struct al_pcie_port *pcie_port,
1932 			struct al_pcie_link_status *status)
1933 {
1934 	struct al_pcie_regs *regs = pcie_port->regs;
1935 	uint16_t	pcie_lnksta;
1936 
1937 	al_assert(status);
1938 
1939 	status->link_up = al_pcie_check_link(pcie_port, &status->ltssm_state);
1940 
1941 	if (!status->link_up) {
1942 		status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
1943 		status->lanes = 0;
1944 		return 0;
1945 	}
1946 
1947 	pcie_lnksta = al_reg_read16((uint16_t __iomem *)regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKSTA >> 1));
1948 
	switch (pcie_lnksta & AL_PCI_EXP_LNKSTA_CLS) {
1950 		case AL_PCI_EXP_LNKSTA_CLS_2_5GB:
1951 			status->speed = AL_PCIE_LINK_SPEED_GEN1;
1952 			break;
1953 		case AL_PCI_EXP_LNKSTA_CLS_5_0GB:
1954 			status->speed = AL_PCIE_LINK_SPEED_GEN2;
1955 			break;
1956 		case AL_PCI_EXP_LNKSTA_CLS_8_0GB:
1957 			status->speed = AL_PCIE_LINK_SPEED_GEN3;
1958 			break;
1959 		default:
1960 			status->speed = AL_PCIE_LINK_SPEED_DEFAULT;
1961 			al_err("PCIe %d: unknown link speed indication. PCIE LINK STATUS %x\n",
1962 				pcie_port->port_id, pcie_lnksta);
1963 	}
1964 	status->lanes = (pcie_lnksta & AL_PCI_EXP_LNKSTA_NLW) >> AL_PCI_EXP_LNKSTA_NLW_SHIFT;
1965 	al_info("PCIe %d: Link up. speed gen%d negotiated width %d\n",
1966 		pcie_port->port_id, status->speed, status->lanes);
1967 
1968 	return 0;
1969 }
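
/*
 * Usage sketch (editor's addition): querying the negotiated speed and width
 * once the link is expected to be up.
 *
 *	struct al_pcie_link_status lstat;
 *
 *	rc = al_pcie_link_status(pcie_port, &lstat);
 *	if ((rc == 0) && lstat.link_up)
 *		al_dbg("link: gen%d x%d, ltssm state 0x%x\n",
 *		       lstat.speed, lstat.lanes, lstat.ltssm_state);
 */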
1970 
1971 /** get lane status */
1972 void
1973 al_pcie_lane_status_get(
1974 	struct al_pcie_port		*pcie_port,
1975 	unsigned int			lane,
1976 	struct al_pcie_lane_status	*status)
1977 {
1978 	struct al_pcie_regs *regs = pcie_port->regs;
1979 	uint32_t lane_status;
1980 	uint32_t *reg_ptr;
1981 
1982 	al_assert(pcie_port);
1983 	al_assert(status);
1984 	al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_1) || (lane < REV1_2_MAX_NUM_LANES));
1985 	al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_2) || (lane < REV1_2_MAX_NUM_LANES));
1986 	al_assert((pcie_port->rev_id != AL_PCIE_REV_ID_3) || (lane < REV3_MAX_NUM_LANES));
1987 
1988 	reg_ptr = regs->axi.status.lane[lane];
1989 
1990 	/* Reset field is valid only when same value is read twice */
1991 	do {
1992 		lane_status = al_reg_read32(reg_ptr);
1993 		status->is_reset = !!(lane_status & PCIE_AXI_STATUS_LANE_IS_RESET);
1994 	} while (status->is_reset != (!!(al_reg_read32(reg_ptr) & PCIE_AXI_STATUS_LANE_IS_RESET)));
1995 
1996 	status->requested_speed =
1997 		(lane_status & PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_MASK) >>
1998 		PCIE_AXI_STATUS_LANE_REQUESTED_SPEED_SHIFT;
1999 }
2000 
2001 /** trigger hot reset */
2002 int
2003 al_pcie_link_hot_reset(struct al_pcie_port *pcie_port, al_bool enable)
2004 {
2005 	struct al_pcie_regs *regs = pcie_port->regs;
2006 	uint32_t events_gen;
2007 	al_bool app_reset_state;
2008 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2009 
2010 	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2011 		al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2012 		return -EINVAL;
2013 	}
2014 
2015 	if (!al_pcie_is_link_started(pcie_port)) {
2016 		al_err("PCIe %d: link not started, cannot trigger hot-reset\n", pcie_port->port_id);
2017 		return -EINVAL;
2018 	}
2019 
2020 	events_gen = al_reg_read32(regs->app.global_ctrl.events_gen[0]);
2021 	app_reset_state = events_gen & PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT;
2022 
2023 	if (enable && app_reset_state) {
2024 		al_err("PCIe %d: link is already in hot-reset state\n", pcie_port->port_id);
2025 		return -EINVAL;
2026 	} else if ((!enable) && (!(app_reset_state))) {
2027 		al_err("PCIe %d: link is already in non-hot-reset state\n", pcie_port->port_id);
2028 		return -EINVAL;
2029 	} else {
2030 		al_dbg("PCIe %d: %s hot-reset\n", pcie_port->port_id,
2031 			(enable ? "enabling" : "disabling"));
2032 		/* hot-reset functionality is implemented only for function 0 */
2033 		al_reg_write32_masked(regs->app.global_ctrl.events_gen[0],
2034 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT,
2035 			(enable ? PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT
2036 				: ~PCIE_W_GLOBAL_CTRL_EVENTS_GEN_APP_RST_INIT));
2037 		return 0;
2038 	}
2039 }
2040 
2041 /** disable port link */
2042 int
2043 al_pcie_link_disable(struct al_pcie_port *pcie_port, al_bool disable)
2044 {
2045 	struct al_pcie_regs *regs = pcie_port->regs;
2046 	uint32_t pcie_lnkctl;
2047 	al_bool link_disable_state;
2048 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2049 
2050 	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2051 		al_err("PCIe %d: hot-reset is applicable only for RC mode\n", pcie_port->port_id);
2052 		return -EINVAL;
2053 	}
2054 
2055 	if (!al_pcie_is_link_started(pcie_port)) {
2056 		al_err("PCIe %d: link not started, cannot disable link\n", pcie_port->port_id);
2057 		return -EINVAL;
2058 	}
2059 
2060 	pcie_lnkctl = al_reg_read32(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1));
2061 	link_disable_state = pcie_lnkctl & AL_PCI_EXP_LNKCTL_LNK_DIS;
2062 
2063 	if (disable && link_disable_state) {
2064 		al_err("PCIe %d: link is already in disable state\n", pcie_port->port_id);
2065 		return -EINVAL;
2066 	} else if ((!disable) && (!(link_disable_state))) {
2067 		al_err("PCIe %d: link is already in enable state\n", pcie_port->port_id);
2068 		return -EINVAL;
2069 	}
2070 
2071 	al_dbg("PCIe %d: %s port\n", pcie_port->port_id, (disable ? "disabling" : "enabling"));
2072 	al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2073 		AL_PCI_EXP_LNKCTL_LNK_DIS,
2074 		(disable ? AL_PCI_EXP_LNKCTL_LNK_DIS : ~AL_PCI_EXP_LNKCTL_LNK_DIS));
2075 	return 0;
2076 }
2077 
2078 /** retrain link */
2079 int
2080 al_pcie_link_retrain(struct al_pcie_port *pcie_port)
2081 {
2082 	struct al_pcie_regs *regs = pcie_port->regs;
2083 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2084 
2085 	if (op_mode != AL_PCIE_OPERATING_MODE_RC) {
2086 		al_err("PCIe %d: link-retrain is applicable only for RC mode\n",
2087 			pcie_port->port_id);
2088 		return -EINVAL;
2089 	}
2090 
2091 	if (!al_pcie_is_link_started(pcie_port)) {
2092 		al_err("PCIe %d: link not started, cannot link-retrain\n", pcie_port->port_id);
2093 		return -EINVAL;
2094 	}
2095 
2096 	al_reg_write32_masked(regs->core_space[0].pcie_cap_base + (AL_PCI_EXP_LNKCTL >> 1),
2097 	AL_PCI_EXP_LNKCTL_LNK_RTRN, AL_PCI_EXP_LNKCTL_LNK_RTRN);
2098 
2099 	return 0;
2100 }
2101 
2102 /* trigger speed change */
2103 int
2104 al_pcie_link_change_speed(struct al_pcie_port *pcie_port,
2105 			      enum al_pcie_link_speed new_speed)
2106 {
2107 	struct al_pcie_regs *regs = pcie_port->regs;
2108 
2109 	if (!al_pcie_is_link_started(pcie_port)) {
2110 		al_err("PCIe %d: link not started, cannot change speed\n", pcie_port->port_id);
2111 		return -EINVAL;
2112 	}
2113 
2114 	al_dbg("PCIe %d: changing speed to %d\n", pcie_port->port_id, new_speed);
2115 
2116 	al_pcie_port_link_speed_ctrl_set(pcie_port, new_speed);
2117 
2118 	al_reg_write32_masked(&regs->port_regs->gen2_ctrl,
2119 		PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE,
2120 		PCIE_PORT_GEN2_CTRL_DIRECT_SPEED_CHANGE);
2121 
2122 	return 0;
2123 }
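
/*
 * Usage sketch (editor's addition): requesting a Gen2 data rate. Pairing the
 * request with al_pcie_link_retrain() on an RC port is an assumption here;
 * whether retraining is needed depends on the link partner.
 *
 *	rc = al_pcie_link_change_speed(pcie_port, AL_PCIE_LINK_SPEED_GEN2);
 *	if (!rc)
 *		rc = al_pcie_link_retrain(pcie_port);
 */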
2124 
/* TODO: check if this function is needed */
2126 int
2127 al_pcie_link_change_width(struct al_pcie_port *pcie_port,
2128 			      uint8_t width __attribute__((__unused__)))
2129 {
2130 	al_err("PCIe %d: link change width not implemented\n",
2131 		pcie_port->port_id);
2132 
2133 	return -ENOSYS;
2134 }
2135 
2136 /**************************** Post Link Start API *****************************/
2137 
2138 /************************** Snoop Configuration API ***************************/
2139 
2140 int
2141 al_pcie_port_snoop_config(struct al_pcie_port *pcie_port, al_bool enable_axi_snoop)
2142 {
2143 	struct al_pcie_regs *regs = pcie_port->regs;
2144 
2145 	/* Set snoop mode */
2146 	al_info("PCIE_%d: snoop mode %s\n",
2147 			pcie_port->port_id, enable_axi_snoop ? "enable" : "disable");
2148 
2149 	if (enable_axi_snoop) {
2150 		al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2151 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2152 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP);
2153 
2154 		al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2155 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2156 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP);
2157 	} else {
2158 		al_reg_write32_masked(regs->axi.ctrl.master_arctl,
2159 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_ARCTL_SNOOP,
2160 			PCIE_AXI_CTRL_MASTER_ARCTL_OVR_SNOOP);
2161 
2162 		al_reg_write32_masked(regs->axi.ctrl.master_awctl,
2163 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP | PCIE_AXI_CTRL_MASTER_AWCTL_SNOOP,
2164 			PCIE_AXI_CTRL_MASTER_AWCTL_OVR_SNOOP);
2165 	}
2166 	return 0;
2167 }
2168 
2169 /************************** Configuration Space API ***************************/
2170 
2171 /** get base address of pci configuration space header */
2172 int
2173 al_pcie_config_space_get(struct al_pcie_pf *pcie_pf,
2174 			     uint8_t __iomem **addr)
2175 {
2176 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2177 
2178 	*addr = (uint8_t __iomem *)&regs->core_space[pcie_pf->pf_num].config_header[0];
2179 	return 0;
2180 }
2181 
2182 /* Read data from the local configuration space */
2183 uint32_t
2184 al_pcie_local_cfg_space_read(
2185 	struct al_pcie_pf	*pcie_pf,
2186 	unsigned int		reg_offset)
2187 {
2188 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2189 	uint32_t data;
2190 
2191 	data = al_reg_read32(&regs->core_space[pcie_pf->pf_num].config_header[reg_offset]);
2192 
2193 	return data;
2194 }
2195 
2196 /* Write data to the local configuration space */
2197 void
2198 al_pcie_local_cfg_space_write(
2199 	struct al_pcie_pf	*pcie_pf,
2200 	unsigned int		reg_offset,
2201 	uint32_t		data,
2202 	al_bool			cs2,
2203 	al_bool			allow_ro_wr)
2204 {
2205 	struct al_pcie_port *pcie_port = pcie_pf->pcie_port;
2206 	struct al_pcie_regs *regs = pcie_port->regs;
2207 	unsigned int pf_num = pcie_pf->pf_num;
2208 	uint32_t *offset = &regs->core_space[pf_num].config_header[reg_offset];
2209 
2210 	if (allow_ro_wr)
2211 		al_pcie_port_wr_to_ro_set(pcie_port, AL_TRUE);
2212 
2213 	if (cs2 == AL_FALSE)
2214 		al_reg_write32(offset, data);
2215 	else
2216 		al_reg_write32_dbi_cs2(pcie_port, offset, data);
2217 
2218 	if (allow_ro_wr)
2219 		al_pcie_port_wr_to_ro_set(pcie_port, AL_FALSE);
2220 }
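
/*
 * Usage sketch (editor's addition): reg_offset is expressed in 32-bit words,
 * as the config_header[reg_offset] indexing above shows. Offset 0 reads the
 * vendor/device ID dword; the write below demonstrates the read-only
 * override path with a regular (non-CS2) write. The offset and value in the
 * write are arbitrary placeholders.
 *
 *	uint32_t id = al_pcie_local_cfg_space_read(pcie_pf, 0);
 *
 *	al_pcie_local_cfg_space_write(pcie_pf, 0x2, 0x12345678,
 *				      AL_FALSE, AL_TRUE);
 */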
2221 
2222 /** set target_bus and mask_target_bus */
2223 int
2224 al_pcie_target_bus_set(
2225 	struct al_pcie_port *pcie_port,
2226 	uint8_t target_bus,
2227 	uint8_t mask_target_bus)
2228 {
2229 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2230 	uint32_t reg;
2231 
2232 	reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2233 	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2234 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT,
2235 			mask_target_bus);
2236 	AL_REG_FIELD_SET(reg, PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2237 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT,
2238 			target_bus);
2239 	al_reg_write32(regs->axi.ob_ctrl.cfg_target_bus, reg);
2240 	return 0;
2241 }
2242 
2243 /** get target_bus and mask_target_bus */
2244 int
2245 al_pcie_target_bus_get(
2246 	struct al_pcie_port *pcie_port,
2247 	uint8_t *target_bus,
2248 	uint8_t *mask_target_bus)
2249 {
2250 	struct al_pcie_regs *regs = (struct al_pcie_regs *)pcie_port->regs;
2251 	uint32_t reg;
2252 
2253 	al_assert(target_bus);
2254 	al_assert(mask_target_bus);
2255 
2256 	reg = al_reg_read32(regs->axi.ob_ctrl.cfg_target_bus);
2257 
2258 	*mask_target_bus = AL_REG_FIELD_GET(reg,
2259 				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_MASK,
2260 				PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_MASK_SHIFT);
2261 	*target_bus = AL_REG_FIELD_GET(reg,
2262 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_MASK,
2263 			PCIE_AXI_MISC_OB_CTRL_CFG_TARGET_BUS_BUSNUM_SHIFT);
2264 	return 0;
2265 }
2266 
2267 /** Set secondary bus number */
2268 int
2269 al_pcie_secondary_bus_set(struct al_pcie_port *pcie_port, uint8_t secbus)
2270 {
2271 	struct al_pcie_regs *regs = pcie_port->regs;
2272 
2273 	uint32_t secbus_val = (secbus <<
2274 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_SHIFT);
2275 
2276 	al_reg_write32_masked(
2277 		regs->axi.ob_ctrl.cfg_control,
2278 		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SEC_BUS_MASK,
2279 		secbus_val);
2280 	return 0;
2281 }
2282 
/** Set subordinate bus number */
2284 int
2285 al_pcie_subordinary_bus_set(struct al_pcie_port *pcie_port, uint8_t subbus)
2286 {
2287 	struct al_pcie_regs *regs = pcie_port->regs;
2288 
2289 	uint32_t subbus_val = (subbus <<
2290 			PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_SHIFT);
2291 
2292 	al_reg_write32_masked(
2293 		regs->axi.ob_ctrl.cfg_control,
2294 		PCIE_AXI_MISC_OB_CTRL_CFG_CONTROL_SUBBUS_MASK,
2295 		subbus_val);
2296 	return 0;
2297 }
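
/*
 * Usage sketch (editor's addition): a plausible RC-side pairing of the bus
 * number helpers during early enumeration. Bus number 1 mirrors the
 * AL_PCIE_SECBUS_DEFAULT/AL_PCIE_SUBBUS_DEFAULT values, and the 0xFF mask
 * mirrors the RMN 5702 workaround applied in al_pcie_port_config().
 *
 *	al_pcie_target_bus_set(pcie_port, 1, 0xFF);
 *	al_pcie_secondary_bus_set(pcie_port, 1);
 *	al_pcie_subordinary_bus_set(pcie_port, 1);
 */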
2298 
2299 /* Enable/disable deferring incoming configuration requests */
2300 void
2301 al_pcie_app_req_retry_set(
2302 	struct al_pcie_port	*pcie_port,
2303 	al_bool			en)
2304 {
2305 	struct al_pcie_regs *regs = pcie_port->regs;
2306 	uint32_t mask = (pcie_port->rev_id == AL_PCIE_REV_ID_3) ?
2307 		PCIE_W_REV3_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN :
2308 		PCIE_W_REV1_2_GLOBAL_CTRL_PM_CONTROL_APP_REQ_RETRY_EN;
2309 
2310 	al_reg_write32_masked(regs->app.global_ctrl.pm_control,
2311 		mask, (en == AL_TRUE) ? mask : 0);
2312 }
2313 
2314 /*************** Internal Address Translation Unit (ATU) API ******************/
2315 
2316 /** program internal ATU region entry */
2317 int
2318 al_pcie_atu_region_set(
2319 	struct al_pcie_port *pcie_port,
2320 	struct al_pcie_atu_region *atu_region)
2321 {
2322 	struct al_pcie_regs *regs = pcie_port->regs;
2323 	enum al_pcie_operating_mode op_mode = al_pcie_operating_mode_get(pcie_port);
2324 	uint32_t reg = 0;
2325 
2326 	/**
2327 	 * Addressing RMN: 5384
2328 	 *
2329 	 * RMN description:
2330 	 * From SNPS (also included in the data book) Dynamic iATU Programming
2331 	 * With AHB/AXI Bridge Module When the bridge slave interface clock
2332 	 * (hresetn or slv_aclk) is asynchronous to the PCIe native core clock
2333 	 * (core_clk), you must not update the iATU registers while operations
2334 	 * are in progress on the AHB/AXI bridge slave interface. The iATU
2335 	 * registers are in the core_clk clock domain. The register outputs are
2336 	 * used in the AHB/AXI bridge slave interface clock domain. There is no
2337 	 * synchronization logic between these registers and the AHB/AXI bridge
2338 	 * slave interface.
2339 	 *
2340 	 * Software flow:
2341 	 * Do not allow configuring Outbound iATU after link is started
2342 	 */
2343 	if ((atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND)
2344 		&& (al_pcie_is_link_started(pcie_port))) {
2345 		if (!atu_region->enforce_ob_atu_region_set) {
2346 			al_err("PCIe %d: setting OB iATU after link is started is not allowed\n",
2347 				pcie_port->port_id);
2348 			return -EINVAL;
2349 		} else {
2350 			al_info("PCIe %d: setting OB iATU even after link is started\n",
2351 				pcie_port->port_id);
2352 		}
2353 	}
2354 
	/* TODO: add sanity check */
2356 	AL_REG_FIELD_SET(reg, 0xF, 0, atu_region->index);
2357 	AL_REG_BIT_VAL_SET(reg, 31, atu_region->direction);
2358 	al_reg_write32(&regs->port_regs->iatu.index, reg);
2359 
2360 	al_reg_write32(&regs->port_regs->iatu.lower_base_addr,
2361 			(uint32_t)(atu_region->base_addr & 0xFFFFFFFF));
2362 	al_reg_write32(&regs->port_regs->iatu.upper_base_addr,
2363 			(uint32_t)((atu_region->base_addr >> 32)& 0xFFFFFFFF));
2364 	al_reg_write32(&regs->port_regs->iatu.lower_target_addr,
2365 			(uint32_t)(atu_region->target_addr & 0xFFFFFFFF));
2366 	al_reg_write32(&regs->port_regs->iatu.upper_target_addr,
2367 			(uint32_t)((atu_region->target_addr >> 32)& 0xFFFFFFFF));
2368 
2369 	/* configure the limit, not needed when working in BAR match mode */
2370 	if (atu_region->match_mode == 0) {
2371 		uint32_t limit_reg_val;
2372 		if (pcie_port->rev_id > AL_PCIE_REV_ID_0) {
2373 			uint32_t *limit_ext_reg =
2374 				(atu_region->direction == AL_PCIE_ATU_DIR_OUTBOUND) ?
2375 				&regs->app.atu.out_mask_pair[atu_region->index / 2] :
2376 				&regs->app.atu.in_mask_pair[atu_region->index / 2];
2377 			uint32_t limit_ext_reg_mask =
2378 				(atu_region->index % 2) ?
2379 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_MASK :
2380 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_MASK;
2381 			unsigned int limit_ext_reg_shift =
2382 				(atu_region->index % 2) ?
2383 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_ODD_SHIFT :
2384 				PCIE_W_ATU_MASK_EVEN_ODD_ATU_MASK_40_32_EVEN_SHIFT;
2385 			uint64_t limit_sz_msk =
2386 				atu_region->limit - atu_region->base_addr;
2387 			uint32_t limit_ext_reg_val = (uint32_t)(((limit_sz_msk) >>
2388 						32) & 0xFFFFFFFF);
2389 
2390 			if (limit_ext_reg_val) {
2391 				limit_reg_val =	(uint32_t)((limit_sz_msk) & 0xFFFFFFFF);
2392 				al_assert(limit_reg_val == 0xFFFFFFFF);
2393 			} else {
2394 				limit_reg_val = (uint32_t)(atu_region->limit &
2395 						0xFFFFFFFF);
2396 			}
2397 
2398 			al_reg_write32_masked(
2399 					limit_ext_reg,
2400 					limit_ext_reg_mask,
2401 					limit_ext_reg_val << limit_ext_reg_shift);
2402 		} else {
2403 			limit_reg_val = (uint32_t)(atu_region->limit & 0xFFFFFFFF);
2404 		}
2405 
2406 		al_reg_write32(&regs->port_regs->iatu.limit_addr,
2407 				limit_reg_val);
2408 	}
2409 
2410 	reg = 0;
2411 	AL_REG_FIELD_SET(reg, 0x1F, 0, atu_region->tlp_type);
2412 	AL_REG_FIELD_SET(reg, 0x3 << 9, 9, atu_region->attr);
2413 
2414 
2415 	if ((pcie_port->rev_id == AL_PCIE_REV_ID_3)
2416 		&& (op_mode == AL_PCIE_OPERATING_MODE_EP)
2417 		&& (atu_region->function_match_bypass_mode)) {
2418 		AL_REG_FIELD_SET(reg,
2419 			PCIE_IATU_CR1_FUNC_NUM_MASK,
2420 			PCIE_IATU_CR1_FUNC_NUM_SHIFT,
2421 			atu_region->function_match_bypass_mode_number);
2422 	}
2423 
2424 	al_reg_write32(&regs->port_regs->iatu.cr1, reg);
2425 
2426 	/* Enable/disable the region. */
2427 	reg = 0;
2428 	AL_REG_FIELD_SET(reg, 0xFF, 0, atu_region->msg_code);
2429 	AL_REG_FIELD_SET(reg, 0x700, 8, atu_region->bar_number);
2430 	AL_REG_FIELD_SET(reg, 0x3 << 24, 24, atu_region->response);
2431 	AL_REG_BIT_VAL_SET(reg, 16, atu_region->enable_attr_match_mode == AL_TRUE);
2432 	AL_REG_BIT_VAL_SET(reg, 21, atu_region->enable_msg_match_mode == AL_TRUE);
2433 	AL_REG_BIT_VAL_SET(reg, 28, atu_region->cfg_shift_mode == AL_TRUE);
2434 	AL_REG_BIT_VAL_SET(reg, 29, atu_region->invert_matching == AL_TRUE);
2435 	if (atu_region->tlp_type == AL_PCIE_TLP_TYPE_MEM || atu_region->tlp_type == AL_PCIE_TLP_TYPE_IO)
2436 		AL_REG_BIT_VAL_SET(reg, 30, !!atu_region->match_mode);
2437 	AL_REG_BIT_VAL_SET(reg, 31, !!atu_region->enable);
2438 
2439 	/* In outbound, enable function bypass
2440 	 * In inbound, enable function match mode
2441 	 * Note: this is the same bit, has different meanings in ob/ib ATUs
2442 	 */
2443 	if (op_mode == AL_PCIE_OPERATING_MODE_EP)
2444 		AL_REG_FIELD_SET(reg,
2445 			PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_MASK,
2446 			PCIE_IATU_CR2_FUNC_NUM_TRANS_BYPASS_FUNC_MATCH_ENABLE_SHIFT,
2447 			atu_region->function_match_bypass_mode ? 0x1 : 0x0);
2448 
2449 	al_reg_write32(&regs->port_regs->iatu.cr2, reg);
2450 
2451 	return 0;
2452 }
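
/*
 * Usage sketch (editor's addition): programming one outbound MEM window. All
 * addresses are placeholders; note that 'limit' is an absolute end address,
 * not a size, matching the limit/base arithmetic above.
 *
 *	struct al_pcie_atu_region region = { 0 };
 *
 *	region.enable = AL_TRUE;
 *	region.direction = AL_PCIE_ATU_DIR_OUTBOUND;
 *	region.index = 0;
 *	region.base_addr = 0xe0000000UL;
 *	region.limit = 0xe0ffffffUL;	// base + 16 MB - 1
 *	region.target_addr = 0x0;
 *	region.tlp_type = AL_PCIE_TLP_TYPE_MEM;
 *
 *	rc = al_pcie_atu_region_set(pcie_port, &region);
 */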
2453 
2454 /** obtains internal ATU region base/target addresses */
2455 void
2456 al_pcie_atu_region_get_fields(
2457 	struct al_pcie_port *pcie_port,
2458 	enum al_pcie_atu_dir direction, uint8_t index,
2459 	al_bool *enable, uint64_t *base_addr, uint64_t *target_addr)
2460 {
2461 	struct al_pcie_regs *regs = pcie_port->regs;
2462 	uint64_t high_addr;
2463 	uint32_t reg = 0;
2464 
2465 	AL_REG_FIELD_SET(reg, 0xF, 0, index);
2466 	AL_REG_BIT_VAL_SET(reg, 31, direction);
2467 	al_reg_write32(&regs->port_regs->iatu.index, reg);
2468 
2469 	*base_addr = al_reg_read32(&regs->port_regs->iatu.lower_base_addr);
2470 	high_addr = al_reg_read32(&regs->port_regs->iatu.upper_base_addr);
2471 	high_addr <<= 32;
2472 	*base_addr |= high_addr;
2473 
2474 	*target_addr = al_reg_read32(&regs->port_regs->iatu.lower_target_addr);
2475 	high_addr = al_reg_read32(&regs->port_regs->iatu.upper_target_addr);
2476 	high_addr <<= 32;
2477 	*target_addr |= high_addr;
2478 
2479 	reg = al_reg_read32(&regs->port_regs->iatu.cr1);
2480 	*enable = AL_REG_BIT_GET(reg, 31) ? AL_TRUE : AL_FALSE;
2481 }
2482 
2483 void
2484 al_pcie_axi_io_config(
2485 	struct al_pcie_port *pcie_port,
2486 	al_phys_addr_t start,
2487 	al_phys_addr_t end)
2488 {
2489 	struct al_pcie_regs *regs = pcie_port->regs;
2490 
2491 	al_reg_write32(regs->axi.ob_ctrl.io_start_h,
2492 			(uint32_t)((start >> 32) & 0xFFFFFFFF));
2493 
2494 	al_reg_write32(regs->axi.ob_ctrl.io_start_l,
2495 			(uint32_t)(start & 0xFFFFFFFF));
2496 
2497 	al_reg_write32(regs->axi.ob_ctrl.io_limit_h,
2498 			(uint32_t)((end >> 32) & 0xFFFFFFFF));
2499 
2500 	al_reg_write32(regs->axi.ob_ctrl.io_limit_l,
2501 			(uint32_t)(end & 0xFFFFFFFF));
2502 
2503 	al_reg_write32_masked(regs->axi.ctrl.slv_ctl,
2504 			      PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN,
2505 			      PCIE_AXI_CTRL_SLV_CTRL_IO_BAR_EN);
2506 }
2507 
2508 /************** Interrupt generation (Endpoint mode Only) API *****************/
2509 
2510 /** generate INTx Assert/DeAssert Message */
2511 int
2512 al_pcie_legacy_int_gen(
2513 	struct al_pcie_pf		*pcie_pf,
2514 	al_bool				assert,
2515 	enum al_pcie_legacy_int_type	type)
2516 {
2517 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2518 	unsigned int pf_num = pcie_pf->pf_num;
2519 	uint32_t reg;
2520 
2521 	al_assert(type == AL_PCIE_LEGACY_INTA); /* only INTA supported */
2522 	reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2523 	AL_REG_BIT_VAL_SET(reg, 3, !!assert);
2524 	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2525 
2526 	return 0;
2527 }
2528 
2529 /** generate MSI interrupt */
2530 int
2531 al_pcie_msi_int_gen(struct al_pcie_pf *pcie_pf, uint8_t vector)
2532 {
2533 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2534 	unsigned int pf_num = pcie_pf->pf_num;
2535 	uint32_t reg;
2536 
2537 	/* set msi vector and clear MSI request */
2538 	reg = al_reg_read32(regs->app.global_ctrl.events_gen[pf_num]);
2539 	AL_REG_BIT_CLEAR(reg, 4);
2540 	AL_REG_FIELD_SET(reg,
2541 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_MASK,
2542 			PCIE_W_GLOBAL_CTRL_EVENTS_GEN_MSI_VECTOR_SHIFT,
2543 			vector);
2544 	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2545 	/* set MSI request */
2546 	AL_REG_BIT_SET(reg, 4);
2547 	al_reg_write32(regs->app.global_ctrl.events_gen[pf_num], reg);
2548 
2549 	return 0;
2550 }
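
/*
 * Usage sketch (editor's addition): endpoint-initiated interrupts. Vector 0
 * is an arbitrary choice; the legacy path is limited to INTA by the assert
 * above.
 *
 *	al_pcie_legacy_int_gen(pcie_pf, AL_TRUE, AL_PCIE_LEGACY_INTA);
 *	al_pcie_legacy_int_gen(pcie_pf, AL_FALSE, AL_PCIE_LEGACY_INTA);
 *	al_pcie_msi_int_gen(pcie_pf, 0);	// fire MSI vector 0
 */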
2551 
2552 /** configure MSIX capability */
2553 int
2554 al_pcie_msix_config(
2555 	struct al_pcie_pf *pcie_pf,
2556 	struct al_pcie_msix_params *msix_params)
2557 {
2558 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2559 	unsigned int pf_num = pcie_pf->pf_num;
2560 	uint32_t msix_reg0;
2561 
2562 	al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_TRUE);
2563 
2564 	msix_reg0 = al_reg_read32(regs->core_space[pf_num].msix_cap_base);
2565 
2566 	msix_reg0 &= ~(AL_PCI_MSIX_MSGCTRL_TBL_SIZE << AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT);
2567 	msix_reg0 |= ((msix_params->table_size - 1) & AL_PCI_MSIX_MSGCTRL_TBL_SIZE) <<
2568 			AL_PCI_MSIX_MSGCTRL_TBL_SIZE_SHIFT;
2569 	al_reg_write32(regs->core_space[pf_num].msix_cap_base, msix_reg0);
2570 
2571 	/* Table offset & BAR */
2572 	al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_TABLE >> 2),
2573 		       (msix_params->table_offset & AL_PCI_MSIX_TABLE_OFFSET) |
2574 			       (msix_params->table_bar & AL_PCI_MSIX_TABLE_BAR));
2575 	/* PBA offset & BAR */
2576 	al_reg_write32(regs->core_space[pf_num].msix_cap_base + (AL_PCI_MSIX_PBA >> 2),
2577 		       (msix_params->pba_offset & AL_PCI_MSIX_PBA_OFFSET) |
2578 			       (msix_params->pba_bar & AL_PCI_MSIX_PBA_BAR));
2579 
2580 	al_pcie_port_wr_to_ro_set(pcie_pf->pcie_port, AL_FALSE);
2581 
2582 	return 0;
2583 }
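
/*
 * Usage sketch (editor's addition): describing a 64-entry MSI-X table in
 * BAR0 with the PBA placed behind it. The offsets and BAR indices are
 * placeholders, not required values.
 *
 *	struct al_pcie_msix_params msix = {
 *		.table_size = 64,
 *		.table_offset = 0x0,
 *		.table_bar = 0,
 *		.pba_offset = 0x1000,
 *		.pba_bar = 0,
 *	};
 *
 *	rc = al_pcie_msix_config(pcie_pf, &msix);
 */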
2584 
2585 /** check whether MSIX is enabled */
2586 al_bool
2587 al_pcie_msix_enabled(struct al_pcie_pf	*pcie_pf)
2588 {
2589 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2590 	uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2591 
2592 	if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_EN)
2593 		return AL_TRUE;
2594 	return AL_FALSE;
2595 }
2596 
2597 /** check whether MSIX is masked */
2598 al_bool
2599 al_pcie_msix_masked(struct al_pcie_pf *pcie_pf)
2600 {
2601 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2602 	uint32_t msix_reg0 = al_reg_read32(regs->core_space[pcie_pf->pf_num].msix_cap_base);
2603 
2604 	if (msix_reg0 & AL_PCI_MSIX_MSGCTRL_MASK)
2605 		return AL_TRUE;
2606 	return AL_FALSE;
2607 }
2608 
2609 /******************** Advanced Error Reporting (AER) API **********************/
2610 
2611 /** configure AER capability */
2612 int
2613 al_pcie_aer_config(
2614 	struct al_pcie_pf		*pcie_pf,
2615 	struct al_pcie_aer_params	*params)
2616 {
2617 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2618 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2619 	uint32_t reg_val;
2620 
2621 	reg_val = al_reg_read32(&aer_regs->header);
2622 
2623 	if (((reg_val & PCIE_AER_CAP_ID_MASK) >> PCIE_AER_CAP_ID_SHIFT) !=
2624 		PCIE_AER_CAP_ID_VAL)
2625 		return -EIO;
2626 
2627 	if (((reg_val & PCIE_AER_CAP_VER_MASK) >> PCIE_AER_CAP_VER_SHIFT) !=
2628 		PCIE_AER_CAP_VER_VAL)
2629 		return -EIO;
2630 
2631 	al_reg_write32(&aer_regs->corr_err_mask, ~params->enabled_corr_err);
2632 
2633 	al_reg_write32(&aer_regs->uncorr_err_mask,
2634 		(~params->enabled_uncorr_non_fatal_err) |
2635 		(~params->enabled_uncorr_fatal_err));
2636 
2637 	al_reg_write32(&aer_regs->uncorr_err_severity,
2638 		params->enabled_uncorr_fatal_err);
2639 
2640 	al_reg_write32(&aer_regs->cap_and_ctrl,
2641 		(params->ecrc_gen_en ? PCIE_AER_CTRL_STAT_ECRC_GEN_EN : 0) |
2642 		(params->ecrc_chk_en ? PCIE_AER_CTRL_STAT_ECRC_CHK_EN : 0));
2643 
2644 	al_reg_write32_masked(
2645 		regs->core_space[pcie_pf->pf_num].pcie_dev_ctrl_status,
2646 		PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN |
2647 		PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN |
2648 		PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN |
2649 		PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN,
2650 		(params->enabled_corr_err ?
2651 		 PCIE_PORT_DEV_CTRL_STATUS_CORR_ERR_REPORT_EN : 0) |
2652 		(params->enabled_uncorr_non_fatal_err ?
2653 		 PCIE_PORT_DEV_CTRL_STATUS_NON_FTL_ERR_REPORT_EN : 0) |
2654 		(params->enabled_uncorr_fatal_err ?
2655 		 PCIE_PORT_DEV_CTRL_STATUS_FTL_ERR_REPORT_EN : 0) |
2656 		((params->enabled_uncorr_non_fatal_err &
2657 		  AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2658 		 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0) |
2659 		((params->enabled_uncorr_fatal_err &
2660 		  AL_PCIE_AER_UNCORR_UNSUPRT_REQ_ERR) ?
2661 		 PCIE_PORT_DEV_CTRL_STATUS_UNSUP_REQ_REPORT_EN : 0));
2662 
2663 	return 0;
2664 }
2665 
/** AER uncorrectable errors: get and clear */
2667 unsigned int
2668 al_pcie_aer_uncorr_get_and_clear(struct al_pcie_pf	*pcie_pf)
2669 {
2670 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2671 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2672 	uint32_t reg_val;
2673 
2674 	reg_val = al_reg_read32(&aer_regs->uncorr_err_stat);
2675 	al_reg_write32(&aer_regs->uncorr_err_stat, reg_val);
2676 
2677 	return reg_val;
2678 }
2679 
/** AER correctable errors: get and clear */
2681 unsigned int
2682 al_pcie_aer_corr_get_and_clear(struct al_pcie_pf *pcie_pf)
2683 {
2684 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2685 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2686 	uint32_t reg_val;
2687 
2688 	reg_val = al_reg_read32(&aer_regs->corr_err_stat);
2689 	al_reg_write32(&aer_regs->corr_err_stat, reg_val);
2690 
2691 	return reg_val;
2692 }
2693 
2694 #if (AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS != 4)
2695 #error Wrong assumption!
2696 #endif
2697 
2698 /** AER get the header for the TLP corresponding to a detected error */
2699 void
2700 al_pcie_aer_err_tlp_hdr_get(
2701 	struct al_pcie_pf *pcie_pf,
2702 	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS])
2703 {
2704 	struct al_pcie_regs *regs = pcie_pf->pcie_port->regs;
2705 	struct al_pcie_core_aer_regs *aer_regs = regs->core_space[pcie_pf->pf_num].aer;
2706 	int i;
2707 
2708 	for (i = 0; i < AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS; i++)
2709 		hdr[i] = al_reg_read32(&aer_regs->header_log[i]);
2710 }
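
/*
 * Usage sketch (editor's addition): a typical AER service routine shape,
 * reading and clearing both status registers and dumping the logged TLP
 * header. The interrupt plumbing that would call this code is out of scope.
 *
 *	uint32_t hdr[AL_PCIE_AER_ERR_TLP_HDR_NUM_DWORDS];
 *	unsigned int uncorr = al_pcie_aer_uncorr_get_and_clear(pcie_pf);
 *	unsigned int corr = al_pcie_aer_corr_get_and_clear(pcie_pf);
 *
 *	if (uncorr) {
 *		al_pcie_aer_err_tlp_hdr_get(pcie_pf, hdr);
 *		al_err("AER uncorr 0x%x, hdr %08x %08x %08x %08x\n",
 *		       uncorr, hdr[0], hdr[1], hdr[2], hdr[3]);
 *	}
 *	if (corr)
 *		al_dbg("AER corr 0x%x\n", corr);
 */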
2711 
2712 /********************** Loopback mode (RC and Endpoint modes) ************/
2713 
2714 /** enter local pipe loopback mode */
2715 int
2716 al_pcie_local_pipe_loopback_enter(struct al_pcie_port *pcie_port)
2717 {
2718 	struct al_pcie_regs *regs = pcie_port->regs;
2719 
2720 	al_dbg("PCIe %d: Enter LOCAL PIPE Loopback mode", pcie_port->port_id);
2721 
2722 	al_reg_write32_masked(&regs->port_regs->pipe_loopback_ctrl,
2723 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2724 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
2725 
2726 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
2727 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2728 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT);
2729 
2730 	return 0;
2731 }
2732 
2733 /**
2734  * @brief exit local pipe loopback mode
2735  *
2736  * @param pcie_port	pcie port handle
2737  * @return		0 if no error found
2738  */
2739 int
2740 al_pcie_local_pipe_loopback_exit(struct al_pcie_port *pcie_port)
2741 {
2742 	struct al_pcie_regs *regs = pcie_port->regs;
2743 
2744 	al_dbg("PCIe %d: Exit LOCAL PIPE Loopback mode", pcie_port->port_id);
2745 
2746 	al_reg_write32_masked(&regs->port_regs->pipe_loopback_ctrl,
2747 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2748 			      0);
2749 
2750 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
2751 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2752 			      0);
2753 	return 0;
2754 }
2755 
2756 /** enter remote loopback mode */
2757 int
2758 al_pcie_remote_loopback_enter(struct al_pcie_port *pcie_port)
2759 {
2760 	struct al_pcie_regs *regs = pcie_port->regs;
2761 
2762 	al_dbg("PCIe %d: Enter REMOTE Loopback mode", pcie_port->port_id);
2763 
2764 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
2765 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT,
2766 			      1 << PCIE_PORT_PIPE_LOOPBACK_CTRL_PIPE_LB_EN_SHIFT);
2767 
2768 	return 0;
2769 }
2770 
2771 /**
2772  * @brief   exit remote loopback mode
2773  *
2774  * @param   pcie_port pcie port handle
2775  * @return  0 if no error found
2776  */
2777 int
2778 al_pcie_remote_loopback_exit(struct al_pcie_port *pcie_port)
2779 {
2780 	struct al_pcie_regs *regs = pcie_port->regs;
2781 
2782 	al_dbg("PCIe %d: Exit REMOTE Loopback mode", pcie_port->port_id);
2783 
2784 	al_reg_write32_masked(&regs->port_regs->port_link_ctrl,
2785 			      1 << PCIE_PORT_LINK_CTRL_LB_EN_SHIFT,
2786 			      0);
2787 	return 0;
2788 }
2789