/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES	10
#define LINK_WAIT_USLEEP_MIN	90000
#define LINK_WAIT_USLEEP_MAX	100000

/*
 * Local Management Registers
 */
#define CDNS_PCIE_LM_BASE	0x00100000

/* Vendor ID Register */
#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
#define CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
#define CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
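
/*
 * Illustrative sketch (not part of the original header): a host driver
 * would typically program the Vendor ID and Subsystem Vendor ID fields in
 * a single 32-bit write using the helpers above. "vendor_id" and
 * "subsys_vendor_id" are placeholder variables.
 *
 *	u32 id = CDNS_PCIE_LM_ID_VENDOR(vendor_id) |
 *		 CDNS_PCIE_LM_ID_SUBSYS(subsys_vendor_id);
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
 */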

/* Root Port Requester ID Register */
#define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228)
#define CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0)
#define CDNS_PCIE_LM_RP_RID_SHIFT	0
#define CDNS_PCIE_LM_RP_RID_(rid) \
	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)

/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c)
#define CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0)
#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0
#define CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8)
#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8

/* Endpoint Function f BAR b Configuration Registers */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : \
			   CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : \
			   CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
	(GENMASK(4, 0) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
	(GENMASK(7, 5) << ((b) * 8))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
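
/*
 * Illustrative sketch (not part of the original header): programming the
 * aperture and control fields of one endpoint BAR. The register is selected
 * from the BAR and function numbers and updated read-modify-write;
 * "aperture" and "ctrl" are placeholders (ctrl being one of the
 * CDNS_PCIE_LM_BAR_CFG_CTRL_* encodings defined below).
 *
 *	u32 reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
 *	int b = (bar < BAR_4) ? bar : bar - BAR_4;
 *	u32 cfg = cdns_pcie_readl(pcie, reg);
 *
 *	cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
 *		 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
 *	cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
 *	       CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
 *	cdns_pcie_writel(pcie, reg, cfg);
 */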

/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0)

/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK	GENMASK(8, 6)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK	GENMASK(16, 14)
#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0
#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE	BIT(19)
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS	0
#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS	BIT(20)
#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE	BIT(31)

/* BAR control values applicable to both Endpoint Function and Root Complex */
#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED	0x0
#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS	0x1
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS	0x4
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS	0x6
#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7

#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
	(((aperture) - 2) << ((bar) * 8))
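
/*
 * Illustrative sketch (not part of the original header), assuming the
 * aperture argument is expressed as log2 of the region size: describing
 * Root Port BAR0 (RP_BAR0, from the enum further down) as a 64-bit
 * prefetchable BAR. "size" is a placeholder.
 *
 *	u32 value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
 *
 *	value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(RP_BAR0) |
 *		 LM_RC_BAR_CFG_APERTURE(RP_BAR0, ilog2(size));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
 */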

/* PTM Control Register */
#define CDNS_PCIE_LM_PTM_CTRL	(CDNS_PCIE_LM_BASE + 0x0da8)
#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN	BIT(17)

/*
 * Endpoint Function Registers (PCI configuration space for endpoint functions)
 */
#define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))

#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET	0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET	0x200
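
/*
 * Illustrative sketch (not part of the original header): per-function
 * configuration space registers are reached by adding the capability offset
 * to CDNS_PCIE_EP_FUNC_BASE(fn), e.g. through the cdns_pcie_ep_fn_*()
 * accessors defined further down. Reading the MSI Message Control word of
 * physical function fn could look like:
 *
 *	u16 flags = cdns_pcie_ep_fn_readw(pcie, fn,
 *					  CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET +
 *					  PCI_MSI_FLAGS);
 */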

/*
 * Endpoint PF Registers
 */
#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn)	(0x144 + (fn) * 0x1000)
#define CDNS_PCIE_ARI_CAP_NFN_MASK	GENMASK(15, 8)

/*
 * Root Port Registers (PCI configuration space for the root port function)
 */
#define CDNS_PCIE_RP_BASE	0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET	0xc0

/*
 * Address Translation Registers
 */
#define CDNS_PCIE_AT_BASE	0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK	GENMASK(3, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM	0x2
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO	0x6
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc
#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd
/* Bit 23 MUST be set in RC mode. */
#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
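
/*
 * Illustrative sketch (not part of the original header): a minimal outbound
 * memory mapping of region r, where the low "nbits" bits of the AXI address
 * pass through and the remainder is replaced by pci_addr. Variable names are
 * placeholders; the real sequence lives in cdns_pcie_set_outbound_region().
 *
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r),
 *			 CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
 *			 (lower_32_bits(pci_addr) & GENMASK(31, 8)));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r),
 *			 upper_32_bits(pci_addr));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r),
 *			 CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r),
 *			 CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
 *			 (lower_32_bits(cpu_addr) & GENMASK(31, 8)));
 *	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r),
 *			 upper_32_bits(cpu_addr));
 */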

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN	(CDNS_PCIE_AT_BASE + 0x0824)

/* LTSSM Capabilities register */
#define CDNS_PCIE_LTSSM_CONTROL_CAP	(CDNS_PCIE_LM_BASE + 0x0054)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK	GENMASK(2, 1)
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT	1
#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
	(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
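
/*
 * Illustrative sketch (not part of the original header): the Detect Quiet
 * minimum delay is a small field updated read-modify-write, as done by
 * cdns_pcie_detect_quiet_min_delay_set(). The delay value used here is a
 * placeholder.
 *
 *	u32 cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
 *
 *	cap &= ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK;
 *	cap |= CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay);
 *	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, cap);
 */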

enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

#define CDNS_PCIE_RP_MAX_IB	0x3
#define CDNS_PCIE_MAX_OB	32

struct cdns_pcie_rp_ib_bar {
	u64 size;
	bool free;
};

/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)

/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK	GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_DATA	BIT(16)
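
/*
 * Illustrative sketch (not part of the original header): once an outbound
 * region has been configured with the NORMAL_MSG type, the routing and
 * message code of the transmitted message TLP are encoded in the offset of
 * the access within that region. "msg_region", "msg_routing" and "msg_code"
 * are placeholders.
 *
 *	u32 offset = CDNS_PCIE_NORMAL_MSG_ROUTING(msg_routing) |
 *		     CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
 *
 *	writel(0, msg_region + offset);
 */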

struct cdns_pcie;

struct cdns_pcie_ops {
	int	(*start_link)(struct cdns_pcie *pcie);
	void	(*stop_link)(struct cdns_pcie *pcie);
	bool	(*link_up)(struct cdns_pcie *pcie);
	u64	(*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};

/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @dev: PCIe controller
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform-specific ops to control various inputs from Cadence PCIe
 *       wrapper
 */
struct cdns_pcie {
	void __iomem *reg_base;
	struct resource *mem_res;
	struct device *dev;
	bool is_rc;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	const struct cdns_pcie_ops *ops;
};

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: Whether RP_BAR0, RP_BAR1 and RP_NO_BAR are still free
 *                (available) for inbound mapping
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 */
struct cdns_pcie_rc {
	struct cdns_pcie pcie;
	struct resource *cfg_res;
	void __iomem *cfg_base;
	u32 vendor_id;
	u32 device_id;
	bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
	unsigned int quirk_retrain_flag:1;
	unsigned int quirk_detect_quiet_flag:1;
};

/**
 * struct cdns_pcie_epf - Structure to hold info about endpoint function
 * @epf: Info about virtual functions attached to the physical function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
	struct cdns_pcie_epf *epf;
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *                 dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *                the sending of a memory write (MSI) / normal message (INTX
 *                IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *                dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *              the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 * @lock: spin lock to disable interrupts while modifying PCIe controller
 *        registers fields (RMW) accessible by both remote RC and EP to
 *        minimize time between read and write
 * @epf: Structure to hold info about endpoint function
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
 */
struct cdns_pcie_ep {
	struct cdns_pcie pcie;
	u32 max_regions;
	unsigned long ob_region_map;
	phys_addr_t *ob_addr;
	phys_addr_t irq_phys_addr;
	void __iomem *irq_cpu_addr;
	u64 irq_pci_addr;
	u8 irq_pci_fn;
	u8 irq_pending;
	/* protect writing to PCI_STATUS while raising INTX interrupts */
	spinlock_t lock;
	struct cdns_pcie_epf *epf;
	unsigned int quirk_detect_quiet_flag:1;
	unsigned int quirk_disable_flr:1;
};

/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 val = readl(aligned_addr);

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return 0;
	}

	if (size > 2)
		return val;

	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
}

static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 mask;
	u32 val;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return;
	}

	if (size > 2) {
		writel(value, addr);
		return;
	}

	mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
	val = readl(aligned_addr) & mask;
	val |= value << (offset * 8);
	writel(val, aligned_addr);
}
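
/*
 * Illustrative note (not part of the original header): the two helpers above
 * emulate byte and word accesses on top of 32-bit register accesses. For
 * instance, a 16-bit write at offset 0x2 within an aligned 32-bit word ends
 * up as a read-modify-write of that word:
 *
 *	mask = ~(0xffff << 16);
 *	val  = (readl(aligned_addr) & mask) | (value << 16);
 *	writel(val, aligned_addr);
 */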

/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u32 value)
{
	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->start_link)
		return pcie->ops->start_link(pcie);

	return 0;
}

static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
	if (pcie->ops->stop_link)
		pcie->ops->stop_link(pcie);
}

static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
	if (pcie->ops->link_up)
		return pcie->ops->link_up(pcie);

	return true;
}

#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where);
#else
static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
{
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus,
					     unsigned int devfn, int where)
{
	return NULL;
}
#endif

#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}

static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
{
}
#endif

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size);

void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr);

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */