/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
#include "pcie-cadence-lga-regs.h"
#include "pcie-cadence-hpa-regs.h"

enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};

struct cdns_pcie_rp_ib_bar {
	u64 size;
	bool free;
};

struct cdns_pcie;
struct cdns_pcie_rc;

enum cdns_pcie_reg_bank {
	REG_BANK_RP,
	REG_BANK_IP_REG,
	REG_BANK_IP_CFG_CTRL_REG,
	REG_BANK_AXI_MASTER_COMMON,
	REG_BANK_AXI_MASTER,
	REG_BANK_AXI_SLAVE,
	REG_BANK_AXI_HLS,
	REG_BANK_AXI_RAS,
	REG_BANK_AXI_DTI,
	REG_BANKS_MAX,
};

struct cdns_pcie_ops {
	int	(*start_link)(struct cdns_pcie *pcie);
	void	(*stop_link)(struct cdns_pcie *pcie);
	bool	(*link_up)(struct cdns_pcie *pcie);
	u64	(*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};

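/*
 * A minimal sketch (not part of this header): a platform glue driver
 * supplies a cdns_pcie_ops table so the core can control the link
 * through the SoC wrapper. The my_plat_* callbacks are hypothetical
 * names assumed to be defined by that driver:
 *
 *	static const struct cdns_pcie_ops my_plat_pcie_ops = {
 *		.start_link	= my_plat_pcie_start_link,
 *		.stop_link	= my_plat_pcie_stop_link,
 *		.link_up	= my_plat_pcie_link_up,
 *		.cpu_addr_fixup	= my_plat_pcie_cpu_addr_fixup,
 *	};
 */
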
/**
 * struct cdns_plat_pcie_of_data - Register bank offsets for a platform
 * @is_rc: whether the controller operates in Root Complex (RC) mode
 * @ip_reg_bank_offset: IP register bank start offset
 * @ip_cfg_ctrl_reg_offset: IP configuration control register start offset
 * @axi_mstr_common_offset: AXI master common register bank start offset
 * @axi_slave_offset: AXI slave register bank start offset
 * @axi_master_offset: AXI master register bank start offset
 * @axi_hls_offset: AXI HLS register bank start offset
 * @axi_ras_offset: AXI RAS register bank start offset
 * @axi_dti_offset: AXI DTI register bank start offset
 */
struct cdns_plat_pcie_of_data {
	u32 is_rc:1;
	u32 ip_reg_bank_offset;
	u32 ip_cfg_ctrl_reg_offset;
	u32 axi_mstr_common_offset;
	u32 axi_slave_offset;
	u32 axi_master_offset;
	u32 axi_hls_offset;
	u32 axi_ras_offset;
	u32 axi_dti_offset;
};

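/*
 * A minimal sketch (hypothetical offsets): each SoC's match data tells
 * the core where the register banks live relative to reg_base, e.g.:
 *
 *	static const struct cdns_plat_pcie_of_data example_rc_of_data = {
 *		.is_rc			= 1,
 *		.ip_reg_bank_offset	= 0x01000000,
 *		.ip_cfg_ctrl_reg_offset	= 0x01003c00,
 *		.axi_mstr_common_offset	= 0x02020000,
 *	};
 */
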
/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @msg_res: region in the physical address space used to send PCIe messages
 * @dev: PCIe controller device
 * @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform-specific ops to control various inputs from Cadence PCIe
 *       wrapper
 * @cdns_pcie_reg_offsets: per-SoC register bank offsets
 */
struct cdns_pcie {
	void __iomem				*reg_base;
	struct resource				*mem_res;
	struct resource				*msg_res;
	struct device				*dev;
	bool					is_rc;
	int					phy_count;
	struct phy				**phy;
	struct device_link			**link;
	const struct cdns_pcie_ops		*ops;
	const struct cdns_plat_pcie_of_data	*cdns_pcie_reg_offsets;
};

/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: tracks whether each of RP_BAR0, RP_BAR1 and RP_NO_BAR is
 *                still free or already taken
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 * @ecam_supported: whether ECAM is supported
 * @no_inbound_map: whether inbound address mapping is unsupported
 */
struct cdns_pcie_rc {
	struct cdns_pcie	pcie;
	struct resource		*cfg_res;
	void __iomem		*cfg_base;
	u32			vendor_id;
	u32			device_id;
	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
	unsigned int		quirk_retrain_flag:1;
	unsigned int		quirk_detect_quiet_flag:1;
	unsigned int		ecam_supported:1;
	unsigned int		no_inbound_map:1;
};

/**
 * struct cdns_pcie_epf - structure to hold info about an endpoint function
 * @epf: info about the virtual functions attached to this physical function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
	struct cdns_pcie_epf *epf;
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};

/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (INTX
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 * @lock: spinlock to disable interrupts while modifying PCIe controller
 *        register fields (RMW) accessible by both remote RC and EP, to
 *        minimize the time between read and write
 * @epf: structure to hold info about the endpoint functions
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
 */
struct cdns_pcie_ep {
	struct cdns_pcie	pcie;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
	/* protect writing to PCI_STATUS while raising INTX interrupts */
	spinlock_t		lock;
	struct cdns_pcie_epf	*epf;
	unsigned int		quirk_detect_quiet_flag:1;
	unsigned int		quirk_disable_flr:1;
};

static inline u32 cdns_reg_bank_to_off(struct cdns_pcie *pcie,
				       enum cdns_pcie_reg_bank bank)
{
	u32 offset = 0x0;

	switch (bank) {
	case REG_BANK_RP:
		offset = 0;
		break;
	case REG_BANK_IP_REG:
		offset = pcie->cdns_pcie_reg_offsets->ip_reg_bank_offset;
		break;
	case REG_BANK_IP_CFG_CTRL_REG:
		offset = pcie->cdns_pcie_reg_offsets->ip_cfg_ctrl_reg_offset;
		break;
	case REG_BANK_AXI_MASTER_COMMON:
		offset = pcie->cdns_pcie_reg_offsets->axi_mstr_common_offset;
		break;
	case REG_BANK_AXI_MASTER:
		offset = pcie->cdns_pcie_reg_offsets->axi_master_offset;
		break;
	case REG_BANK_AXI_SLAVE:
		offset = pcie->cdns_pcie_reg_offsets->axi_slave_offset;
		break;
	case REG_BANK_AXI_HLS:
		offset = pcie->cdns_pcie_reg_offsets->axi_hls_offset;
		break;
	case REG_BANK_AXI_RAS:
		offset = pcie->cdns_pcie_reg_offsets->axi_ras_offset;
		break;
	case REG_BANK_AXI_DTI:
		offset = pcie->cdns_pcie_reg_offsets->axi_dti_offset;
		break;
	default:
		break;
	}
	return offset;
}

/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	return readl(pcie->reg_base + reg);
}

static inline void cdns_pcie_hpa_writel(struct cdns_pcie *pcie,
					enum cdns_pcie_reg_bank bank,
					u32 reg,
					u32 value)
{
	u32 offset = cdns_reg_bank_to_off(pcie, bank);

	reg += offset;
	writel(value, pcie->reg_base + reg);
}

static inline u32 cdns_pcie_hpa_readl(struct cdns_pcie *pcie,
				      enum cdns_pcie_reg_bank bank,
				      u32 reg)
{
	u32 offset = cdns_reg_bank_to_off(pcie, bank);

	reg += offset;
	return readl(pcie->reg_base + reg);
}

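/*
 * A minimal sketch (hypothetical register offset 0x18): the HPA accessors
 * compose the per-SoC bank base with a bank-relative offset, so callers
 * never hard-code absolute addresses:
 *
 *	// Reads reg_base + axi_slave_offset + 0x18
 *	u32 val = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, 0x18);
 */
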
static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
{
	return readw(pcie->reg_base + reg);
}

static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
{
	return readb(pcie->reg_base + reg);
}

static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
					  u8 *val)
{
	*val = cdns_pcie_readb(pcie, where);
	return PCIBIOS_SUCCESSFUL;
}

static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
					  u16 *val)
{
	*val = cdns_pcie_readw(pcie, where);
	return PCIBIOS_SUCCESSFUL;
}

static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
					   u32 *val)
{
	*val = cdns_pcie_readl(pcie, where);
	return PCIBIOS_SUCCESSFUL;
}

static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 val;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return 0;
	}

	/* Only touch the bus once the access is known to be valid */
	val = readl(aligned_addr);

	if (size > 2)
		return val;

	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
}

static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int offset = (unsigned long)addr & 0x3;
	u32 mask;
	u32 val;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return;
	}

	if (size > 2) {
		writel(value, addr);
		return;
	}

	mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
	val = readl(aligned_addr) & mask;
	val |= value << (offset * 8);
	writel(val, aligned_addr);
}

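/*
 * A minimal sketch (hypothetical address): a 16-bit write at byte
 * offset 2 within an aligned 32-bit word becomes a read-modify-write
 * of that word:
 *
 *	// aligned_addr = base + 0x4, offset = 2
 *	// mask = ~(0xffff << 16): only bits 31:16 are replaced
 *	cdns_pcie_write_sz(base + 0x6, 0x2, 0xabcd);
 */
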
/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

static inline void cdns_pcie_hpa_rp_writeb(struct cdns_pcie *pcie,
					   u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_hpa_rp_writew(struct cdns_pcie *pcie,
					   u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline u16 cdns_pcie_hpa_rp_readw(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x1, value);
}

static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	cdns_pcie_write_sz(addr, 0x2, value);
}

static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u32 value)
{
	writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;

	return cdns_pcie_read_sz(addr, 0x2);
}

static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}

static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
	if (pcie->ops && pcie->ops->start_link)
		return pcie->ops->start_link(pcie);

	return 0;
}

static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
	if (pcie->ops && pcie->ops->stop_link)
		pcie->ops->stop_link(pcie);
}

static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
	if (pcie->ops && pcie->ops->link_up)
		return pcie->ops->link_up(pcie);

	return true;
}

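/*
 * A minimal sketch (hypothetical retry count and delay): host drivers
 * typically start the link via the platform ops, then poll for link-up;
 * note that cdns_pcie_link_up() defaults to true when no callback is set:
 *
 *	ret = cdns_pcie_start_link(pcie);
 *	if (ret)
 *		return ret;
 *	while (retries-- && !cdns_pcie_link_up(pcie))
 *		msleep(10);
 */
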
#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where);
int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc);
#else
static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
{
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
					     int where)
{
	return NULL;
}
#endif

#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}

static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
{
}

static inline int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}
#endif

u8   cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap);
u16  cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap);
bool cdns_pcie_linkup(struct cdns_pcie *pcie);

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size);

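/*
 * A minimal sketch (hypothetical addresses): mapping outbound region 0
 * of function 0 as a 1 MiB memory window from cpu_addr to pci_addr:
 *
 *	cdns_pcie_set_outbound_region(pcie, 0, 0, 0, false,
 *				      cpu_addr, pci_addr, SZ_1M);
 */
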
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr);

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int  cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int  cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				       u32 r, bool is_io,
				       u64 cpu_addr, u64 pci_addr, size_t size);
void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						      u8 busnr, u8 fn,
						      u32 r, u64 cpu_addr);
int  cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
				   int where);
int  cdns_pcie_hpa_host_start_link(struct cdns_pcie_rc *rc);
int  cdns_pcie_hpa_start_link(struct cdns_pcie *pcie);
void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie);
bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie);

extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */