xref: /linux/drivers/pci/controller/cadence/pcie-cadence.c (revision 2f2c7254931f41b5736e3ba12aaa9ac1bbeeeb92)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2017 Cadence
3 // Cadence PCIe controller driver.
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5 
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/of.h>
9 
10 #include "pcie-cadence.h"
11 #include "../../pci.h"
12 
/*
 * cdns_pcie_find_capability() - Walk the standard PCI capability list
 * @pcie: the Cadence PCIe controller
 * @cap: PCI_CAP_ID_* value to look for
 *
 * Return: config space offset of the capability, or 0 when it is absent.
 */
u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
{
	return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
				 cap, pcie);
}
EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
19 
cdns_pcie_find_ext_capability(struct cdns_pcie * pcie,u8 cap)20 u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
21 {
22 	return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
23 }
24 EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
25 
cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie * pcie)26 void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
27 {
28 	u32 delay = 0x3;
29 	u32 ltssm_control_cap;
30 
31 	/*
32 	 * Set the LTSSM Detect Quiet state min. delay to 2ms.
33 	 */
34 	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
35 	ltssm_control_cap = ((ltssm_control_cap &
36 			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
37 			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
38 
39 	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
40 }
41 EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
42 
/*
 * cdns_pcie_set_outbound_region() - Program an outbound address translation
 * @pcie: the Cadence PCIe controller
 * @busnr: bus number placed in the requester ID (Root Complex mode)
 * @fn: PCI function number placed in the requester ID (Endpoint mode)
 * @r: index of the outbound region to program
 * @is_io: true for an I/O region, false for a memory region
 * @cpu_addr: CPU base address of the window
 * @pci_addr: PCI base address the window translates to
 * @size: window size in bytes
 */
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	u32 addr0, addr1, desc0, desc1;
	u64 region_size;
	int nbits;

	/*
	 * Round the size up to a power of two by hand:
	 * roundup_pow_of_two() returns an unsigned long, which is not
	 * suited for 64-bit values.
	 */
	region_size = 1ULL << fls64(size - 1);
	nbits = ilog2(region_size);
	if (nbits < 8)
		nbits = 8;

	/* Program the PCI side of the translation. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Build the TLP header descriptor. */
	desc0 = is_io ? CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO :
			CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Bits [26:24] of DESC0 must always carry the function number,
	 * whether or not Bit [23] (hardcoded requester ID) is set.
	 *
	 * In Root Complex mode, setting Bit [23] is mandatory: the driver
	 * then supplies the bus number in Bits [7:0] of DESC1 and the
	 * device number in Bits [31:27] of DESC0. Device and function
	 * numbers are always 0 in that mode.
	 *
	 * In Endpoint mode, Bit [23] is left clear so the controller uses
	 * the captured bus and device numbers, but the function number
	 * still has to be written since the controller may expose more
	 * than one function.
	 */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Program the CPU side of the translation. */
	if (pcie->ops && pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
120 
/*
 * cdns_pcie_set_outbound_region_for_normal_msg() - Program an outbound
 * region that emits Normal Message TLPs
 * @pcie: the Cadence PCIe controller
 * @busnr: bus number placed in the requester ID (Root Complex mode)
 * @fn: PCI function number placed in the requester ID (Endpoint mode)
 * @r: index of the outbound region to program
 * @cpu_addr: CPU base address of the window
 */
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/*
	 * Requester ID handling is identical to
	 * cdns_pcie_set_outbound_region(): hardcoded bus/device in RC
	 * mode, captured values plus explicit function number in EP mode.
	 */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Program the CPU side of the translation. */
	if (pcie->ops && pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	/* Messages carry no PCI address, so the PCI side is cleared. */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
155 
/*
 * cdns_pcie_reset_outbound_region() - Clear outbound region @r
 * @pcie: the Cadence PCIe controller
 * @r: index of the outbound region to clear
 *
 * Zero every register of the region so its translation setup is removed.
 */
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	/* PCI address */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	/* TLP header descriptor */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	/* CPU address */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
168 
cdns_pcie_disable_phy(struct cdns_pcie * pcie)169 void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
170 {
171 	int i = pcie->phy_count;
172 
173 	while (i--) {
174 		phy_power_off(pcie->phy[i]);
175 		phy_exit(pcie->phy[i]);
176 	}
177 }
178 EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
179 
cdns_pcie_enable_phy(struct cdns_pcie * pcie)180 int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
181 {
182 	int ret;
183 	int i;
184 
185 	for (i = 0; i < pcie->phy_count; i++) {
186 		ret = phy_init(pcie->phy[i]);
187 		if (ret < 0)
188 			goto err_phy;
189 
190 		ret = phy_power_on(pcie->phy[i]);
191 		if (ret < 0) {
192 			phy_exit(pcie->phy[i]);
193 			goto err_phy;
194 		}
195 	}
196 
197 	return 0;
198 
199 err_phy:
200 	while (--i >= 0) {
201 		phy_power_off(pcie->phy[i]);
202 		phy_exit(pcie->phy[i]);
203 	}
204 
205 	return ret;
206 }
207 EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
208 
cdns_pcie_init_phy(struct device * dev,struct cdns_pcie * pcie)209 int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
210 {
211 	struct device_node *np = dev->of_node;
212 	int phy_count;
213 	struct phy **phy;
214 	struct device_link **link;
215 	int i;
216 	int ret;
217 	const char *name;
218 
219 	phy_count = of_property_count_strings(np, "phy-names");
220 	if (phy_count < 1) {
221 		dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
222 		pcie->phy_count = 0;
223 		return 0;
224 	}
225 
226 	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
227 	if (!phy)
228 		return -ENOMEM;
229 
230 	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
231 	if (!link)
232 		return -ENOMEM;
233 
234 	for (i = 0; i < phy_count; i++) {
235 		of_property_read_string_index(np, "phy-names", i, &name);
236 		phy[i] = devm_phy_get(dev, name);
237 		if (IS_ERR(phy[i])) {
238 			ret = PTR_ERR(phy[i]);
239 			goto err_phy;
240 		}
241 		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
242 		if (!link[i]) {
243 			devm_phy_put(dev, phy[i]);
244 			ret = -EINVAL;
245 			goto err_phy;
246 		}
247 	}
248 
249 	pcie->phy_count = phy_count;
250 	pcie->phy = phy;
251 	pcie->link = link;
252 
253 	ret =  cdns_pcie_enable_phy(pcie);
254 	if (ret)
255 		goto err_phy;
256 
257 	return 0;
258 
259 err_phy:
260 	while (--i >= 0) {
261 		device_link_del(link[i]);
262 		devm_phy_put(dev, phy[i]);
263 	}
264 
265 	return ret;
266 }
267 EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
268 
/*
 * cdns_pcie_suspend_noirq() - System sleep noirq-phase suspend callback
 * @dev: the controller device
 *
 * Powers the PHYs down; they are brought back by the resume callback.
 */
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	cdns_pcie_disable_phy(dev_get_drvdata(dev));

	return 0;
}
277 
/*
 * cdns_pcie_resume_noirq() - System sleep noirq-phase resume callback
 * @dev: the controller device
 *
 * Re-enables the PHYs that were powered down during suspend.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = cdns_pcie_enable_phy(pcie);
	if (err)
		dev_err(dev, "failed to enable PHY\n");

	return err;
}
291 
/*
 * System sleep hooks run in the "noirq" phase, i.e. after the regular
 * suspend callbacks and before the regular resume callbacks of child
 * devices.
 */
const struct dev_pm_ops cdns_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				  cdns_pcie_resume_noirq)
};

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence PCIe controller driver");
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
300