xref: /linux/drivers/pci/controller/cadence/pcie-cadence.c (revision d30c1683aaecb93d2ab95685dc4300a33d3cea7a)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2017 Cadence
3 // Cadence PCIe controller driver.
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5 
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/of.h>
9 
10 #include "pcie-cadence.h"
11 #include "../../pci.h"
12 
/*
 * cdns_pcie_find_capability() - Find a legacy PCI capability in the
 * controller's own configuration space.
 * @pcie: the Cadence PCIe controller
 * @cap: capability ID to search for (PCI_CAP_ID_*)
 *
 * Walks the PCI capability linked list starting at PCI_CAPABILITY_LIST,
 * reading config registers through cdns_pcie_read_cfg().
 *
 * Return: config space offset of the capability, or 0 if it is not
 * present (per PCI_FIND_NEXT_CAP() semantics).
 */
u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
{
	return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
				 cap, pcie);
}
EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
19 
/*
 * cdns_pcie_find_ext_capability() - Find a PCIe extended capability in the
 * controller's own configuration space.
 * @pcie: the Cadence PCIe controller
 * @cap: extended capability ID to search for (PCI_EXT_CAP_ID_*)
 *
 * Walks the extended capability list (start offset 0 tells the helper to
 * begin at the head of extended config space), reading registers through
 * cdns_pcie_read_cfg().
 *
 * Return: config space offset of the extended capability, or 0 if it is
 * not present (per PCI_FIND_NEXT_EXT_CAP() semantics).
 */
u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
{
	return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
}
EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
25 
26 bool cdns_pcie_linkup(struct cdns_pcie *pcie)
27 {
28 	u32 pl_reg_val;
29 
30 	pl_reg_val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE);
31 	if (pl_reg_val & GENMASK(0, 0))
32 		return true;
33 	return false;
34 }
35 EXPORT_SYMBOL_GPL(cdns_pcie_linkup);
36 
37 void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
38 {
39 	u32 delay = 0x3;
40 	u32 ltssm_control_cap;
41 
42 	/*
43 	 * Set the LTSSM Detect Quiet state min. delay to 2ms.
44 	 */
45 	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
46 	ltssm_control_cap = ((ltssm_control_cap &
47 			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
48 			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));
49 
50 	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
51 }
52 EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
53 
54 void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
55 				   u32 r, bool is_io,
56 				   u64 cpu_addr, u64 pci_addr, size_t size)
57 {
58 	/*
59 	 * roundup_pow_of_two() returns an unsigned long, which is not suited
60 	 * for 64bit values.
61 	 */
62 	u64 sz = 1ULL << fls64(size - 1);
63 	int nbits = ilog2(sz);
64 	u32 addr0, addr1, desc0, desc1;
65 
66 	if (nbits < 8)
67 		nbits = 8;
68 
69 	/* Set the PCI address */
70 	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
71 		(lower_32_bits(pci_addr) & GENMASK(31, 8));
72 	addr1 = upper_32_bits(pci_addr);
73 
74 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
75 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);
76 
77 	/* Set the PCIe header descriptor */
78 	if (is_io)
79 		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
80 	else
81 		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
82 	desc1 = 0;
83 
84 	/*
85 	 * Whatever Bit [23] is set or not inside DESC0 register of the outbound
86 	 * PCIe descriptor, the PCI function number must be set into
87 	 * Bits [26:24] of DESC0 anyway.
88 	 *
89 	 * In Root Complex mode, the function number is always 0 but in Endpoint
90 	 * mode, the PCIe controller may support more than one function. This
91 	 * function number needs to be set properly into the outbound PCIe
92 	 * descriptor.
93 	 *
94 	 * Besides, setting Bit [23] is mandatory when in Root Complex mode:
95 	 * then the driver must provide the bus, resp. device, number in
96 	 * Bits [7:0] of DESC1, resp. Bits[31:27] of DESC0. Like the function
97 	 * number, the device number is always 0 in Root Complex mode.
98 	 *
99 	 * However when in Endpoint mode, we can clear Bit [23] of DESC0, hence
100 	 * the PCIe controller will use the captured values for the bus and
101 	 * device numbers.
102 	 */
103 	if (pcie->is_rc) {
104 		/* The device and function numbers are always 0. */
105 		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
106 			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
107 		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
108 	} else {
109 		/*
110 		 * Use captured values for bus and device numbers but still
111 		 * need to set the function number.
112 		 */
113 		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
114 	}
115 
116 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
117 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
118 
119 	/* Set the CPU address */
120 	if (pcie->ops && pcie->ops->cpu_addr_fixup)
121 		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
122 
123 	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
124 		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
125 	addr1 = upper_32_bits(cpu_addr);
126 
127 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
128 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
129 }
130 EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
131 
132 void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
133 						  u8 busnr, u8 fn,
134 						  u32 r, u64 cpu_addr)
135 {
136 	u32 addr0, addr1, desc0, desc1;
137 
138 	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
139 	desc1 = 0;
140 
141 	/* See cdns_pcie_set_outbound_region() comments above. */
142 	if (pcie->is_rc) {
143 		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
144 			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
145 		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
146 	} else {
147 		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
148 	}
149 
150 	/* Set the CPU address */
151 	if (pcie->ops && pcie->ops->cpu_addr_fixup)
152 		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
153 
154 	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
155 		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
156 	addr1 = upper_32_bits(cpu_addr);
157 
158 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
159 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
160 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
161 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
162 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
163 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
164 }
165 EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
166 
167 void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
168 {
169 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
170 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
171 
172 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
173 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);
174 
175 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
176 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
177 }
178 EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
179 
180 void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
181 {
182 	int i = pcie->phy_count;
183 
184 	while (i--) {
185 		phy_power_off(pcie->phy[i]);
186 		phy_exit(pcie->phy[i]);
187 	}
188 }
189 EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
190 
191 int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
192 {
193 	int ret;
194 	int i;
195 
196 	for (i = 0; i < pcie->phy_count; i++) {
197 		ret = phy_init(pcie->phy[i]);
198 		if (ret < 0)
199 			goto err_phy;
200 
201 		ret = phy_power_on(pcie->phy[i]);
202 		if (ret < 0) {
203 			phy_exit(pcie->phy[i]);
204 			goto err_phy;
205 		}
206 	}
207 
208 	return 0;
209 
210 err_phy:
211 	while (--i >= 0) {
212 		phy_power_off(pcie->phy[i]);
213 		phy_exit(pcie->phy[i]);
214 	}
215 
216 	return ret;
217 }
218 EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
219 
220 int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
221 {
222 	struct device_node *np = dev->of_node;
223 	int phy_count;
224 	struct phy **phy;
225 	struct device_link **link;
226 	int i;
227 	int ret;
228 	const char *name;
229 
230 	phy_count = of_property_count_strings(np, "phy-names");
231 	if (phy_count < 1) {
232 		dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
233 		pcie->phy_count = 0;
234 		return 0;
235 	}
236 
237 	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
238 	if (!phy)
239 		return -ENOMEM;
240 
241 	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
242 	if (!link)
243 		return -ENOMEM;
244 
245 	for (i = 0; i < phy_count; i++) {
246 		of_property_read_string_index(np, "phy-names", i, &name);
247 		phy[i] = devm_phy_get(dev, name);
248 		if (IS_ERR(phy[i])) {
249 			ret = PTR_ERR(phy[i]);
250 			goto err_phy;
251 		}
252 		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
253 		if (!link[i]) {
254 			devm_phy_put(dev, phy[i]);
255 			ret = -EINVAL;
256 			goto err_phy;
257 		}
258 	}
259 
260 	pcie->phy_count = phy_count;
261 	pcie->phy = phy;
262 	pcie->link = link;
263 
264 	ret =  cdns_pcie_enable_phy(pcie);
265 	if (ret)
266 		goto err_phy;
267 
268 	return 0;
269 
270 err_phy:
271 	while (--i >= 0) {
272 		device_link_del(link[i]);
273 		devm_phy_put(dev, phy[i]);
274 	}
275 
276 	return ret;
277 }
278 EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
279 
/*
 * cdns_pcie_suspend_noirq() - noirq system-suspend hook: power down the
 * controller's PHYs.
 * @dev: the controller device (drvdata holds the struct cdns_pcie)
 *
 * Return: always 0.
 */
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	cdns_pcie_disable_phy(dev_get_drvdata(dev));

	return 0;
}
288 
/*
 * cdns_pcie_resume_noirq() - noirq system-resume hook: re-enable the
 * controller's PHYs.
 * @dev: the controller device (drvdata holds the struct cdns_pcie)
 *
 * Return: 0 on success, or the error from cdns_pcie_enable_phy().
 */
static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "failed to enable PHY\n");

	return ret;
}
302 
/*
 * System sleep PM callbacks shared by the Cadence host and endpoint
 * drivers; only the noirq phase is handled (PHY power down/up).
 */
const struct dev_pm_ops cdns_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				  cdns_pcie_resume_noirq)
};
EXPORT_SYMBOL_GPL(cdns_pcie_pm_ops);
308 
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence PCIe controller driver");
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
312