xref: /linux/drivers/pci/controller/cadence/pcie-cadence.c (revision 3719a04a80caf660f899a462cd8f3973bcfa676e)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>

#include "pcie-cadence.h"

void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
	u32 delay = 0x3;
	u32 ltssm_control_cap;

	/*
	 * Set the LTSSM Detect Quiet state min. delay to 2ms.
	 */
	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
	ltssm_control_cap = ((ltssm_control_cap &
			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));

	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
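
#if 0
/*
 * Illustrative sketch (not compiled into the driver): a platform glue driver
 * that needs the longer Detect Quiet minimum delay would typically call the
 * helper above once, after obtaining its cdns_pcie handle and before link
 * training is started.  The function and flag names here are hypothetical
 * placeholders.
 */
static void example_glue_setup(struct cdns_pcie *pcie, bool quirk_detect_quiet)
{
	if (quirk_detect_quiet)
		cdns_pcie_detect_quiet_min_delay_set(pcie);
}
#endif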

void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;
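
	/*
	 * For example, size = 64 KiB gives sz = 0x10000 and nbits = 16.  Any
	 * size smaller than 256 bytes is clamped to the 8-bit minimum, since
	 * the low 8 address bits are passed through untranslated (only bits
	 * [31:8] of the addresses are programmed below).
	 */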

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether or not Bit [23] is set in the DESC0 register of the outbound
	 * PCIe descriptor, the PCI function number must be set into
	 * Bits [26:24] of DESC0 anyway.
	 *
	 * In Root Complex mode, the function number is always 0 but in Endpoint
	 * mode, the PCIe controller may support more than one function. This
	 * function number needs to be set properly into the outbound PCIe
	 * descriptor.
	 *
	 * Besides, setting Bit [23] is mandatory when in Root Complex mode:
	 * the driver must then provide the bus number in Bits [7:0] of DESC1
	 * and the device number in Bits [31:27] of DESC0. Like the function
	 * number, the device number is always 0 in Root Complex mode.
	 *
	 * However, when in Endpoint mode, we can clear Bit [23] of DESC0, so
	 * that the PCIe controller uses the captured values for the bus and
	 * device numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		/*
		 * Use captured values for the bus and device numbers, but the
		 * function number still needs to be set.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
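
#if 0
/*
 * Illustrative sketch (not compiled into the driver): how a host bridge
 * driver might map a 1 MiB non-prefetchable memory window through outbound
 * region 1.  The bus number and addresses are made-up placeholders, and
 * SZ_1M comes from <linux/sizes.h>.
 */
static void example_map_mem_window(struct cdns_pcie *pcie)
{
	cdns_pcie_set_outbound_region(pcie, 0 /* busnr */, 0 /* fn */,
				      1 /* region */, false /* MEM, not IO */,
				      0x10000000 /* cpu_addr */,
				      0x10000000 /* pci_addr */, SZ_1M);
}
#endif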

void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

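	/*
	 * A fixed 2^17-byte (128 KiB) CPU window is programmed and the PCI
	 * address registers are cleared below: message TLPs generated through
	 * a NORMAL_MSG region are not address-routed, so no address
	 * translation is needed on the PCI side.
	 */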
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);

void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);

void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
	int i = pcie->phy_count;

	while (i--) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}
EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);

int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(pcie->phy[i]);
		if (ret < 0) {
			phy_exit(pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
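	/* Power off and exit every PHY that was brought up before the failure. */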
	while (--i >= 0) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);

int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
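	/* Drop the device links and PHY references acquired so far. */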
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
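
#if 0
/*
 * Illustrative sketch (not compiled into the driver): typical probe-time use
 * of cdns_pcie_init_phy() from a platform glue driver.  struct example_glue
 * and the function names are hypothetical placeholders, and the sketch
 * assumes <linux/platform_device.h> is included.
 */
struct example_glue {
	struct cdns_pcie pcie;
};

static int example_glue_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct example_glue *glue;
	int ret;

	glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	/* Bring up the (optional) PHYs before touching controller registers. */
	ret = cdns_pcie_init_phy(dev, &glue->pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, &glue->pcie);
	return 0;
}
#endif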

static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	cdns_pcie_disable_phy(pcie);

	return 0;
}

static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "failed to enable PHY\n");
		return ret;
	}

	return 0;
}

const struct dev_pm_ops cdns_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				  cdns_pcie_resume_noirq)
};
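
#if 0
/*
 * Illustrative sketch (not compiled into the driver): a platform glue driver
 * reusing these noirq PM callbacks by pointing its .pm field at
 * cdns_pcie_pm_ops.  The driver name is a placeholder and example_glue_probe
 * refers to the hypothetical probe sketch above.
 */
static struct platform_driver example_cdns_glue_driver = {
	.driver = {
		.name = "example-cdns-pcie",
		.pm = &cdns_pcie_pm_ops,
	},
	.probe = example_glue_probe,
};
#endif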

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence PCIe controller driver");
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");