// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/kernel.h>
#include <linux/of.h>

#include "pcie-cadence.h"

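/**
 * cdns_pcie_detect_quiet_min_delay_set - set the LTSSM Detect Quiet min. delay
 * @pcie: the Cadence PCIe controller
 *
 * Program the Detect Quiet state minimum delay field of the LTSSM_CONTROL_CAP
 * register to 2 ms (encoded value 0x3).
 */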
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
	u32 delay = 0x3;
	u32 ltssm_control_cap;

	/*
	 * Set the LTSSM Detect Quiet state minimum delay to 2 ms.
	 */
	ltssm_control_cap = cdns_pcie_readl(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP);
	ltssm_control_cap = ((ltssm_control_cap &
			    ~CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK) |
			    CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay));

	cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}

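/**
 * cdns_pcie_set_outbound_region - set up an outbound address translation region
 * @pcie: the Cadence PCIe controller
 * @busnr: PCI bus number, used only in Root Complex mode
 * @fn: PCI function number, used only in Endpoint mode
 * @r: index of the outbound region to program
 * @is_io: true to map an I/O region, false to map a memory region
 * @cpu_addr: CPU address to translate from
 * @pci_addr: PCI address to translate to
 * @size: size of the region; rounded up to a power of two of at least 256 bytes
 */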
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size)
{
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64-bit values.
	 */
	u64 sz = 1ULL << fls64(size - 1);
	int nbits = ilog2(sz);
	u32 addr0, addr1, desc0, desc1;

	if (nbits < 8)
		nbits = 8;

	/* Set the PCI address */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
		(lower_32_bits(pci_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(pci_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1);

	/* Set the PCIe header descriptor */
	if (is_io)
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO;
	else
		desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM;
	desc1 = 0;

	/*
	 * Whether or not Bit [23] is set in the DESC0 register of the outbound
	 * PCIe descriptor, the PCI function number must always be written into
	 * Bits [26:24] of DESC0.
	 *
	 * In Root Complex mode, the function number is always 0, but in
	 * Endpoint mode the PCIe controller may support more than one
	 * function. This function number needs to be set properly in the
	 * outbound PCIe descriptor.
	 *
	 * Besides, setting Bit [23] is mandatory in Root Complex mode: the
	 * driver must then provide the bus number in Bits [7:0] of DESC1 and
	 * the device number in Bits [31:27] of DESC0. Like the function
	 * number, the device number is always 0 in Root Complex mode.
	 *
	 * In Endpoint mode, however, Bit [23] of DESC0 can be cleared so that
	 * the PCIe controller uses the captured values for the bus and device
	 * numbers.
	 */
	if (pcie->is_rc) {
		/* The device and function numbers are always 0. */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		/*
		 * Use the captured values for the bus and device numbers, but
		 * the function number still needs to be set explicitly.
		 */
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

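/**
 * cdns_pcie_set_outbound_region_for_normal_msg - set up an outbound region for
 *						  normal message TLPs
 * @pcie: the Cadence PCIe controller
 * @busnr: PCI bus number, used only in Root Complex mode
 * @fn: PCI function number, used only in Endpoint mode
 * @r: index of the outbound region to program
 * @cpu_addr: CPU address of the region
 *
 * The PCI address is not used for normal message TLPs, so PCI_ADDR0 and
 * PCI_ADDR1 are cleared and the region is given a fixed 17-bit CPU address
 * aperture.
 */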
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr)
{
	u32 addr0, addr1, desc0, desc1;

	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
	desc1 = 0;

	/* See cdns_pcie_set_outbound_region() comments above. */
	if (pcie->is_rc) {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
			 CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
		desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	} else {
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn);
	}

	/* Set the CPU address */
	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}

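/**
 * cdns_pcie_reset_outbound_region - disable an outbound region
 * @pcie: the Cadence PCIe controller
 * @r: index of the outbound region to reset
 *
 * Clear all address translation registers of outbound region @r.
 */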
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0);

	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}

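/**
 * cdns_pcie_disable_phy - power off and exit all PHYs attached to the controller
 * @pcie: the Cadence PCIe controller
 */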
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
	int i = pcie->phy_count;

	while (i--) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}
}

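/**
 * cdns_pcie_enable_phy - initialize and power on all PHYs attached to the controller
 * @pcie: the Cadence PCIe controller
 *
 * On failure, any PHY that was already brought up is powered off and exited
 * again before returning.
 *
 * Return: 0 on success, or a negative error code from the PHY framework.
 */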
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
	int ret;
	int i;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(pcie->phy[i]);
		if (ret < 0) {
			phy_exit(pcie->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(pcie->phy[i]);
		phy_exit(pcie->phy[i]);
	}

	return ret;
}

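/**
 * cdns_pcie_init_phy - look up, link and enable the PHYs described in DT
 * @dev: the PCIe controller device
 * @pcie: the Cadence PCIe controller
 *
 * Parse the "phy-names" property, get each named PHY, add a stateless device
 * link to it, and finally bring all PHYs up. When the property is missing,
 * return 0 with pcie->phy_count set to 0 so the driver can run without PHYs.
 *
 * Return: 0 on success, or a negative error code.
 */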
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
	struct device_node *np = dev->of_node;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	int i;
	int ret;
	const char *name;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 1) {
		dev_err(dev, "no phy-names property, PHY will not be initialized\n");
		pcie->phy_count = 0;
		return 0;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		of_property_read_string_index(np, "phy-names", i, &name);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i])) {
			ret = PTR_ERR(phy[i]);
			goto err_phy;
		}
		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			devm_phy_put(dev, phy[i]);
			ret = -EINVAL;
			goto err_phy;
		}
	}

	pcie->phy_count = phy_count;
	pcie->phy = phy;
	pcie->link = link;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret)
		goto err_phy;

	return 0;

err_phy:
	while (--i >= 0) {
		device_link_del(link[i]);
		devm_phy_put(dev, phy[i]);
	}

	return ret;
}

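/* Power the PHYs down on noirq system suspend. */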
static int cdns_pcie_suspend_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);

	cdns_pcie_disable_phy(pcie);

	return 0;
}

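/* Bring the PHYs back up on noirq system resume. */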
static int cdns_pcie_resume_noirq(struct device *dev)
{
	struct cdns_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = cdns_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}

const struct dev_pm_ops cdns_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
				  cdns_pcie_resume_noirq)
};