xref: /linux/drivers/pci/controller/cadence/pcie-cadence-host-common.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence PCIe host controller library.
4  *
5  * Copyright (c) 2017 Cadence
6  * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
7  */
8 #include <linux/delay.h>
9 #include <linux/kernel.h>
10 #include <linux/list_sort.h>
11 #include <linux/of_address.h>
12 #include <linux/of_pci.h>
13 #include <linux/platform_device.h>
14 
15 #include "pcie-cadence.h"
16 #include "pcie-cadence-host-common.h"
17 
18 #define LINK_RETRAIN_TIMEOUT HZ
19 
/*
 * Largest inbound window each root-port BAR can cover, indexed by
 * enum cdns_pcie_rp_bar.  RP_NO_BAR uses bit 63 as an effectively
 * unlimited (2^63 bytes) catch-all match size.
 */
u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_2G),	/* 256 GB */
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};
EXPORT_SYMBOL_GPL(bar_max_size);
26 
27 int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
28 {
29 	u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
30 	unsigned long end_jiffies;
31 	u16 lnk_stat;
32 
33 	/* Wait for link training to complete. Exit after timeout. */
34 	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
35 	do {
36 		lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
37 		if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
38 			break;
39 		usleep_range(0, 1000);
40 	} while (time_before(jiffies, end_jiffies));
41 
42 	if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
43 		return 0;
44 
45 	return -ETIMEDOUT;
46 }
47 EXPORT_SYMBOL_GPL(cdns_pcie_host_training_complete);
48 
49 int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie,
50 				 cdns_pcie_linkup_func pcie_link_up)
51 {
52 	struct device *dev = pcie->dev;
53 	int retries;
54 
55 	/* Check if the link is up or not */
56 	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
57 		if (pcie_link_up(pcie)) {
58 			dev_info(dev, "Link up\n");
59 			return 0;
60 		}
61 		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
62 	}
63 
64 	return -ETIMEDOUT;
65 }
66 EXPORT_SYMBOL_GPL(cdns_pcie_host_wait_for_link);
67 
68 int cdns_pcie_retrain(struct cdns_pcie *pcie,
69 		      cdns_pcie_linkup_func pcie_link_up)
70 {
71 	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
72 	u16 lnk_stat, lnk_ctl;
73 	int ret = 0;
74 
75 	/*
76 	 * Set retrain bit if current speed is 2.5 GB/s,
77 	 * but the PCIe root port support is > 2.5 GB/s.
78 	 */
79 
80 	lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
81 					     PCI_EXP_LNKCAP));
82 	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
83 		return ret;
84 
85 	lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
86 	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
87 		lnk_ctl = cdns_pcie_rp_readw(pcie,
88 					     pcie_cap_off + PCI_EXP_LNKCTL);
89 		lnk_ctl |= PCI_EXP_LNKCTL_RL;
90 		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
91 				    lnk_ctl);
92 
93 		ret = cdns_pcie_host_training_complete(pcie);
94 		if (ret)
95 			return ret;
96 
97 		ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up);
98 	}
99 	return ret;
100 }
101 EXPORT_SYMBOL_GPL(cdns_pcie_retrain);
102 
103 int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc,
104 			      cdns_pcie_linkup_func pcie_link_up)
105 {
106 	struct cdns_pcie *pcie = &rc->pcie;
107 	int ret;
108 
109 	ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up);
110 
111 	/*
112 	 * Retrain link for Gen2 training defect
113 	 * if quirk flag is set.
114 	 */
115 	if (!ret && rc->quirk_retrain_flag)
116 		ret = cdns_pcie_retrain(pcie, pcie_link_up);
117 
118 	return ret;
119 }
120 EXPORT_SYMBOL_GPL(cdns_pcie_host_start_link);
121 
122 enum cdns_pcie_rp_bar
123 cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
124 {
125 	enum cdns_pcie_rp_bar bar, sel_bar;
126 
127 	sel_bar = RP_BAR_UNDEFINED;
128 	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
129 		if (!rc->avail_ib_bar[bar])
130 			continue;
131 
132 		if (size <= bar_max_size[bar]) {
133 			if (sel_bar == RP_BAR_UNDEFINED) {
134 				sel_bar = bar;
135 				continue;
136 			}
137 
138 			if (bar_max_size[bar] < bar_max_size[sel_bar])
139 				sel_bar = bar;
140 		}
141 	}
142 
143 	return sel_bar;
144 }
145 EXPORT_SYMBOL_GPL(cdns_pcie_host_find_min_bar);
146 
147 enum cdns_pcie_rp_bar
148 cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
149 {
150 	enum cdns_pcie_rp_bar bar, sel_bar;
151 
152 	sel_bar = RP_BAR_UNDEFINED;
153 	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
154 		if (!rc->avail_ib_bar[bar])
155 			continue;
156 
157 		if (size >= bar_max_size[bar]) {
158 			if (sel_bar == RP_BAR_UNDEFINED) {
159 				sel_bar = bar;
160 				continue;
161 			}
162 
163 			if (bar_max_size[bar] > bar_max_size[sel_bar])
164 				sel_bar = bar;
165 		}
166 	}
167 
168 	return sel_bar;
169 }
170 EXPORT_SYMBOL_GPL(cdns_pcie_host_find_max_bar);
171 
172 int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
173 				  const struct list_head *b)
174 {
175 	struct resource_entry *entry1, *entry2;
176 
177 	entry1 = container_of(a, struct resource_entry, node);
178 	entry2 = container_of(b, struct resource_entry, node);
179 
180 	return resource_size(entry2->res) - resource_size(entry1->res);
181 }
182 EXPORT_SYMBOL_GPL(cdns_pcie_host_dma_ranges_cmp);
183 
/**
 * cdns_pcie_host_bar_config - fit one dma-ranges entry into inbound BARs
 * @rc: root complex
 * @entry: dma-ranges resource entry to map
 * @pci_host_ib_config: platform callback that programs one inbound BAR
 *
 * Maps @entry through one or more free root-port BARs, splitting the
 * range across several BARs when no single free BAR is large enough to
 * cover it in one window.
 *
 * Return: 0 on success, a negative error code if the entry cannot be
 * mapped with the remaining free BARs.
 */
int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
			      struct resource_entry *entry,
			      cdns_pcie_host_bar_ib_cfg pci_host_ib_config)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	u64 cpu_addr, size, winsize;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	while (size > 0) {
		/*
		 * Try to find a minimum BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This will
		 * fail if the size of each of the available BARs is less than
		 * the remaining resource_entry size.
		 *
		 * If a minimum BAR is found, IB ATU will be configured and
		 * exited.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = pci_host_ib_config(rc, bar, cpu_addr, size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * If the control reaches here, it would mean the remaining
		 * resource_entry size cannot be fitted in a single BAR. So we
		 * find a maximum BAR whose size is less than or equal to the
		 * remaining resource_entry size and split the resource entry
		 * so that part of resource entry is fitted inside the maximum
		 * BAR. The remaining size would be fitted during the next
		 * iteration of the loop.
		 *
		 * If a maximum BAR is not found, there is no way we can fit
		 * this resource_entry, so we error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		/* Consume one full BAR-sized window and advance the cursor. */
		winsize = bar_max_size[bar];
		ret = pci_host_ib_config(rc, bar, cpu_addr, winsize, flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}
249 
250 int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc,
251 				  cdns_pcie_host_bar_ib_cfg pci_host_ib_config)
252 {
253 	struct cdns_pcie *pcie = &rc->pcie;
254 	struct device *dev = pcie->dev;
255 	struct device_node *np = dev->of_node;
256 	struct pci_host_bridge *bridge;
257 	struct resource_entry *entry;
258 	u32 no_bar_nbits = 32;
259 	int err;
260 
261 	bridge = pci_host_bridge_from_priv(rc);
262 	if (!bridge)
263 		return -ENOMEM;
264 
265 	if (list_empty(&bridge->dma_ranges)) {
266 		of_property_read_u32(np, "cdns,no-bar-match-nbits",
267 				     &no_bar_nbits);
268 		err = pci_host_ib_config(rc, RP_NO_BAR, 0x0, (u64)1 << no_bar_nbits, 0);
269 		if (err)
270 			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
271 		return err;
272 	}
273 
274 	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
275 
276 	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
277 		err = cdns_pcie_host_bar_config(rc, entry, pci_host_ib_config);
278 		if (err) {
279 			dev_err(dev, "Fail to configure IB using dma-ranges\n");
280 			return err;
281 		}
282 	}
283 
284 	return 0;
285 }
286 
287 MODULE_LICENSE("GPL");
288 MODULE_DESCRIPTION("Cadence PCIe host controller driver");
289