1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Synopsys DesignWare PCIe Endpoint controller driver
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 */
8
9 #include <linux/align.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/platform_device.h>
13
14 #include "pcie-designware.h"
15 #include <linux/pci-epc.h>
16 #include <linux/pci-epf.h>
17
18 /**
19 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
20 * the endpoint function
21 * @ep: DWC EP device
22 * @func_no: Function number of the endpoint device
23 *
24 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
25 */
26 struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep * ep,u8 func_no)27 dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
28 {
29 struct dw_pcie_ep_func *ep_func;
30
31 list_for_each_entry(ep_func, &ep->func_list, list) {
32 if (ep_func->func_no == func_no)
33 return ep_func;
34 }
35
36 return NULL;
37 }
38
/*
 * Clear one function's BAR register pair via DBI, and the corresponding
 * BAR mask via the DBI2 (shadow) space. @flags selects whether the BAR is
 * 64-bit, in which case the adjacent upper register is cleared as well.
 */
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
				   enum pci_barno bar, int flags)
{
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	/* BAR registers are read-only from PCI; unlock DBI writes first. */
	dw_pcie_dbi_ro_wr_en(pci);
	/* DBI2 write clears the BAR mask, DBI write clears the BAR itself. */
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
	dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		/* 64-bit BAR: also clear the upper half in the next slot. */
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}
55
56 /**
57 * dw_pcie_ep_reset_bar - Reset endpoint BAR
58 * @pci: DWC PCI device
59 * @bar: BAR number of the endpoint
60 */
dw_pcie_ep_reset_bar(struct dw_pcie * pci,enum pci_barno bar)61 void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
62 {
63 u8 func_no, funcs;
64
65 funcs = pci->ep.epc->max_functions;
66
67 for (func_no = 0; func_no < funcs; func_no++)
68 __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
69 }
70 EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
71
/* Find @cap in the function's standard config space capability list. */
static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
	return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
				 cap, NULL, ep, func_no);
}
77
/* Find extended capability @cap in the function's extended config space. */
static u16 dw_pcie_ep_find_ext_capability(struct dw_pcie_ep *ep,
					  u8 func_no, u8 cap)
{
	return PCI_FIND_NEXT_EXT_CAP(dw_pcie_ep_read_cfg, 0,
				     cap, NULL, ep, func_no);
}
84
/*
 * pci_epc_ops::write_header - program the function's standard config
 * header (IDs, class codes, cache line size, interrupt pin) via DBI.
 */
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/* The header registers are read-only from PCI; unlock DBI writes. */
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
	/* PCI_CLASS_DEVICE holds subclass (low byte) and base class (high). */
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
			      hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
			      hdr->cache_line_size);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
			      hdr->subsys_vendor_id);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
			      hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
109
110 /* BAR Match Mode inbound iATU mapping */
/* BAR Match Mode inbound iATU mapping */
static int dw_pcie_ep_ib_atu_bar(struct dw_pcie_ep *ep, u8 func_no, int type,
				 dma_addr_t parent_bus_addr, enum pci_barno bar,
				 size_t size)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);

	if (!ep_func)
		return -EINVAL;

	/*
	 * Reuse the window already bound to this BAR if one exists
	 * (bar_to_atu stores index + 1; 0 means "unallocated"), otherwise
	 * pick the first unused inbound window.
	 */
	if (!ep_func->bar_to_atu[bar])
		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
	else
		free_win = ep_func->bar_to_atu[bar] - 1;

	/* find_first_zero_bit() returns num_ib_windows when the map is full. */
	if (free_win >= pci->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
					  parent_bus_addr, bar, size);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/*
	 * Always increment free_win before assignment, since value 0 is used to identify
	 * unallocated mapping.
	 */
	ep_func->bar_to_atu[bar] = free_win + 1;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}
149
/*
 * Tear down every inbound iATU mapping associated with @bar — either the
 * single BAR Match Mode window, or the set of Address Match Mode windows
 * allocated for sub-range mappings.
 */
static void dw_pcie_ep_clear_ib_maps(struct dw_pcie_ep *ep, u8 func_no, enum pci_barno bar)
{
	struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	unsigned int i, num;
	u32 atu_index;
	u32 *indexes;

	if (!ep_func)
		return;

	/* Tear down the BAR Match Mode mapping, if any. */
	if (ep_func->bar_to_atu[bar]) {
		/* bar_to_atu stores index + 1; 0 means "unallocated". */
		atu_index = ep_func->bar_to_atu[bar] - 1;
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
		clear_bit(atu_index, ep->ib_window_map);
		ep_func->bar_to_atu[bar] = 0;
		return;
	}

	/* Tear down all Address Match Mode mappings, if any. */
	/* Detach the index array before walking it, then free it last. */
	indexes = ep_func->ib_atu_indexes[bar];
	num = ep_func->num_ib_atu_indexes[bar];
	ep_func->ib_atu_indexes[bar] = NULL;
	ep_func->num_ib_atu_indexes[bar] = 0;
	if (!indexes)
		return;
	for (i = 0; i < num; i++) {
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, indexes[i]);
		clear_bit(indexes[i], ep->ib_window_map);
	}
	devm_kfree(dev, indexes);
}
184
dw_pcie_ep_read_bar_assigned(struct dw_pcie_ep * ep,u8 func_no,enum pci_barno bar,int flags)185 static u64 dw_pcie_ep_read_bar_assigned(struct dw_pcie_ep *ep, u8 func_no,
186 enum pci_barno bar, int flags)
187 {
188 u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
189 u32 lo, hi;
190 u64 addr;
191
192 lo = dw_pcie_ep_readl_dbi(ep, func_no, reg);
193
194 if (flags & PCI_BASE_ADDRESS_SPACE)
195 return lo & PCI_BASE_ADDRESS_IO_MASK;
196
197 addr = lo & PCI_BASE_ADDRESS_MEM_MASK;
198 if (!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
199 return addr;
200
201 hi = dw_pcie_ep_readl_dbi(ep, func_no, reg + 4);
202 return addr | ((u64)hi << 32);
203 }
204
static int dw_pcie_ep_validate_submap(struct dw_pcie_ep *ep,
				      const struct pci_epf_bar_submap *submap,
				      unsigned int num_submap, size_t bar_size)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 align = pci->region_align;
	size_t covered = 0;
	unsigned int i;

	if (!align || !IS_ALIGNED(bar_size, align))
		return -EINVAL;

	/*
	 * The submap array order defines the BAR layout (submap[0] starts
	 * at offset 0 and each entry immediately follows the previous
	 * one). Here, validate that it forms a strict, gapless
	 * decomposition of the BAR:
	 * - each entry has a non-zero size
	 * - sizes, implicit offsets and phys_addr are aligned to
	 *   pci->region_align
	 * - each entry lies within the BAR range
	 * - the entries exactly cover the whole BAR
	 *
	 * Note: dw_pcie_prog_inbound_atu() also checks alignment for the
	 * PCI address and the target phys_addr, but validating up-front
	 * avoids partially programming iATU windows in vain.
	 */
	for (i = 0; i < num_submap; i++) {
		size_t sz = submap[i].size;

		if (!sz)
			return -EINVAL;

		if (!IS_ALIGNED(sz, align) || !IS_ALIGNED(covered, align) ||
		    !IS_ALIGNED(submap[i].phys_addr, align))
			return -EINVAL;

		/* Entry must fit within the remaining BAR space. */
		if (covered > bar_size || sz > bar_size - covered)
			return -EINVAL;

		covered += sz;
	}

	return covered == bar_size ? 0 : -EINVAL;
}
255
256 /* Address Match Mode inbound iATU mapping */
/* Address Match Mode inbound iATU mapping */
static int dw_pcie_ep_ib_atu_addr(struct dw_pcie_ep *ep, u8 func_no, int type,
				  const struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	const struct pci_epf_bar_submap *submap = epf_bar->submap;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	struct device *dev = pci->dev;
	u64 pci_addr, parent_bus_addr;
	u64 size, base, off = 0;
	int free_win, ret;
	unsigned int i;
	u32 *indexes;

	if (!ep_func || !epf_bar->num_submap || !submap || !epf_bar->size)
		return -EINVAL;

	/* Reject malformed layouts before programming any window. */
	ret = dw_pcie_ep_validate_submap(ep, submap, epf_bar->num_submap,
					 epf_bar->size);
	if (ret)
		return ret;

	/*
	 * Address Match Mode windows translate PCI addresses, so the BAR
	 * must already carry a host-assigned base address.
	 */
	base = dw_pcie_ep_read_bar_assigned(ep, func_no, bar, epf_bar->flags);
	if (!base) {
		dev_err(dev,
			"BAR%u not assigned, cannot set up sub-range mappings\n",
			bar);
		return -EINVAL;
	}

	indexes = devm_kcalloc(dev, epf_bar->num_submap, sizeof(*indexes),
			       GFP_KERNEL);
	if (!indexes)
		return -ENOMEM;

	ep_func->ib_atu_indexes[bar] = indexes;
	ep_func->num_ib_atu_indexes[bar] = 0;

	for (i = 0; i < epf_bar->num_submap; i++) {
		size = submap[i].size;
		parent_bus_addr = submap[i].phys_addr;

		/* Guard against u64 wrap-around of base + off. */
		if (off > (~0ULL) - base) {
			ret = -EINVAL;
			goto err;
		}

		pci_addr = base + off;
		off += size;

		free_win = find_first_zero_bit(ep->ib_window_map,
					       pci->num_ib_windows);
		if (free_win >= pci->num_ib_windows) {
			ret = -ENOSPC;
			goto err;
		}

		ret = dw_pcie_prog_inbound_atu(pci, free_win, type,
					       parent_bus_addr, pci_addr, size);
		if (ret)
			goto err;

		set_bit(free_win, ep->ib_window_map);
		indexes[i] = free_win;
		/*
		 * Record progress after each window so the error path only
		 * tears down windows that were actually programmed.
		 */
		ep_func->num_ib_atu_indexes[bar] = i + 1;
	}
	return 0;
err:
	dw_pcie_ep_clear_ib_maps(ep, func_no, bar);
	return ret;
}
328
/*
 * Allocate a free outbound iATU window, program it from @atu and record
 * the mapped parent-bus address so the window can be found again at
 * unmap time.
 */
static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
				   struct dw_pcie_ob_atu_cfg *atu)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 free_win;
	int ret;

	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
	if (free_win >= pci->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}

	atu->index = free_win;
	ret = dw_pcie_prog_outbound_atu(pci, atu);
	if (ret)
		return ret;

	/* Only claim the window once programming succeeded. */
	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = atu->parent_bus_addr;

	return 0;
}
352
/*
 * pci_epc_ops::clear_bar - reset the BAR register and tear down any
 * inbound iATU mappings bound to it.
 */
static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);

	/* Nothing to do if the BAR was never set. */
	if (!ep_func || !ep_func->epf_bar[bar])
		return;

	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);

	dw_pcie_ep_clear_ib_maps(ep, func_no, bar);

	ep_func->epf_bar[bar] = NULL;
}
370
/*
 * Return the config-space offset of the Resizable BAR capability entry
 * that controls @bar, or 0 if the capability is absent or has no entry
 * for that BAR.
 */
static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie_ep *ep, u8 func_no,
						enum pci_barno bar)
{
	u32 reg, bar_index;
	unsigned int offset, nbars;
	int i;

	offset = dw_pcie_ep_find_ext_capability(ep, func_no, PCI_EXT_CAP_ID_REBAR);
	if (!offset)
		return offset;

	reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);

	/*
	 * Each capability entry is one CAP/CTRL register pair, so
	 * PCI_REBAR_CTRL (the pair size in bytes) doubles as the stride.
	 */
	for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
		reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
		bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
		if (bar_index == bar)
			return offset;
	}

	return 0;
}
394
/* Configure a Resizable BAR: enable it and program its "supported sizes". */
static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
					struct pci_epf_bar *epf_bar)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	unsigned int rebar_offset;
	u32 rebar_cap, rebar_ctrl;
	int ret;

	rebar_offset = dw_pcie_ep_get_rebar_offset(ep, func_no, bar);
	if (!rebar_offset)
		return -EINVAL;

	/* Translate the requested size into a PCI_REBAR_CAP sizes bitmask. */
	ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	/*
	 * A BAR mask should not be written for a resizable BAR. The BAR mask
	 * is automatically derived by the controller every time the "selected
	 * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
	 * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
	 * BIT(0) to set the BAR enable bit.
	 */
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	/*
	 * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
	 * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
	 * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
	 */
	rebar_ctrl = dw_pcie_ep_readl_dbi(ep, func_no, rebar_offset + PCI_REBAR_CTRL);
	rebar_ctrl &= ~GENMASK(31, 16);
	dw_pcie_ep_writel_dbi(ep, func_no, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);

	/*
	 * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
	 * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
	 * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
	 */
	dw_pcie_ep_writel_dbi(ep, func_no, rebar_offset + PCI_REBAR_CAP, rebar_cap);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
452
/* Configure a programmable (or fixed) BAR: write the BAR mask and flags. */
static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
					   struct pci_epf_bar *epf_bar)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * epf_bar->barno);
	u64 mask = epf_bar->size - 1;
	int flags = epf_bar->flags;

	dw_pcie_dbi_ro_wr_en(pci);

	/* The BAR mask (DBI2 shadow) encodes size - 1; the BAR gets flags. */
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(mask));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(mask));
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
476
dw_pcie_ep_get_bar_type(struct dw_pcie_ep * ep,enum pci_barno bar)477 static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
478 enum pci_barno bar)
479 {
480 const struct pci_epc_features *epc_features;
481
482 if (!ep->ops->get_features)
483 return BAR_PROGRAMMABLE;
484
485 epc_features = ep->ops->get_features(ep);
486
487 return epc_features->bar[bar].type;
488 }
489
/*
 * pci_epc_ops::set_bar - program a BAR and its inbound iATU mapping.
 *
 * Handles both the first-time configuration of a BAR (writes the BAR
 * register according to its type) and subsequent dynamic updates, where
 * only the inbound mapping is reprogrammed so the host-assigned address
 * is preserved.
 */
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	enum pci_epc_bar_type bar_type;
	int flags = epf_bar->flags;
	int ret, type;

	if (!ep_func)
		return -EINVAL;

	/*
	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
	 * 1 and 2 to form a 64-bit BAR.
	 */
	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
		return -EINVAL;

	/*
	 * Certain EPF drivers dynamically change the physical address of a BAR
	 * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
	 * calling clear_bar() would clear the BAR's PCI address assigned by the
	 * host).
	 */
	if (ep_func->epf_bar[bar]) {
		/*
		 * We can only dynamically change a BAR if the new BAR size and
		 * BAR flags do not differ from the existing configuration.
		 *
		 * Note: this safety check only works when the caller uses
		 * a new struct pci_epf_bar in the second set_bar() call.
		 * If the same instance is updated in place and passed in,
		 * we cannot reliably detect invalid barno/size/flags
		 * changes here.
		 */
		if (ep_func->epf_bar[bar]->barno != bar ||
		    ep_func->epf_bar[bar]->size != size ||
		    ep_func->epf_bar[bar]->flags != flags)
			return -EINVAL;

		/*
		 * When dynamically changing a BAR, tear down any existing
		 * mappings before re-programming. This is redundant when
		 * both the old and new mappings are BAR Match Mode, but
		 * required to handle in-place updates and match-mode
		 * changes reliably.
		 */
		dw_pcie_ep_clear_ib_maps(ep, func_no, bar);

		/*
		 * When dynamically changing a BAR, skip writing the BAR reg, as
		 * that would clear the BAR's PCI address assigned by the host.
		 */
		goto config_atu;
	} else {
		/*
		 * Subrange mapping is an update-only operation. The BAR
		 * must have been configured once without submaps so that
		 * subsequent set_bar() calls can update inbound mappings
		 * without touching the BAR register (and clobbering the
		 * host-assigned address).
		 */
		if (epf_bar->num_submap)
			return -EINVAL;
	}

	bar_type = dw_pcie_ep_get_bar_type(ep, bar);
	switch (bar_type) {
	case BAR_FIXED:
		/*
		 * There is no need to write a BAR mask for a fixed BAR (except
		 * to write 1 to the LSB of the BAR mask register, to enable the
		 * BAR). Write the BAR mask regardless. (The fixed bits in the
		 * BAR mask register will be read-only anyway.)
		 */
		fallthrough;
	case BAR_PROGRAMMABLE:
		ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
		break;
	case BAR_RESIZABLE:
		ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
		break;
	default:
		ret = -EINVAL;
		dev_err(pci->dev, "Invalid BAR type\n");
		break;
	}

	if (ret)
		return ret;

config_atu:
	/* I/O vs memory BAR determines the iATU transaction type. */
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		type = PCIE_ATU_TYPE_MEM;
	else
		type = PCIE_ATU_TYPE_IO;

	/* Sub-range layouts use Address Match Mode, else BAR Match Mode. */
	if (epf_bar->num_submap)
		ret = dw_pcie_ep_ib_atu_addr(ep, func_no, type, epf_bar);
	else
		ret = dw_pcie_ep_ib_atu_bar(ep, func_no, type,
					    epf_bar->phys_addr, bar, size);

	if (ret)
		return ret;

	ep_func->epf_bar[bar] = epf_bar;

	return 0;
}
604
/* Look up the outbound iATU index mapping @addr; -EINVAL if none matches. */
static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
			      u32 *atu_index)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 win;

	for_each_set_bit(win, ep->ob_window_map, pci->num_ob_windows) {
		if (ep->outbound_addr[win] == addr) {
			*atu_index = win;
			return 0;
		}
	}

	return -EINVAL;
}
620
/*
 * pci_epc_ops::align_addr - round @pci_addr down to the iATU alignment,
 * report the intra-window offset and grow *pci_size to cover the shift.
 */
static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
				 size_t *pci_size, size_t *offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u64 align_mask = pci->region_align - 1;
	size_t shift = pci_addr & align_mask;

	*offset = shift;
	/* Window size must absorb the shift and stay page-granular. */
	*pci_size = ALIGN(shift + *pci_size, epc->mem->window.page_size);

	return pci_addr & ~align_mask;
}
634
/*
 * pci_epc_ops::unmap_addr - tear down the outbound iATU window that maps
 * the CPU address @addr. Silently does nothing if no window matches.
 */
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/* Windows are recorded by parent-bus address; translate back. */
	ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
				 &atu_index);
	if (ret < 0)
		return;

	ep->outbound_addr[atu_index] = 0;
	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
	clear_bit(atu_index, ep->ob_window_map);
}
652
/*
 * pci_epc_ops::map_addr - map CPU address @addr to PCI address @pci_addr
 * through a free outbound iATU window.
 */
static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ob_atu_cfg atu = {
		.func_no = func_no,
		.type = PCIE_ATU_TYPE_MEM,
		/* The iATU is programmed with parent-bus addresses. */
		.parent_bus_addr = addr - pci->parent_bus_offset,
		.pci_addr = pci_addr,
		.size = size,
	};
	int ret;

	ret = dw_pcie_ep_outbound_atu(ep, &atu);
	if (ret)
		dev_err(pci->dev, "Failed to enable address\n");

	return ret;
}
674
/*
 * pci_epc_ops::get_msi - return the number of MSI vectors enabled by the
 * host, or -EINVAL if MSI is absent or disabled.
 */
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 flags;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	flags = dw_pcie_ep_readw_dbi(ep, func_no,
				     ep_func->msi_cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Multiple Message Enable encodes 2^n vectors. */
	return 1 << FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
}
694
/*
 * pci_epc_ops::set_msi - advertise the number of MSI vectors this
 * function supports by programming Multiple Message Capable.
 */
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      u8 nr_irqs)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u8 mmc = order_base_2(nr_irqs);	/* MMC is log2 encoded */
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSI_FLAGS_QMASK;
	val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
718
/*
 * pci_epc_ops::get_msix - return the MSI-X table size, or -EINVAL if
 * MSI-X is absent or disabled by the host.
 */
static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 flags;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	flags = dw_pcie_ep_readw_dbi(ep, func_no,
				     ep_func->msix_cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	/* Table Size is encoded as N-1. */
	return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
738
/*
 * pci_epc_ops::set_msix - program the MSI-X capability: table size,
 * table location (BIR + offset) and PBA location for @func_no.
 */
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       u16 nr_irqs, enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= nr_irqs - 1; /* encoded as N-1 */
	/*
	 * Write through the per-function accessor, matching the read above.
	 * Using dw_pcie_writew_dbi() here would always target function 0's
	 * config space and corrupt the table size of other functions.
	 */
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	/* The PBA follows the table immediately in the same BAR. */
	reg = ep_func->msix_cap + PCI_MSIX_PBA;
	val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
771
/* pci_epc_ops::raise_irq - delegate IRQ generation to the platform driver. */
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				unsigned int type, u16 interrupt_num)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	return ep->ops->raise_irq ?
		ep->ops->raise_irq(ep, func_no, type, interrupt_num) : -EINVAL;
}
782
/* pci_epc_ops::stop - release the MSI iATU window and stop the link. */
static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/*
	 * Tear down the dedicated outbound window used for MSI
	 * generation. This avoids leaking an iATU window across
	 * endpoint stop/start cycles.
	 */
	if (ep->msi_iatu_mapped) {
		dw_pcie_ep_unmap_addr(epc, 0, 0, ep->msi_mem_phys);
		ep->msi_iatu_mapped = false;
	}

	dw_pcie_stop_link(pci);
}
800
/* pci_epc_ops::start - bring up the PCIe link. */
static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	return dw_pcie_start_link(to_dw_pcie_from_ep(ep));
}
808
809 static const struct pci_epc_features*
dw_pcie_ep_get_features(struct pci_epc * epc,u8 func_no,u8 vfunc_no)810 dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
811 {
812 struct dw_pcie_ep *ep = epc_get_drvdata(epc);
813
814 if (!ep->ops->get_features)
815 return NULL;
816
817 return ep->ops->get_features(ep);
818 }
819
/* Operations exposed by this controller to the PCI endpoint framework. */
static const struct pci_epc_ops epc_ops = {
	.write_header		= dw_pcie_ep_write_header,
	.set_bar		= dw_pcie_ep_set_bar,
	.clear_bar		= dw_pcie_ep_clear_bar,
	.align_addr		= dw_pcie_ep_align_addr,
	.map_addr		= dw_pcie_ep_map_addr,
	.unmap_addr		= dw_pcie_ep_unmap_addr,
	.set_msi		= dw_pcie_ep_set_msi,
	.get_msi		= dw_pcie_ep_get_msi,
	.set_msix		= dw_pcie_ep_set_msix,
	.get_msix		= dw_pcie_ep_get_msix,
	.raise_irq		= dw_pcie_ep_raise_irq,
	.start			= dw_pcie_ep_start,
	.stop			= dw_pcie_ep_stop,
	.get_features		= dw_pcie_ep_get_features,
};
836
837 /**
838 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
839 * @ep: DWC EP device
840 * @func_no: Function number of the endpoint
841 *
842 * Return: 0 if success, errno otherwise.
843 */
dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep * ep,u8 func_no)844 int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
845 {
846 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
847 struct device *dev = pci->dev;
848
849 dev_err(dev, "EP cannot raise INTX IRQs\n");
850
851 return -EINVAL;
852 }
853 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
854
/**
 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 * @interrupt_num: Interrupt number to be raised
 *
 * Reads the host-programmed MSI address/data from the function's MSI
 * capability and generates the interrupt by writing the message data to
 * the dedicated MSI memory window, which is mapped once to the message
 * address via an outbound iATU region.
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	u32 msg_addr_lower, msg_addr_upper, reg;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u16 msg_ctrl, msg_data;
	bool has_upper;
	u64 msg_addr;
	int ret;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	if (has_upper) {
		reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
		/* The data register follows the 64-bit address pair. */
		reg = ep_func->msi_cap + PCI_MSI_DATA_64;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep_func->msi_cap + PCI_MSI_DATA_32;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	}
	msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;

	/* Align to the iATU granularity; offset is the shift within window. */
	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);

	/*
	 * Program the outbound iATU once and keep it enabled.
	 *
	 * The spec warns that updating iATU registers while there are
	 * operations in flight on the AXI bridge interface is not
	 * supported, so we avoid reprogramming the region on every MSI,
	 * specifically unmapping immediately after writel().
	 */
	if (!ep->msi_iatu_mapped) {
		ret = dw_pcie_ep_map_addr(epc, func_no, 0,
					  ep->msi_mem_phys, msg_addr,
					  map_size);
		if (ret)
			return ret;

		/* Cache what was mapped so later calls can be validated. */
		ep->msi_iatu_mapped = true;
		ep->msi_msg_addr = msg_addr;
		ep->msi_map_size = map_size;
	} else if (WARN_ON_ONCE(ep->msi_msg_addr != msg_addr ||
				ep->msi_map_size != map_size)) {
		/*
		 * The host changed the MSI target address or the required
		 * mapping size changed. Reprogramming the iATU at runtime is
		 * unsafe on this controller, so bail out instead of trying to
		 * update the existing region.
		 */
		return -EINVAL;
	}

	/* Vector number is OR'ed into the low bits of the message data. */
	writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
934
935 /**
936 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
937 * method
938 * @ep: DWC EP device
939 * @func_no: Function number of the endpoint device
940 * @interrupt_num: Interrupt number to be raised
941 *
942 * Return: 0 if success, errno otherwise.
943 */
dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep * ep,u8 func_no,u16 interrupt_num)944 int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
945 u16 interrupt_num)
946 {
947 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
948 struct dw_pcie_ep_func *ep_func;
949 u32 msg_data;
950
951 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
952 if (!ep_func || !ep_func->msix_cap)
953 return -EINVAL;
954
955 msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
956 (interrupt_num - 1);
957
958 dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
959
960 return 0;
961 }
962
/**
 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Reads the vector's address/data directly from the MSI-X table held in
 * one of the function's BARs, maps the message address through a
 * temporary outbound iATU window, writes the message data and unmaps.
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epf_msix_tbl *msix_tbl;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u32 reg, msg_data, vec_ctrl;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	/* Locate the MSI-X table: BAR index + offset within that BAR. */
	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	/* NOTE(review): assumes epf_bar[bir] was set via set_bar — verify. */
	msix_tbl = ep_func->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	/* A masked vector must not be signalled. */
	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq);
1018
1019 /**
1020 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
1021 * @ep: DWC EP device
1022 *
1023 * Cleans up the DWC EP specific resources like eDMA etc... after fundamental
1024 * reset like PERST#. Note that this API is only applicable for drivers
1025 * supporting PERST# or any other methods of fundamental reset.
1026 */
dw_pcie_ep_cleanup(struct dw_pcie_ep * ep)1027 void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
1028 {
1029 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1030
1031 dwc_pcie_debugfs_deinit(pci);
1032 dw_pcie_edma_remove(pci);
1033 }
1034 EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
1035
1036 /**
1037 * dw_pcie_ep_deinit - Deinitialize the endpoint device
1038 * @ep: DWC EP device
1039 *
1040 * Deinitialize the endpoint device. EPC device is not destroyed since that will
1041 * be taken care by Devres.
1042 */
dw_pcie_ep_deinit(struct dw_pcie_ep * ep)1043 void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
1044 {
1045 struct pci_epc *epc = ep->epc;
1046
1047 dw_pcie_ep_cleanup(ep);
1048
1049 pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
1050 epc->mem->window.page_size);
1051
1052 pci_epc_mem_exit(epc);
1053 }
1054 EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
1055
dw_pcie_ep_init_rebar_registers(struct dw_pcie_ep * ep,u8 func_no)1056 static void dw_pcie_ep_init_rebar_registers(struct dw_pcie_ep *ep, u8 func_no)
1057 {
1058 struct dw_pcie_ep_func *ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
1059 unsigned int offset, nbars;
1060 enum pci_barno bar;
1061 u32 reg, i, val;
1062
1063 if (!ep_func)
1064 return;
1065
1066 offset = dw_pcie_ep_find_ext_capability(ep, func_no, PCI_EXT_CAP_ID_REBAR);
1067
1068 if (offset) {
1069 reg = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
1070 nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
1071
1072 /*
1073 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
1074 * size in the range from 1 MB to 512 GB. Advertise support
1075 * for 1 MB BAR size only.
1076 *
1077 * For a BAR that has been configured via dw_pcie_ep_set_bar(),
1078 * advertise support for only that size instead.
1079 */
1080 for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
1081 /*
1082 * While the RESBAR_CAP_REG_* fields are sticky, the
1083 * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
1084 * sticky in certain versions of DWC PCIe, but not all).
1085 *
1086 * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
1087 * the controller when RESBAR_CAP_REG is written, which
1088 * is why RESBAR_CAP_REG is written here.
1089 */
1090 val = dw_pcie_ep_readl_dbi(ep, func_no, offset + PCI_REBAR_CTRL);
1091 bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
1092 if (ep_func->epf_bar[bar])
1093 pci_epc_bar_size_to_rebar_cap(ep_func->epf_bar[bar]->size, &val);
1094 else
1095 val = BIT(4);
1096
1097 dw_pcie_ep_writel_dbi(ep, func_no, offset + PCI_REBAR_CAP, val);
1098 }
1099 }
1100 }
1101
dw_pcie_ep_init_non_sticky_registers(struct dw_pcie * pci)1102 static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
1103 {
1104 struct dw_pcie_ep *ep = &pci->ep;
1105 u8 funcs = ep->epc->max_functions;
1106 u8 func_no;
1107
1108 dw_pcie_dbi_ro_wr_en(pci);
1109
1110 for (func_no = 0; func_no < funcs; func_no++)
1111 dw_pcie_ep_init_rebar_registers(ep, func_no);
1112
1113 dw_pcie_setup(pci);
1114 dw_pcie_dbi_ro_wr_dis(pci);
1115 }
1116
1117 /**
1118 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
1119 * @ep: DWC EP device
1120 *
1121 * Initialize the registers (CSRs) specific to DWC EP. This API should be called
1122 * only when the endpoint receives an active refclk (either from host or
1123 * generated locally).
1124 */
dw_pcie_ep_init_registers(struct dw_pcie_ep * ep)1125 int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
1126 {
1127 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1128 struct dw_pcie_ep_func *ep_func;
1129 struct device *dev = pci->dev;
1130 struct pci_epc *epc = ep->epc;
1131 u32 ptm_cap_base, reg;
1132 u8 hdr_type;
1133 u8 func_no;
1134 void *addr;
1135 int ret;
1136
1137 hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
1138 PCI_HEADER_TYPE_MASK;
1139 if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
1140 dev_err(pci->dev,
1141 "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
1142 hdr_type);
1143 return -EIO;
1144 }
1145
1146 dw_pcie_version_detect(pci);
1147
1148 dw_pcie_iatu_detect(pci);
1149
1150 ret = dw_pcie_edma_detect(pci);
1151 if (ret)
1152 return ret;
1153
1154 ret = -ENOMEM;
1155 if (!ep->ib_window_map) {
1156 ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
1157 GFP_KERNEL);
1158 if (!ep->ib_window_map)
1159 goto err_remove_edma;
1160 }
1161
1162 if (!ep->ob_window_map) {
1163 ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
1164 GFP_KERNEL);
1165 if (!ep->ob_window_map)
1166 goto err_remove_edma;
1167 }
1168
1169 if (!ep->outbound_addr) {
1170 addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
1171 GFP_KERNEL);
1172 if (!addr)
1173 goto err_remove_edma;
1174 ep->outbound_addr = addr;
1175 }
1176
1177 for (func_no = 0; func_no < epc->max_functions; func_no++) {
1178
1179 ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
1180 if (ep_func)
1181 continue;
1182
1183 ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
1184 if (!ep_func)
1185 goto err_remove_edma;
1186
1187 ep_func->func_no = func_no;
1188 ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
1189 PCI_CAP_ID_MSI);
1190 ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
1191 PCI_CAP_ID_MSIX);
1192
1193 list_add_tail(&ep_func->list, &ep->func_list);
1194 }
1195
1196 if (ep->ops->init)
1197 ep->ops->init(ep);
1198
1199 /*
1200 * PCIe r6.0, section 7.9.15 states that for endpoints that support
1201 * PTM, this capability structure is required in exactly one
1202 * function, which controls the PTM behavior of all PTM capable
1203 * functions. This indicates the PTM capability structure
1204 * represents controller-level registers rather than per-function
1205 * registers.
1206 *
1207 * Therefore, PTM capability registers are configured using the
1208 * standard DBI accessors, instead of func_no indexed per-function
1209 * accessors.
1210 */
1211 ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
1212
1213 /*
1214 * PTM responder capability can be disabled only after disabling
1215 * PTM root capability.
1216 */
1217 if (ptm_cap_base) {
1218 dw_pcie_dbi_ro_wr_en(pci);
1219 reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
1220 reg &= ~PCI_PTM_CAP_ROOT;
1221 dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
1222
1223 reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
1224 reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
1225 dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
1226 dw_pcie_dbi_ro_wr_dis(pci);
1227 }
1228
1229 dw_pcie_ep_init_non_sticky_registers(pci);
1230
1231 dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
1232
1233 return 0;
1234
1235 err_remove_edma:
1236 dw_pcie_edma_remove(pci);
1237
1238 return ret;
1239 }
1240 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
1241
1242 /**
1243 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
1244 * @ep: DWC EP device
1245 */
dw_pcie_ep_linkup(struct dw_pcie_ep * ep)1246 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
1247 {
1248 struct pci_epc *epc = ep->epc;
1249
1250 pci_epc_linkup(epc);
1251 }
1252 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
1253
1254 /**
1255 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
1256 * @ep: DWC EP device
1257 *
1258 * Non-sticky registers are also initialized before sending the notification to
1259 * the EPF drivers. This is needed since the registers need to be initialized
1260 * before the link comes back again.
1261 */
dw_pcie_ep_linkdown(struct dw_pcie_ep * ep)1262 void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
1263 {
1264 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1265 struct pci_epc *epc = ep->epc;
1266
1267 /*
1268 * Initialize the non-sticky DWC registers as they would've reset post
1269 * Link Down. This is specifically needed for drivers not supporting
1270 * PERST# as they have no way to reinitialize the registers before the
1271 * link comes back again.
1272 */
1273 dw_pcie_ep_init_non_sticky_registers(pci);
1274
1275 pci_epc_linkdown(epc);
1276 }
1277 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
1278
dw_pcie_ep_get_resources(struct dw_pcie_ep * ep)1279 static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
1280 {
1281 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1282 struct device *dev = pci->dev;
1283 struct platform_device *pdev = to_platform_device(dev);
1284 struct device_node *np = dev->of_node;
1285 struct pci_epc *epc = ep->epc;
1286 struct resource *res;
1287 int ret;
1288
1289 ret = dw_pcie_get_resources(pci);
1290 if (ret)
1291 return ret;
1292
1293 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
1294 if (!res)
1295 return -EINVAL;
1296
1297 ep->phys_base = res->start;
1298 ep->addr_size = resource_size(res);
1299
1300 /*
1301 * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
1302 * dw_pcie_parent_bus_offset() after setting ep->phys_base.
1303 */
1304 pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
1305 ep->phys_base);
1306
1307 ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
1308 if (ret < 0)
1309 epc->max_functions = 1;
1310
1311 return 0;
1312 }
1313
1314 /**
1315 * dw_pcie_ep_init - Initialize the endpoint device
1316 * @ep: DWC EP device
1317 *
1318 * Initialize the endpoint device. Allocate resources and create the EPC
1319 * device with the endpoint framework.
1320 *
1321 * Return: 0 if success, errno otherwise.
1322 */
dw_pcie_ep_init(struct dw_pcie_ep * ep)1323 int dw_pcie_ep_init(struct dw_pcie_ep *ep)
1324 {
1325 int ret;
1326 struct pci_epc *epc;
1327 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1328 struct device *dev = pci->dev;
1329
1330 INIT_LIST_HEAD(&ep->func_list);
1331 ep->msi_iatu_mapped = false;
1332 ep->msi_msg_addr = 0;
1333 ep->msi_map_size = 0;
1334
1335 epc = devm_pci_epc_create(dev, &epc_ops);
1336 if (IS_ERR(epc)) {
1337 dev_err(dev, "Failed to create epc device\n");
1338 return PTR_ERR(epc);
1339 }
1340
1341 ep->epc = epc;
1342 epc_set_drvdata(epc, ep);
1343
1344 ret = dw_pcie_ep_get_resources(ep);
1345 if (ret)
1346 return ret;
1347
1348 if (ep->ops->pre_init)
1349 ep->ops->pre_init(ep);
1350
1351 ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
1352 ep->page_size);
1353 if (ret < 0) {
1354 dev_err(dev, "Failed to initialize address space\n");
1355 return ret;
1356 }
1357
1358 ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
1359 epc->mem->window.page_size);
1360 if (!ep->msi_mem) {
1361 ret = -ENOMEM;
1362 dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
1363 goto err_exit_epc_mem;
1364 }
1365
1366 return 0;
1367
1368 err_exit_epc_mem:
1369 pci_epc_mem_exit(epc);
1370
1371 return ret;
1372 }
1373 EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
1374