// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/**
 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
 *				 the endpoint function
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 *
 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
 */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie_ep_func *ep_func;

	list_for_each_entry(ep_func, &ep->func_list, list) {
		if (ep_func->func_no == func_no)
			return ep_func;
	}

	return NULL;
}

static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
				   enum pci_barno bar, int flags)
{
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
	dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_reset_bar - Reset endpoint BAR
 * @pci: DWC PCI device
 * @bar: BAR number of the endpoint
 */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	u8 func_no, funcs;

	funcs = pci->ep.epc->max_functions;

	for (func_no = 0; func_no < funcs; func_no++)
		__dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);

static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
				     u8 cap_ptr, u8 cap)
{
	u8 cap_id, next_cap_ptr;
	u16 reg;

	if (!cap_ptr)
		return 0;

	reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
	cap_id = (reg & 0x00ff);

	if (cap_id > PCI_CAP_ID_MAX)
		return 0;

	if (cap_id == cap)
		return cap_ptr;

	next_cap_ptr = (reg & 0xff00) >> 8;
	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}

static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
	u8 next_cap_ptr;
	u16 reg;

	reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
	next_cap_ptr = (reg & 0x00ff);

	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}

/**
 * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
 * @pci: DWC PCI device
 * @prev_cap: Capability preceding the capability that should be hidden
 * @cap: Capability that should be hidden
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
{
	u16 prev_cap_offset, cap_offset;
	u32 prev_cap_header, cap_header;

	prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
	if (!prev_cap_offset)
		return -EINVAL;

	prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
	cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
	cap_header = dw_pcie_readl_dbi(pci, cap_offset);

	/* cap must immediately follow prev_cap. */
	if (PCI_EXT_CAP_ID(cap_header) != cap)
		return -EINVAL;

	/* Clear next ptr. */
	prev_cap_header &= ~GENMASK(31, 20);

	/* Set next ptr to next ptr of cap. */
	prev_cap_header |= cap_header & GENMASK(31, 20);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);

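/*
 * epc_ops .write_header callback: program the standard configuration header
 * of the given physical function through DBI, with read-only register writes
 * temporarily enabled.
 */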
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
			      hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
			      hdr->cache_line_size);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
			      hdr->subsys_vendor_id);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
			      hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

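/*
 * Map the given parent bus address to a BAR through an inbound iATU window.
 * Reuse the window already assigned to the BAR, if any; otherwise claim the
 * first free window (bar_to_atu[] stores the window index biased by one, so
 * that 0 means "no window assigned").
 */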
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
				  dma_addr_t parent_bus_addr, enum pci_barno bar,
				  size_t size)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	if (!ep->bar_to_atu[bar])
		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
	else
		free_win = ep->bar_to_atu[bar] - 1;

	if (free_win >= pci->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
					  parent_bus_addr, bar, size);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/*
	 * Always increment free_win before assignment, since the value 0 is
	 * used to identify an unallocated mapping.
	 */
	ep->bar_to_atu[bar] = free_win + 1;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}

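/*
 * Claim a free outbound iATU window and program it with the given ATU
 * configuration, recording the parent bus address so the window can later be
 * looked up by dw_pcie_find_index().
 */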
static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
				   struct dw_pcie_ob_atu_cfg *atu)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 free_win;
	int ret;

	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
	if (free_win >= pci->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}

	atu->index = free_win;
	ret = dw_pcie_prog_outbound_atu(pci, atu);
	if (ret)
		return ret;

	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = atu->parent_bus_addr;

	return 0;
}

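/*
 * epc_ops .clear_bar callback: reset the BAR registers and release the
 * inbound iATU window assigned to the BAR, if any.
 */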
static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar] - 1;

	if (!ep->bar_to_atu[bar])
		return;

	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);

	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
	clear_bit(atu_index, ep->ib_window_map);
	ep->epf_bar[bar] = NULL;
	ep->bar_to_atu[bar] = 0;
}

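/*
 * Walk the Resizable BAR extended capability and return the base offset of
 * the entry describing @bar, or 0 if the capability (or a matching entry) is
 * absent.
 */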
static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
						enum pci_barno bar)
{
	u32 reg, bar_index;
	unsigned int offset, nbars;
	int i;

	offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
	if (!offset)
		return offset;

	reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);

	for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
		if (bar_index == bar)
			return offset;
	}

	return 0;
}

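/*
 * Configure a resizable BAR: enable it via the DBI2 shadow register, restrict
 * the advertised "supported sizes" to the requested size, and let the
 * controller derive the BAR mask from the resulting "selected size".
 */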
static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
					struct pci_epf_bar *epf_bar)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	unsigned int rebar_offset;
	u32 rebar_cap, rebar_ctrl;
	int ret;

	rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
	if (!rebar_offset)
		return -EINVAL;

	ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	/*
	 * A BAR mask should not be written for a resizable BAR. The BAR mask
	 * is automatically derived by the controller every time the "selected
	 * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
	 * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
	 * BIT(0) to set the BAR enable bit.
	 */
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	/*
	 * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
	 * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
	 * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
	 */
	rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
	rebar_ctrl &= ~GENMASK(31, 16);
	dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);

	/*
	 * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
	 * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
	 * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
	 */
	dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

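/*
 * Configure a regular (non-resizable) BAR: the BAR size is set by writing
 * "size - 1" to the DBI2 shadow register (the BAR mask), and the BAR flags
 * are written through DBI.
 */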
static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
					   struct pci_epf_bar *epf_bar)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

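/*
 * Return the BAR type advertised by the platform driver's epc_features,
 * defaulting to BAR_PROGRAMMABLE when no .get_features callback is provided.
 */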
static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
						     enum pci_barno bar)
{
	const struct pci_epc_features *epc_features;

	if (!ep->ops->get_features)
		return BAR_PROGRAMMABLE;

	epc_features = ep->ops->get_features(ep);

	return epc_features->bar[bar].type;
}

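/*
 * epc_ops .set_bar callback: program the BAR registers according to the BAR
 * type and map the BAR to the EPF-provided physical address through an
 * inbound iATU window.
 */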
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	enum pci_epc_bar_type bar_type;
	int flags = epf_bar->flags;
	int ret, type;

	/*
	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
	 * 1 and 2 to form a 64-bit BAR.
	 */
	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
		return -EINVAL;

	/*
	 * Certain EPF drivers dynamically change the physical address of a BAR
	 * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
	 * calling clear_bar() would clear the BAR's PCI address assigned by the
	 * host).
	 */
	if (ep->epf_bar[bar]) {
		/*
		 * We can only dynamically change a BAR if the new BAR size and
		 * BAR flags do not differ from the existing configuration.
		 */
		if (ep->epf_bar[bar]->barno != bar ||
		    ep->epf_bar[bar]->size != size ||
		    ep->epf_bar[bar]->flags != flags)
			return -EINVAL;

		/*
		 * When dynamically changing a BAR, skip writing the BAR reg, as
		 * that would clear the BAR's PCI address assigned by the host.
		 */
		goto config_atu;
	}

	bar_type = dw_pcie_ep_get_bar_type(ep, bar);
	switch (bar_type) {
	case BAR_FIXED:
		/*
		 * There is no need to write a BAR mask for a fixed BAR (except
		 * to write 1 to the LSB of the BAR mask register, to enable the
		 * BAR). Write the BAR mask regardless. (The fixed bits in the
		 * BAR mask register will be read-only anyway.)
		 */
		fallthrough;
	case BAR_PROGRAMMABLE:
		ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
		break;
	case BAR_RESIZABLE:
		ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
		break;
	default:
		ret = -EINVAL;
		dev_err(pci->dev, "Invalid BAR type\n");
		break;
	}

	if (ret)
		return ret;

config_atu:
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		type = PCIE_ATU_TYPE_MEM;
	else
		type = PCIE_ATU_TYPE_IO;

	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
				     size);
	if (ret)
		return ret;

	ep->epf_bar[bar] = epf_bar;

	return 0;
}

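/*
 * Look up the outbound iATU window that maps the given parent bus address,
 * returning its index via @atu_index.
 */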
static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
			      u32 *atu_index)
{
	u32 index;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
		if (ep->outbound_addr[index] != addr)
			continue;
		*atu_index = index;
		return 0;
	}

	return -EINVAL;
}

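/*
 * epc_ops .align_addr callback: align the PCI address down to the iATU region
 * alignment and return the aligned base. *offset receives the remainder, and
 * *pci_size is grown to cover the original range, rounded up to the outbound
 * window page size.
 */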
static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
				 size_t *pci_size, size_t *offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u64 mask = pci->region_align - 1;
	size_t ofst = pci_addr & mask;

	*pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
	*offset = ofst;

	return pci_addr & ~mask;
}

static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
				 &atu_index);
	if (ret < 0)
		return;

	ep->outbound_addr[atu_index] = 0;
	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
	clear_bit(atu_index, ep->ob_window_map);
}

static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       phys_addr_t addr, u64 pci_addr, size_t size)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.func_no = func_no;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.parent_bus_addr = addr - pci->parent_bus_offset;
	atu.pci_addr = pci_addr;
	atu.size = size;
	ret = dw_pcie_ep_outbound_atu(ep, &atu);
	if (ret) {
		dev_err(pci->dev, "Failed to enable address\n");
		return ret;
	}

	return 0;
}

static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);

	return 1 << val;
}

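/*
 * epc_ops .set_msi callback: advertise the number of supported MSI vectors
 * by writing log2(nr_irqs) to the Multiple Message Capable field.
 */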
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      u8 nr_irqs)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u8 mmc = order_base_2(nr_irqs);
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSI_FLAGS_QMASK;
	val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val + 1;
}

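/*
 * epc_ops .set_msix callback: program the MSI-X table size (encoded as N-1)
 * and place the MSI-X table and PBA in the given BAR at the given offset.
 */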
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       u16 nr_irqs, enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= nr_irqs - 1; /* encoded as N-1 */
	dw_pcie_writew_dbi(pci, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_PBA;
	val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				unsigned int type, u16 interrupt_num)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->raise_irq)
		return -EINVAL;

	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
}

static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_stop_link(pci);
}

static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_start_link(pci);
}

static const struct pci_epc_features*
dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->get_features)
		return NULL;

	return ep->ops->get_features(ep);
}

static const struct pci_epc_ops epc_ops = {
	.write_header = dw_pcie_ep_write_header,
	.set_bar = dw_pcie_ep_set_bar,
	.clear_bar = dw_pcie_ep_clear_bar,
	.align_addr = dw_pcie_ep_align_addr,
	.map_addr = dw_pcie_ep_map_addr,
	.unmap_addr = dw_pcie_ep_unmap_addr,
	.set_msi = dw_pcie_ep_set_msi,
	.get_msi = dw_pcie_ep_get_msi,
	.set_msix = dw_pcie_ep_set_msix,
	.get_msix = dw_pcie_ep_get_msix,
	.raise_irq = dw_pcie_ep_raise_irq,
	.start = dw_pcie_ep_start,
	.stop = dw_pcie_ep_stop,
	.get_features = dw_pcie_ep_get_features,
};

/**
 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;

	dev_err(dev, "EP cannot raise INTX IRQs\n");

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);

/**
 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	u32 msg_addr_lower, msg_addr_upper, reg;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u16 msg_ctrl, msg_data;
	bool has_upper;
	u64 msg_addr;
	int ret;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	if (has_upper) {
		reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
		reg = ep_func->msi_cap + PCI_MSI_DATA_64;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep_func->msi_cap + PCI_MSI_DATA_32;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	}
	msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);

/**
 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
 *					method
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
				       u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 msg_data;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
		   (interrupt_num - 1);

	dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);

	return 0;
}

/**
 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epf_msix_tbl *msix_tbl;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u32 reg, msg_data, vec_ctrl;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}

/**
 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
 * @ep: DWC EP device
 *
 * Cleans up the DWC EP specific resources (eDMA, debugfs, etc.) after a
 * fundamental reset such as PERST#. Note that this API is only applicable
 * for drivers supporting PERST# or other methods of fundamental reset.
 */
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dwc_pcie_debugfs_deinit(pci);
	dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);

/**
 * dw_pcie_ep_deinit - Deinitialize the endpoint device
 * @ep: DWC EP device
 *
 * Deinitialize the endpoint device. The EPC device is not destroyed since
 * that is taken care of by devres.
 */
void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	dw_pcie_ep_cleanup(ep);

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->window.page_size);

	pci_epc_mem_exit(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);

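/*
 * Reinitialize the registers that are not sticky across a link-down or
 * fundamental reset: restrict the advertised Resizable BAR sizes (or match
 * them to an already-configured BAR) and rerun the core link setup.
 */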
static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
	struct dw_pcie_ep *ep = &pci->ep;
	unsigned int offset;
	unsigned int nbars;
	enum pci_barno bar;
	u32 reg, i, val;

	offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);

	dw_pcie_dbi_ro_wr_en(pci);

	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);

		/*
		 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
		 * size in the range from 1 MB to 512 GB. Advertise support
		 * for 1 MB BAR size only.
		 *
		 * For a BAR that has been configured via dw_pcie_ep_set_bar(),
		 * advertise support for only that size instead.
		 */
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
			/*
			 * While the RESBAR_CAP_REG_* fields are sticky, the
			 * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
			 * sticky in certain versions of DWC PCIe, but not all).
			 *
			 * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
			 * the controller when RESBAR_CAP_REG is written, which
			 * is why RESBAR_CAP_REG is written here.
			 */
			val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
			bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
			if (ep->epf_bar[bar])
				pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
			else
				val = BIT(4);

			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
		}
	}

	dw_pcie_setup(pci);
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
 * @ep: DWC EP device
 *
 * Initialize the registers (CSRs) specific to DWC EP. This API should be called
 * only when the endpoint receives an active refclk (either from host or
 * generated locally).
 */
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct device *dev = pci->dev;
	struct pci_epc *epc = ep->epc;
	u32 ptm_cap_base, reg;
	u8 hdr_type;
	u8 func_no;
	void *addr;
	int ret;

	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
		   PCI_HEADER_TYPE_MASK;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		return ret;

	ret = -ENOMEM;
	if (!ep->ib_window_map) {
		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
						       GFP_KERNEL);
		if (!ep->ib_window_map)
			goto err_remove_edma;
	}

	if (!ep->ob_window_map) {
		ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
						       GFP_KERNEL);
		if (!ep->ob_window_map)
			goto err_remove_edma;
	}

	if (!ep->outbound_addr) {
		addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
				    GFP_KERNEL);
		if (!addr)
			goto err_remove_edma;
		ep->outbound_addr = addr;
	}

	for (func_no = 0; func_no < epc->max_functions; func_no++) {
		ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
		if (ep_func)
			continue;

		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
		if (!ep_func)
			goto err_remove_edma;

		ep_func->func_no = func_no;
		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
							      PCI_CAP_ID_MSI);
		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
							       PCI_CAP_ID_MSIX);

		list_add_tail(&ep_func->list, &ep->func_list);
	}

	if (ep->ops->init)
		ep->ops->init(ep);

	ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);

	/*
	 * PTM responder capability can be disabled only after disabling
	 * PTM root capability.
	 */
	if (ptm_cap_base) {
		dw_pcie_dbi_ro_wr_en(pci);
		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~PCI_PTM_CAP_ROOT;
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);

		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	dw_pcie_ep_init_non_sticky_registers(pci);

	dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);

	return 0;

err_remove_edma:
	dw_pcie_edma_remove(pci);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);

/**
 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
 * @ep: DWC EP device
 */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

/**
 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
 * @ep: DWC EP device
 *
 * Non-sticky registers are also initialized before sending the notification to
 * the EPF drivers. This is needed since the registers need to be initialized
 * before the link comes back again.
 */
void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;

	/*
	 * Initialize the non-sticky DWC registers as they would've reset post
	 * Link Down. This is specifically needed for drivers not supporting
	 * PERST# as they have no way to reinitialize the registers before the
	 * link comes back again.
	 */
	dw_pcie_ep_init_non_sticky_registers(pci);

	pci_epc_linkdown(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);

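/*
 * Gather the platform resources needed in EP mode: the common DWC resources,
 * the "addr_space" outbound window, the resulting parent bus offset, and the
 * optional "max-functions" DT property.
 */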
static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_epc *epc = ep->epc;
	struct resource *res;
	int ret;

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	/*
	 * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
	 * dw_pcie_parent_bus_offset() after setting ep->phys_base.
	 */
	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
							   ep->phys_base);

	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
	if (ret < 0)
		epc->max_functions = 1;

	return 0;
}

/**
 * dw_pcie_ep_init - Initialize the endpoint device
 * @ep: DWC EP device
 *
 * Initialize the endpoint device. Allocate resources and create the EPC
 * device with the endpoint framework.
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	int ret;
	struct pci_epc *epc;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;

	INIT_LIST_HEAD(&ep->func_list);

	epc = devm_pci_epc_create(dev, &epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "Failed to create epc device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	ret = dw_pcie_ep_get_resources(ep);
	if (ret)
		return ret;

	if (ep->ops->pre_init)
		ep->ops->pre_init(ep);

	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
			       ep->page_size);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize address space\n");
		return ret;
	}

	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
					     epc->mem->window.page_size);
	if (!ep->msi_mem) {
		ret = -ENOMEM;
		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
		goto err_exit_epc_mem;
	}

	return 0;

err_exit_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);