// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
/**
 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
 *				 the endpoint function
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 *
 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
 */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie_ep_func *ep_func;

	list_for_each_entry(ep_func, &ep->func_list, list) {
		if (ep_func->func_no == func_no)
			return ep_func;
	}

	return NULL;
}

static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
				   enum pci_barno bar, int flags)
{
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
	dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
	}
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_reset_bar - Reset endpoint BAR
 * @pci: DWC PCI device
 * @bar: BAR number of the endpoint
 */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	u8 func_no, funcs;

	funcs = pci->ep.epc->max_functions;

	for (func_no = 0; func_no < funcs; func_no++)
		__dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);

static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
	return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
				 cap, ep, func_no);
}

/**
 * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
 * @pci: DWC PCI device
 * @prev_cap: Capability preceding the capability that should be hidden
 * @cap: Capability that should be hidden
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
{
	u16 prev_cap_offset, cap_offset;
	u32 prev_cap_header, cap_header;

	prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
	if (!prev_cap_offset)
		return -EINVAL;

	prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
	cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
	cap_header = dw_pcie_readl_dbi(pci, cap_offset);

	/* cap must immediately follow prev_cap. */
	if (PCI_EXT_CAP_ID(cap_header) != cap)
		return -EINVAL;

	/* Clear next ptr. */
	prev_cap_header &= ~GENMASK(31, 20);

	/* Set next ptr to next ptr of cap. */
	prev_cap_header |= cap_header & GENMASK(31, 20);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
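
/*
 * A minimal usage sketch (hypothetical; the capability IDs are illustrative,
 * not prescriptive): a controller driver whose hardware advertises an
 * extended capability it cannot support, e.g. PTM, can unlink it from the
 * extended capability list, provided it knows the capability preceding it:
 *
 *	ret = dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_L1SS,
 *					     PCI_EXT_CAP_ID_PTM);
 *	if (ret)
 *		dev_warn(pci->dev, "Failed to hide PTM capability: %d\n", ret);
 *
 * This only succeeds when the capability to hide immediately follows
 * @prev_cap, as enforced above.
 */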

static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				   struct pci_epf_header *hdr)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
			      hdr->subclass_code | hdr->baseclass_code << 8);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
			      hdr->cache_line_size);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
			      hdr->subsys_vendor_id);
	dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
			      hdr->interrupt_pin);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
				  dma_addr_t parent_bus_addr, enum pci_barno bar,
				  size_t size)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	if (!ep->bar_to_atu[bar])
		free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
	else
		free_win = ep->bar_to_atu[bar] - 1;

	if (free_win >= pci->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
					  parent_bus_addr, bar, size);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/*
	 * Always increment free_win before assignment, since value 0 is used
	 * to identify an unallocated mapping.
	 */
	ep->bar_to_atu[bar] = free_win + 1;
	set_bit(free_win, ep->ib_window_map);

	return 0;
}

static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
				   struct dw_pcie_ob_atu_cfg *atu)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 free_win;
	int ret;

	free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
	if (free_win >= pci->num_ob_windows) {
		dev_err(pci->dev, "No free outbound window\n");
		return -EINVAL;
	}

	atu->index = free_win;
	ret = dw_pcie_prog_outbound_atu(pci, atu);
	if (ret)
		return ret;

	set_bit(free_win, ep->ob_window_map);
	ep->outbound_addr[free_win] = atu->parent_bus_addr;

	return 0;
}

static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				 struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar] - 1;

	if (!ep->bar_to_atu[bar])
		return;

	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);

	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
	clear_bit(atu_index, ep->ib_window_map);
	ep->epf_bar[bar] = NULL;
	ep->bar_to_atu[bar] = 0;
}

static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
						enum pci_barno bar)
{
	u32 reg, bar_index;
	unsigned int offset, nbars;
	int i;

	offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
	if (!offset)
		return offset;

	reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);

	for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
		if (bar_index == bar)
			return offset;
	}

	return 0;
}

static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
					struct pci_epf_bar *epf_bar)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	unsigned int rebar_offset;
	u32 rebar_cap, rebar_ctrl;
	int ret;

	rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
	if (!rebar_offset)
		return -EINVAL;

	ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
	if (ret)
		return ret;

	dw_pcie_dbi_ro_wr_en(pci);

	/*
	 * A BAR mask should not be written for a resizable BAR. The BAR mask
	 * is automatically derived by the controller every time the "selected
	 * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
	 * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
	 * BIT(0) to set the BAR enable bit.
	 */
	dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	/*
	 * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
	 * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
	 * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
	 */
	rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
	rebar_ctrl &= ~GENMASK(31, 16);
	dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);

	/*
	 * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
	 * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
	 * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
	 */
	dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
					   struct pci_epf_bar *epf_bar)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	int flags = epf_bar->flags;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
	dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
		dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
						     enum pci_barno bar)
{
	const struct pci_epc_features *epc_features;

	if (!ep->ops->get_features)
		return BAR_PROGRAMMABLE;

	epc_features = ep->ops->get_features(ep);

	return epc_features->bar[bar].type;
}

static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      struct pci_epf_bar *epf_bar)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	enum pci_barno bar = epf_bar->barno;
	size_t size = epf_bar->size;
	enum pci_epc_bar_type bar_type;
	int flags = epf_bar->flags;
	int ret, type;

	/*
	 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
	 * 1 and 2 to form a 64-bit BAR.
	 */
	if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
		return -EINVAL;

	/*
	 * Certain EPF drivers dynamically change the physical address of a BAR
	 * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
	 * calling clear_bar() would clear the BAR's PCI address assigned by the
	 * host).
	 */
	if (ep->epf_bar[bar]) {
		/*
		 * We can only dynamically change a BAR if the new BAR size and
		 * BAR flags do not differ from the existing configuration.
		 */
		if (ep->epf_bar[bar]->barno != bar ||
		    ep->epf_bar[bar]->size != size ||
		    ep->epf_bar[bar]->flags != flags)
			return -EINVAL;

		/*
		 * When dynamically changing a BAR, skip writing the BAR reg, as
		 * that would clear the BAR's PCI address assigned by the host.
		 */
		goto config_atu;
	}

	bar_type = dw_pcie_ep_get_bar_type(ep, bar);
	switch (bar_type) {
	case BAR_FIXED:
		/*
		 * There is no need to write a BAR mask for a fixed BAR (except
		 * to write 1 to the LSB of the BAR mask register, to enable the
		 * BAR). Write the BAR mask regardless. (The fixed bits in the
		 * BAR mask register will be read-only anyway.)
		 */
		fallthrough;
	case BAR_PROGRAMMABLE:
		ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
		break;
	case BAR_RESIZABLE:
		ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
		break;
	default:
		ret = -EINVAL;
		dev_err(pci->dev, "Invalid BAR type\n");
		break;
	}

	if (ret)
		return ret;

config_atu:
	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		type = PCIE_ATU_TYPE_MEM;
	else
		type = PCIE_ATU_TYPE_IO;

	ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
				     size);
	if (ret)
		return ret;

	ep->epf_bar[bar] = epf_bar;

	return 0;
}
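
/*
 * A minimal sketch of how an EPF driver reaches dw_pcie_ep_set_bar() through
 * the EPC core (illustrative only; the BAR choice and size are assumptions,
 * and epf_bar->phys_addr would normally be populated by pci_epf_alloc_space()).
 * Note the pairing rule enforced above: a 64-bit BAR must start on an even
 * BAR number.
 *
 *	struct pci_epf_bar *epf_bar = &epf->bar[BAR_0];
 *
 *	epf_bar->barno = BAR_0;
 *	epf_bar->size = SZ_1M;
 *	epf_bar->flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
 *	ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, epf_bar);
 */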

static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
			      u32 *atu_index)
{
	u32 index;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
		if (ep->outbound_addr[index] != addr)
			continue;
		*atu_index = index;
		return 0;
	}

	return -EINVAL;
}

static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
				 size_t *pci_size, size_t *offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u64 mask = pci->region_align - 1;
	size_t ofst = pci_addr & mask;

	*pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
	*offset = ofst;

	return pci_addr & ~mask;
}
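
/*
 * Worked example for dw_pcie_ep_align_addr() (illustrative numbers): with
 * pci->region_align = SZ_64K, window page_size = SZ_4K, pci_addr = 0x10001234
 * and *pci_size = 4 on entry:
 *
 *	mask      = 0xffff
 *	ofst      = 0x1234
 *	*pci_size = ALIGN(0x1234 + 4, SZ_4K) = 0x2000
 *	*offset   = 0x1234
 *	return value: 0x10000000 (ATU-alignable base address)
 *
 * The caller then maps 0x2000 bytes at 0x10000000 and accesses the target at
 * mapping + 0x1234.
 */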

static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				  phys_addr_t addr)
{
	int ret;
	u32 atu_index;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
				 &atu_index);
	if (ret < 0)
		return;

	ep->outbound_addr[atu_index] = 0;
	dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
	clear_bit(atu_index, ep->ob_window_map);
}

static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       phys_addr_t addr, u64 pci_addr, size_t size)
{
	int ret;
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ob_atu_cfg atu = { 0 };

	atu.func_no = func_no;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.parent_bus_addr = addr - pci->parent_bus_offset;
	atu.pci_addr = pci_addr;
	atu.size = size;
	ret = dw_pcie_ep_outbound_atu(ep, &atu);
	if (ret) {
		dev_err(pci->dev, "Failed to enable address\n");
		return ret;
	}

	return 0;
}

static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);

	return 1 << val;
}

static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			      u8 nr_irqs)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u8 mmc = order_base_2(nr_irqs);
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSI_FLAGS_QMASK;
	val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
	dw_pcie_dbi_ro_wr_en(pci);
	dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
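
/*
 * Worked example for dw_pcie_ep_set_msi() (illustrative values): nr_irqs = 8
 * gives mmc = order_base_2(8) = 3, so the Multiple Message Capable field
 * advertises 2^3 = 8 vectors. A non-power-of-two request rounds up, e.g.
 * nr_irqs = 6 also yields order_base_2(6) = 3 and advertises 8 vectors.
 */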

static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val + 1;
}

static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			       u16 nr_irqs, enum pci_barno bir, u32 offset)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 val, reg;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	dw_pcie_dbi_ro_wr_en(pci);

	reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
	val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= nr_irqs - 1; /* encoded as N-1 */
	dw_pcie_writew_dbi(pci, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	val = offset | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	reg = ep_func->msix_cap + PCI_MSIX_PBA;
	val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
	dw_pcie_ep_writel_dbi(ep, func_no, reg, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				unsigned int type, u16 interrupt_num)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->raise_irq)
		return -EINVAL;

	return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
}

static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_stop_link(pci);
}

static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_start_link(pci);
}

static const struct pci_epc_features *
dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);

	if (!ep->ops->get_features)
		return NULL;

	return ep->ops->get_features(ep);
}

static const struct pci_epc_ops epc_ops = {
	.write_header	= dw_pcie_ep_write_header,
	.set_bar	= dw_pcie_ep_set_bar,
	.clear_bar	= dw_pcie_ep_clear_bar,
	.align_addr	= dw_pcie_ep_align_addr,
	.map_addr	= dw_pcie_ep_map_addr,
	.unmap_addr	= dw_pcie_ep_unmap_addr,
	.set_msi	= dw_pcie_ep_set_msi,
	.get_msi	= dw_pcie_ep_get_msi,
	.set_msix	= dw_pcie_ep_set_msix,
	.get_msix	= dw_pcie_ep_get_msix,
	.raise_irq	= dw_pcie_ep_raise_irq,
	.start		= dw_pcie_ep_start,
	.stop		= dw_pcie_ep_stop,
	.get_features	= dw_pcie_ep_get_features,
};

/**
 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;

	dev_err(dev, "EP cannot raise INTX IRQs\n");

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);

/**
 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
			     u8 interrupt_num)
{
	u32 msg_addr_lower, msg_addr_upper, reg;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u16 msg_ctrl, msg_data;
	bool has_upper;
	u64 msg_addr;
	int ret;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msi_cap)
		return -EINVAL;

	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
	reg = ep_func->msi_cap + PCI_MSI_FLAGS;
	msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
	reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
	msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	if (has_upper) {
		reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
		msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
		reg = ep_func->msi_cap + PCI_MSI_DATA_64;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	} else {
		msg_addr_upper = 0;
		reg = ep_func->msi_cap + PCI_MSI_DATA_32;
		msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
	}
	msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
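
/*
 * A minimal sketch of the usual call site (hypothetical "foo" glue driver):
 * vendor drivers typically implement struct dw_pcie_ep_ops::raise_irq by
 * dispatching to the helpers in this file.
 *
 *	static int foo_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
 *					 unsigned int type, u16 interrupt_num)
 *	{
 *		switch (type) {
 *		case PCI_IRQ_INTX:
 *			return dw_pcie_ep_raise_intx_irq(ep, func_no);
 *		case PCI_IRQ_MSI:
 *			return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
 *		case PCI_IRQ_MSIX:
 *			return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */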

/**
 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
 *					method
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
				       u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	u32 msg_data;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
		   (interrupt_num - 1);

	dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq_doorbell);

/**
 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
 * @ep: DWC EP device
 * @func_no: Function number of the endpoint device
 * @interrupt_num: Interrupt number to be raised
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
			      u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epf_msix_tbl *msix_tbl;
	struct dw_pcie_ep_func *ep_func;
	struct pci_epc *epc = ep->epc;
	size_t map_size = sizeof(u32);
	size_t offset;
	u32 reg, msg_data, vec_ctrl;
	u32 tbl_offset;
	u64 msg_addr;
	int ret;
	u8 bir;

	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
	if (!ep_func || !ep_func->msix_cap)
		return -EINVAL;

	reg = ep_func->msix_cap + PCI_MSIX_TABLE;
	tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
	vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;

	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
		dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
		return -EPERM;
	}

	msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
	ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
				  map_size);
	if (ret)
		return ret;

	writel(msg_data, ep->msi_mem + offset);

	dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq);

/**
 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
 * @ep: DWC EP device
 *
 * Cleans up the DWC EP specific resources (e.g. eDMA) after a fundamental
 * reset such as PERST#. Note that this API is only applicable for drivers
 * supporting PERST# or any other method of fundamental reset.
 */
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dwc_pcie_debugfs_deinit(pci);
	dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
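
/*
 * A minimal sketch of the intended call site (hypothetical glue driver;
 * foo_pcie and its power-off helper are assumptions): drivers that can
 * observe PERST# typically call dw_pcie_ep_cleanup() from their PERST#
 * assert handler.
 *
 *	static void foo_pcie_perst_assert(struct foo_pcie *foo)
 *	{
 *		dw_pcie_ep_cleanup(&foo->pci.ep);
 *		foo_pcie_power_off(foo);
 *	}
 */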

/**
 * dw_pcie_ep_deinit - Deinitialize the endpoint device
 * @ep: DWC EP device
 *
 * Deinitialize the endpoint device. The EPC device is not destroyed here,
 * since that is taken care of by devres.
 */
void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	dw_pcie_ep_cleanup(ep);

	pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
			      epc->mem->window.page_size);

	pci_epc_mem_exit(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);

static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
	struct dw_pcie_ep *ep = &pci->ep;
	unsigned int offset;
	unsigned int nbars;
	enum pci_barno bar;
	u32 reg, i, val;

	offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);

	dw_pcie_dbi_ro_wr_en(pci);

	if (offset) {
		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
		nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);

		/*
		 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
		 * size in the range from 1 MB to 512 GB. Advertise support
		 * for 1 MB BAR size only.
		 *
		 * For a BAR that has been configured via dw_pcie_ep_set_bar(),
		 * advertise support for only that size instead.
		 */
		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
			/*
			 * While the RESBAR_CAP_REG_* fields are sticky, the
			 * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
			 * sticky in certain versions of DWC PCIe, but not all).
			 *
			 * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
			 * the controller when RESBAR_CAP_REG is written, which
			 * is why RESBAR_CAP_REG is written here.
			 */
			val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
			bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
			if (ep->epf_bar[bar])
				pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
			else
				val = BIT(4);

			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
		}
	}

	dw_pcie_setup(pci);
	dw_pcie_dbi_ro_wr_dis(pci);
}

/**
 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
 * @ep: DWC EP device
 *
 * Initialize the registers (CSRs) specific to DWC EP. This API should be
 * called only when the endpoint receives an active refclk (either from host
 * or generated locally).
 */
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dw_pcie_ep_func *ep_func;
	struct device *dev = pci->dev;
	struct pci_epc *epc = ep->epc;
	u32 ptm_cap_base, reg;
	u8 hdr_type;
	u8 func_no;
	void *addr;
	int ret;

	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
		   PCI_HEADER_TYPE_MASK;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
		dev_err(pci->dev,
			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
			hdr_type);
		return -EIO;
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		return ret;

	ret = -ENOMEM;
	if (!ep->ib_window_map) {
		ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
						       GFP_KERNEL);
		if (!ep->ib_window_map)
			goto err_remove_edma;
	}

	if (!ep->ob_window_map) {
		ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
						       GFP_KERNEL);
		if (!ep->ob_window_map)
			goto err_remove_edma;
	}

	if (!ep->outbound_addr) {
		addr = devm_kcalloc(dev, pci->num_ob_windows,
				    sizeof(phys_addr_t), GFP_KERNEL);
		if (!addr)
			goto err_remove_edma;
		ep->outbound_addr = addr;
	}

	for (func_no = 0; func_no < epc->max_functions; func_no++) {
		ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
		if (ep_func)
			continue;

		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
		if (!ep_func)
			goto err_remove_edma;

		ep_func->func_no = func_no;
		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
							      PCI_CAP_ID_MSI);
		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
							       PCI_CAP_ID_MSIX);

		list_add_tail(&ep_func->list, &ep->func_list);
	}

	if (ep->ops->init)
		ep->ops->init(ep);

	ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);

	/*
	 * PTM responder capability can be disabled only after disabling
	 * PTM root capability.
	 */
	if (ptm_cap_base) {
		dw_pcie_dbi_ro_wr_en(pci);
		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~PCI_PTM_CAP_ROOT;
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);

		reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
		reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
		dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
		dw_pcie_dbi_ro_wr_dis(pci);
	}

	dw_pcie_ep_init_non_sticky_registers(pci);

	dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);

	return 0;

err_remove_edma:
	dw_pcie_edma_remove(pci);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
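
/*
 * A minimal probe-time sketch (hypothetical glue driver, error handling
 * trimmed). The split matters: dw_pcie_ep_init() only allocates resources and
 * registers the EPC, while dw_pcie_ep_init_registers() touches the DBI and so
 * must wait for a stable refclk; pci_epc_init_notify() then lets EPF drivers
 * configure the function.
 *
 *	ret = dw_pcie_ep_init(ep);
 *	if (ret)
 *		return ret;
 *
 *	(wait for refclk, e.g. PERST# deassert)
 *
 *	ret = dw_pcie_ep_init_registers(ep);
 *	if (ret) {
 *		dw_pcie_ep_deinit(ep);
 *		return ret;
 *	}
 *
 *	pci_epc_init_notify(ep->epc);
 */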

/**
 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
 * @ep: DWC EP device
 */
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
	struct pci_epc *epc = ep->epc;

	pci_epc_linkup(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);

/**
 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
 * @ep: DWC EP device
 *
 * Non-sticky registers are also initialized before sending the notification
 * to the EPF drivers. This is needed since the registers need to be
 * initialized before the link comes back again.
 */
void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct pci_epc *epc = ep->epc;

	/*
	 * Initialize the non-sticky DWC registers as they would've reset post
	 * Link Down. This is specifically needed for drivers not supporting
	 * PERST# as they have no way to reinitialize the registers before the
	 * link comes back again.
	 */
	dw_pcie_ep_init_non_sticky_registers(pci);

	pci_epc_linkdown(epc);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);

static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_epc *epc = ep->epc;
	struct resource *res;
	int ret;

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	/*
	 * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
	 * dw_pcie_parent_bus_offset() after setting ep->phys_base.
	 */
	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
							   ep->phys_base);

	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
	if (ret < 0)
		epc->max_functions = 1;

	return 0;
}

/**
 * dw_pcie_ep_init - Initialize the endpoint device
 * @ep: DWC EP device
 *
 * Initialize the endpoint device. Allocate resources and create the EPC
 * device with the endpoint framework.
 *
 * Return: 0 if success, errno otherwise.
 */
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	int ret;
	struct pci_epc *epc;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct device *dev = pci->dev;

	INIT_LIST_HEAD(&ep->func_list);

	epc = devm_pci_epc_create(dev, &epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "Failed to create epc device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	ret = dw_pcie_ep_get_resources(ep);
	if (ret)
		return ret;

	if (ep->ops->pre_init)
		ep->ops->pre_init(ep);

	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
			       ep->page_size);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize address space\n");
		return ret;
	}

	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
					     epc->mem->window.page_size);
	if (!ep->msi_mem) {
		ret = -ENOMEM;
		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
		goto err_exit_epc_mem;
	}

	return 0;

err_exit_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);