// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *	Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
 *	Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *
 * Baikal-T1 PCIe controller driver
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/types.h>

#include "pcie-designware.h"

/* Baikal-T1 System CCU control registers */
#define BT1_CCU_PCIE_CLKC 0x140
#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)

#define BT1_CCU_PCIE_RSTC 0x144
#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)

#define BT1_CCU_PCIE_PMSC 0x148
#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
#define BT1_CCU_PCIE_LTSSM_L0 0x11
#define BT1_CCU_PCIE_LTSSM_L0S 0x12
#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
#define BT1_CCU_PCIE_L1_PENDING BIT(12)
#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
#define BT1_CCU_PCIE_WAKE_DET BIT(24)
#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)

#define BT1_CCU_PCIE_GENC 0x14c
#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
#define BT1_CCU_PCIE_MGMT_EN BIT(3)
#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)

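/* Link is fully up once the LTSSM has reached any of the L0/L0s/L1/L2 states */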
#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
({ \
        int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
        __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
})

/* Baikal-T1 PCIe specific control registers */
#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)

#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
#define BT1_PCIE_AXI2MGM_DONE BIT(30)
#define BT1_PCIE_AXI2MGM_BUSY BIT(31)

#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)

#define BT1_PCIE_AXI2MGM_READDATA 0xd10
#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)

/* Generic Baikal-T1 PCIe interface resources */
#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)

/* PCIe bus setup delays and timeouts */
#define BT1_PCIE_RST_DELAY_MS 100
#define BT1_PCIE_RUN_DELAY_US 100
#define BT1_PCIE_REQ_DELAY_US 1
#define BT1_PCIE_REQ_TIMEOUT_US 1000
#define BT1_PCIE_LNK_DELAY_US 1000
#define BT1_PCIE_LNK_TIMEOUT_US 1000000

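/* The DWC clocks and resets which are mandatory for the Baikal-T1 PCIe setup */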
static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
        DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
};

static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
        DW_PCIE_REF_CLK,
};

static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
        DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
};

static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
        DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
        DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
};

struct bt1_pcie {
        struct dw_pcie dw;
        struct platform_device *pdev;
        struct regmap *sys_regs;
};
#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)

/*
 * The Baikal-T1 MMIO space must be accessed with dword-aligned instructions
 * only. Note the methods below are optimized to perform the dword operations
 * with minimum overhead since those are the most frequently used ones.
 */
static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
{
        unsigned int ofs = (uintptr_t)addr & 0x3;

        if (!IS_ALIGNED((uintptr_t)addr, size))
                return -EINVAL;

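        /* Fetch the containing dword and shift the addressed bytes down to bit 0 */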
        *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
        if (size == 4) {
                return 0;
        } else if (size == 2) {
                *val &= 0xffff;
                return 0;
        } else if (size == 1) {
                *val &= 0xff;
                return 0;
        }

        return -EINVAL;
}

static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
{
        unsigned int ofs = (uintptr_t)addr & 0x3;
        u32 tmp, mask;

        if (!IS_ALIGNED((uintptr_t)addr, size))
                return -EINVAL;

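        /* Dwords are written as is; smaller accesses are emulated with a dword RMW */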
        if (size == 4) {
                writel(val, addr);
                return 0;
        } else if (size == 2 || size == 1) {
                mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
                tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
                tmp |= (val & mask) << ofs * BITS_PER_BYTE;
                writel(tmp, addr - ofs);
                return 0;
        }

        return -EINVAL;
}

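/* DWC DBI accessors built on top of the dword-aligned MMIO helpers above */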
static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
                             size_t size)
{
        int ret;
        u32 val;

        ret = bt1_pcie_read_mmio(base + reg, size, &val);
        if (ret) {
                dev_err(pci->dev, "Read DBI address failed\n");
                return ~0U;
        }

        return val;
}

static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
                               size_t size, u32 val)
{
        int ret;

        ret = bt1_pcie_write_mmio(base + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI address failed\n");
}

static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
                                size_t size, u32 val)
{
        struct bt1_pcie *btpci = to_bt1_pcie(pci);
        int ret;

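        /* Temporarily route the DBI IO below to the shadow (DBI2) CSR space */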
        regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
                           BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);

        ret = bt1_pcie_write_mmio(base + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI2 address failed\n");

        regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
                           BT1_CCU_PCIE_DBI2_MODE, 0);
}

static int bt1_pcie_start_link(struct dw_pcie *pci)
{
        struct bt1_pcie *btpci = to_bt1_pcie(pci);
        u32 val;
        int ret;

        /*
         * Enable the LTSSM and make sure it has managed to establish both the
         * PHY and data links. This procedure is sufficient to reach the
         * 2.5 GT/s speed.
         */
        regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
                           BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);

        ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
                                       (val & BT1_CCU_PCIE_SMLH_LINKUP),
                                       BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
        if (ret) {
                dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
                return ret;
        }

        ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
                                       (val & BT1_CCU_PCIE_RDLH_LINKUP),
                                       BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
        if (ret) {
                dev_err(pci->dev, "LTSSM failed to set data link up\n");
                return ret;
        }

        /*
         * Activate the direct speed change once the link has been established
         * in an attempt to reach a higher bus performance (up to Gen.3,
         * 8.0 GT/s). This is required at least to reach the 8.0 GT/s speed.
         */
        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val |= PORT_LOGIC_SPEED_CHANGE;
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

        ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
                                       BT1_CCU_PCIE_LTSSM_LINKUP(val),
                                       BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
        if (ret)
                dev_err(pci->dev, "LTSSM failed to get into L0 state\n");

        return ret;
}

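/* Forcibly bring the link down by disabling the LTSSM */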
static void bt1_pcie_stop_link(struct dw_pcie *pci)
{
        struct bt1_pcie *btpci = to_bt1_pcie(pci);

        regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
                           BT1_CCU_PCIE_LTSSM_EN, 0);
}

static const struct dw_pcie_ops bt1_pcie_ops = {
        .read_dbi = bt1_pcie_read_dbi,
        .write_dbi = bt1_pcie_write_dbi,
        .write_dbi2 = bt1_pcie_write_dbi2,
        .start_link = bt1_pcie_start_link,
        .stop_link = bt1_pcie_stop_link,
};

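/* Own config space is backed by the DBI, so use the dword-only accessors */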
static struct pci_ops bt1_pci_ops = {
        .map_bus = dw_pcie_own_conf_map_bus,
        .read = pci_generic_config_read32,
        .write = pci_generic_config_write32,
};

static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
{
        struct device *dev = btpci->dw.dev;
        int i;

        /* DBI accesses are supposed to be performed by dword-aligned IOs only */
        btpci->dw.pp.bridge->ops = &bt1_pci_ops;

        /* These CSRs are in MMIO so we won't need to check the regmap-methods status */
        btpci->sys_regs =
                syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
        if (IS_ERR(btpci->sys_regs))
                return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
                                     "Failed to get syscon\n");

        /* Make sure all the required resources have been specified */
        for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
                if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
                        dev_err(dev, "App clocks set is incomplete\n");
                        return -ENOENT;
                }
        }

        for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
                if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
                        dev_err(dev, "Core clocks set is incomplete\n");
                        return -ENOENT;
                }
        }

        for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
                if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
                        dev_err(dev, "App resets set is incomplete\n");
                        return -ENOENT;
                }
        }

        for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
                if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
                        dev_err(dev, "Core resets set is incomplete\n");
                        return -ENOENT;
                }
        }

        return 0;
}

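/* Disable the LTSSM and put the controller back into the reset state */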
static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
{
        struct device *dev = btpci->dw.dev;
        struct dw_pcie *pci = &btpci->dw;
        int ret;

        /* Make sure the LTSSM is disabled */
        regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
                           BT1_CCU_PCIE_LTSSM_EN, 0);

        /*
         * Application reset controls are trigger-based so assert the core
         * resets only.
         */
        ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
        if (ret)
                dev_err(dev, "Failed to assert core resets\n");

        /*
         * The clocks are expected to be disabled by default on the init stage
         * (judging by the clk enable counters), so only disable them on the
         * de-initialization path.
         */
        if (!init) {
                clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);

                clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
        }

        /* The peripheral devices are unavailable anyway so reset them too */
        gpiod_set_value_cansleep(pci->pe_rst, 1);

        /* Make sure all the resets are settled */
        msleep(BT1_PCIE_RST_DELAY_MS);
}

/*
 * Implements the cold reset procedure in accordance with the reference manual
 * and the available PM signals.
 */
static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
{
        struct device *dev = btpci->dw.dev;
        struct dw_pcie *pci = &btpci->dw;
        u32 val;
        int ret;

        /* First get out of the Power/Hot reset state */
        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert power reset\n");
                return ret;
        }

        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert hot reset\n");
                goto err_assert_pwr_rst;
        }

        /* Wait for the PM-core to stop requesting the PHY reset */
        ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
                                       !(val & BT1_CCU_PCIE_REQ_PHY_RST),
                                       BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
        if (ret) {
                dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
                goto err_assert_hot_rst;
        }

        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert PHY reset\n");
                goto err_assert_hot_rst;
        }

        /* The clocks can now be enabled, though the ref one is the crucial one here */
        ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
        if (ret) {
                dev_err(dev, "Failed to enable app clocks\n");
                goto err_assert_phy_rst;
        }

        ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
        if (ret) {
                dev_err(dev, "Failed to enable ref clocks\n");
                goto err_disable_app_clk;
        }

        /* Wait for the PM to stop requesting the controller core reset */
        ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
                                       !(val & BT1_CCU_PCIE_REQ_CORE_RST),
                                       BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
        if (ret) {
                dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
                goto err_disable_core_clk;
        }

        /* The PCS-PIPE interface and the controller core can now be activated */
        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert PIPE reset\n");
                goto err_disable_core_clk;
        }

        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert core reset\n");
                goto err_assert_pipe_rst;
        }

        /* It's recommended to reset the core and application logic together */
        ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
        if (ret) {
                dev_err(dev, "Failed to reset app domain\n");
                goto err_assert_core_rst;
        }

        /* The sticky/non-sticky CSR flags can now be taken out of reset too */
        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert sticky reset\n");
                goto err_assert_core_rst;
        }

        ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
        if (ret) {
                dev_err(dev, "Failed to deassert non-sticky reset\n");
                goto err_assert_sticky_rst;
        }

        /* Activate the PCIe bus peripheral devices */
        gpiod_set_value_cansleep(pci->pe_rst, 0);

        /* Make sure the state is settled (the LTSSM is still disabled though) */
        usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);

        return 0;

err_assert_sticky_rst:
        reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);

err_assert_core_rst:
        reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);

err_assert_pipe_rst:
        reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);

err_disable_core_clk:
        clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);

err_disable_app_clk:
        clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);

err_assert_phy_rst:
        reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);

err_assert_hot_rst:
        reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);

err_assert_pwr_rst:
        reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);

        return ret;
}

static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct bt1_pcie *btpci = to_bt1_pcie(pci);
        int ret;

        ret = bt1_pcie_get_resources(btpci);
        if (ret)
                return ret;

        bt1_pcie_full_stop_bus(btpci, true);

        return bt1_pcie_cold_start_bus(btpci);
}

static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct bt1_pcie *btpci = to_bt1_pcie(pci);

        bt1_pcie_full_stop_bus(btpci, false);
}

static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
        .init = bt1_pcie_host_init,
        .deinit = bt1_pcie_host_deinit,
};

static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
{
        struct bt1_pcie *btpci;

        btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
        if (!btpci)
                return ERR_PTR(-ENOMEM);

        btpci->pdev = pdev;

        platform_set_drvdata(pdev, btpci);

        return btpci;
}

static int bt1_pcie_add_port(struct bt1_pcie *btpci)
{
        struct device *dev = &btpci->pdev->dev;
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                return ret;

        btpci->dw.version = DW_PCIE_VER_460A;
        btpci->dw.dev = dev;
        btpci->dw.ops = &bt1_pcie_ops;

        btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
        btpci->dw.pp.ops = &bt1_pcie_host_ops;

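        /* Let the DWC core request the platform clocks and resets (REQ_RES) */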
        dw_pcie_cap_set(&btpci->dw, REQ_RES);

        ret = dw_pcie_host_init(&btpci->dw.pp);

        return dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
}

static void bt1_pcie_del_port(struct bt1_pcie *btpci)
{
        dw_pcie_host_deinit(&btpci->dw.pp);
}

static int bt1_pcie_probe(struct platform_device *pdev)
{
        struct bt1_pcie *btpci;

        btpci = bt1_pcie_create_data(pdev);
        if (IS_ERR(btpci))
                return PTR_ERR(btpci);

        return bt1_pcie_add_port(btpci);
}

static void bt1_pcie_remove(struct platform_device *pdev)
{
        struct bt1_pcie *btpci = platform_get_drvdata(pdev);

        bt1_pcie_del_port(btpci);
}

static const struct of_device_id bt1_pcie_of_match[] = {
        { .compatible = "baikal,bt1-pcie" },
        {},
};
MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);

static struct platform_driver bt1_pcie_driver = {
        .probe = bt1_pcie_probe,
        .remove_new = bt1_pcie_remove,
        .driver = {
                .name = "bt1-pcie",
                .of_match_table = bt1_pcie_of_match,
        },
};
module_platform_driver(bt1_pcie_driver);

MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
MODULE_LICENSE("GPL");