xref: /linux/drivers/bcma/driver_pcie2.c (revision 552c69b36ebd966186573b9c7a286b390935cce1)
1 /*
2  * Broadcom specific AMBA
3  * PCIe Gen 2 Core
4  *
5  * Copyright 2014, Broadcom Corporation
6  * Copyright 2014, Rafał Miłecki <zajec5@gmail.com>
7  *
8  * Licensed under the GNU/GPL. See COPYING for details.
9  */
10 
11 #include "bcma_private.h"
12 #include <linux/bcma/bcma.h>
13 #include <linux/pci.h>
14 
15 /**************************************************
16  * R/W ops.
17  **************************************************/
18 
#if 0
/* Indirect config-space read: select @addr via CONFIGINDADDR, read the
 * address register back to flush the posted write, then fetch the value
 * from CONFIGINDDATA. Currently unused.
 */
static u32 bcma_core_pcie2_cfg_read(struct bcma_drv_pcie2 *pcie2, u32 addr)
{
	u32 data;

	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	/* Dummy read-back to make sure the address selection landed */
	pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR);
	data = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);

	return data;
}
#endif
27 
/* Indirect config-space write: select the target register through
 * CONFIGINDADDR, then store the value through CONFIGINDDATA.
 */
static void bcma_core_pcie2_cfg_write(struct bcma_drv_pcie2 *pcie2, u32 reg,
				      u32 data)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, reg);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, data);
}
34 
35 /**************************************************
36  * Init.
37  **************************************************/
38 
bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 * pcie2,bool enable)39 static u32 bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 *pcie2,
40 						bool enable)
41 {
42 	u32 val;
43 
44 	/* restore back to default */
45 	val = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
46 	val |= PCIE2_CLKC_DLYPERST;
47 	val &= ~PCIE2_CLKC_DISSPROMLD;
48 	if (enable) {
49 		val &= ~PCIE2_CLKC_DLYPERST;
50 		val |= PCIE2_CLKC_DISSPROMLD;
51 	}
52 	pcie2_write32(pcie2, (BCMA_CORE_PCIE2_CLK_CONTROL), val);
53 	/* flush */
54 	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
55 }
56 
/* Program the three LTR (Latency Tolerance Reporting) value registers
 * with the required constants.
 *
 * Reuses bcma_core_pcie2_cfg_write() instead of open-coding the
 * CONFIGINDADDR/CONFIGINDDATA write pairs three times, keeping all
 * indirect config writes in this file on one code path.
 *
 * NOTE(review): 0x844/0x848/0x84C offsets and the packed latency values
 * come from vendor reference code — presumably LTR0..LTR2; no symbolic
 * names are visible here.
 */
static void bcma_core_pcie2_set_ltr_vals(struct bcma_drv_pcie2 *pcie2)
{
	/* LTR0 */
	bcma_core_pcie2_cfg_write(pcie2, 0x844, 0x883c883c);
	/* LTR1 */
	bcma_core_pcie2_cfg_write(pcie2, 0x848, 0x88648864);
	/* LTR2 */
	bcma_core_pcie2_cfg_write(pcie2, 0x84C, 0x90039003);
}
69 
bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 * pcie2)70 static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
71 {
72 	u8 core_rev = pcie2->core->id.rev;
73 	u32 devstsctr2;
74 
75 	if (core_rev < 2 || core_rev == 10 || core_rev > 13)
76 		return;
77 
78 	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
79 		      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
80 	devstsctr2 = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
81 	if (devstsctr2 & PCIE2_CAP_DEVSTSCTRL2_LTRENAB) {
82 		/* force the right LTR values */
83 		bcma_core_pcie2_set_ltr_vals(pcie2);
84 
85 		/* TODO:
86 		 *si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
87 		 */
88 
89 		/* enable the LTR */
90 		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
91 		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
92 			      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
93 		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, devstsctr2);
94 
95 		/* set the LTR state to be active */
96 		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
97 			      PCIE2_LTR_ACTIVE);
98 		usleep_range(1000, 2000);
99 
100 		/* set the LTR state to be sleep */
101 		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
102 			      PCIE2_LTR_SLEEP);
103 		usleep_range(1000, 2000);
104 	}
105 }
106 
pciedev_crwlpciegen2(struct bcma_drv_pcie2 * pcie2)107 static void pciedev_crwlpciegen2(struct bcma_drv_pcie2 *pcie2)
108 {
109 	u8 core_rev = pcie2->core->id.rev;
110 	bool pciewar160, pciewar162;
111 
112 	pciewar160 = core_rev == 7 || core_rev == 9 || core_rev == 11;
113 	pciewar162 = core_rev == 5 || core_rev == 7 || core_rev == 8 ||
114 		     core_rev == 9 || core_rev == 11;
115 
116 	if (!pciewar160 && !pciewar162)
117 		return;
118 
119 /* TODO */
120 #if 0
121 	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL,
122 		    PCIE_DISABLE_L1CLK_GATING);
123 #if 0
124 	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
125 		      PCIEGEN2_COE_PVT_TL_CTRL_0);
126 	pcie2_mask32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA,
127 		     ~(1 << COE_PVT_TL_CTRL_0_PM_DIS_L1_REENTRY_BIT));
128 #endif
129 #endif
130 }
131 
pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 * pcie2)132 static void pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 *pcie2)
133 {
134 	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_PMCR_REFUP);
135 	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x1f);
136 }
137 
pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 * pcie2)138 static void pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 *pcie2)
139 {
140 	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_SBMBX);
141 	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 1 << 0);
142 }
143 
pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 * pcie2)144 static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
145 {
146 	struct bcma_drv_cc *drv_cc = &pcie2->core->bus->drv_cc;
147 	u8 core_rev = pcie2->core->id.rev;
148 	u32 alp_khz, pm_value;
149 
150 	if (core_rev <= 13) {
151 		alp_khz = bcma_pmu_get_alp_clock(drv_cc) / 1000;
152 		pm_value = (1000000 * 2) / alp_khz;
153 		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
154 			      PCIE2_PVT_REG_PM_CLK_PERIOD);
155 		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, pm_value);
156 	}
157 }
158 
bcma_core_pcie2_init(struct bcma_drv_pcie2 * pcie2)159 void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
160 {
161 	struct bcma_bus *bus = pcie2->core->bus;
162 	struct bcma_chipinfo *ci = &bus->chipinfo;
163 	u32 tmp;
164 
165 	tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
166 	if ((tmp & 0xe) >> 1 == 2)
167 		bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
168 
169 	switch (bus->chipinfo.id) {
170 	case BCMA_CHIP_ID_BCM4360:
171 	case BCMA_CHIP_ID_BCM4352:
172 		pcie2->reqsize = 1024;
173 		break;
174 	default:
175 		pcie2->reqsize = 128;
176 		break;
177 	}
178 
179 	if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
180 		bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
181 	bcma_core_pcie2_hw_ltr_war(pcie2);
182 	pciedev_crwlpciegen2(pcie2);
183 	pciedev_reg_pm_clk_period(pcie2);
184 	pciedev_crwlpciegen2_180(pcie2);
185 	pciedev_crwlpciegen2_182(pcie2);
186 }
187 
188 /**************************************************
189  * Runtime ops.
190  **************************************************/
191 
bcma_core_pcie2_up(struct bcma_drv_pcie2 * pcie2)192 void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
193 {
194 	struct bcma_bus *bus = pcie2->core->bus;
195 	struct pci_dev *dev = bus->host_pci;
196 	int err;
197 
198 	err = pcie_set_readrq(dev, pcie2->reqsize);
199 	if (err)
200 		bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
201 }
202