/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Marvell 88SE64xx/88SE94xx register IO interface
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#ifndef _MV_CHIPS_H_
#define _MV_CHIPS_H_

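/*
 * The register helpers below expect a local "void __iomem *regs" holding
 * the chip's MMIO base to be in scope at the point of use.  mw32_f() reads
 * the register back after the write so the posted MMIO write is flushed
 * before the caller continues.
 */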
#define mr32(reg)		readl(regs + reg)
#define mw32(reg, val)		writel((val), regs + reg)
#define mw32_f(reg, val)	do {			\
					mw32(reg, val);	\
					mr32(reg);	\
				} while (0)

#define iow32(reg, val)		outl(val, (unsigned long)(regs + reg))
#define ior32(reg)		inl((unsigned long)(regs + reg))
#define iow16(reg, val)		outw(val, (unsigned long)(regs + reg))
#define ior16(reg)		inw((unsigned long)(regs + reg))
#define iow8(reg, val)		outb(val, (unsigned long)(regs + reg))
#define ior8(reg)		inb((unsigned long)(regs + reg))

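/*
 * mvs_cr32()/mvs_cw32() access chip command registers indirectly: the
 * target register offset is written to MVS_CMD_ADDR, then the data is
 * read from or written to MVS_CMD_DATA.
 */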
static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
{
	void __iomem *regs = mvi->regs;
	mw32(MVS_CMD_ADDR, addr);
	return mr32(MVS_CMD_DATA);
}

static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
{
	void __iomem *regs = mvi->regs;
	mw32(MVS_CMD_ADDR, addr);
	mw32(MVS_CMD_DATA, val);
}

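/*
 * Per-PHY serial control/status: PHYs 0-3 are addressed from
 * MVS_P0_SER_CTLSTAT and PHYs 4-7 from MVS_P4_SER_CTLSTAT, with one
 * 32-bit register (4-byte stride) per PHY.
 */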
static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
{
	void __iomem *regs = mvi->regs;
	return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
		mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
}

static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
{
	void __iomem *regs = mvi->regs;
	if (port < 4)
		mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
	else
		mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
}

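/*
 * Generic per-port register access: ports 0-3 live in the bank at "off",
 * ports 4-7 in the bank at "off2", with an 8-byte stride per port.  The
 * accessors below use this split to reach each port's config (CFG),
 * vendor-specific (VSR) and interrupt (INT) registers.
 */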
static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
				u32 off2, u32 port)
{
	void __iomem *regs = mvi->regs + off;
	void __iomem *regs2 = mvi->regs + off2;
	return (port < 4) ? readl(regs + port * 8) :
		readl(regs2 + (port - 4) * 8);
}

static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
				u32 port, u32 val)
{
	void __iomem *regs = mvi->regs + off;
	void __iomem *regs2 = mvi->regs + off2;
	if (port < 4)
		writel(val, regs + port * 8);
	else
		writel(val, regs2 + (port - 4) * 8);
}

static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA,
			MVS_P4_CFG_DATA, port);
}

static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA,
			MVS_P4_CFG_DATA, port, val);
}

static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
						u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR,
			MVS_P4_CFG_ADDR, port, addr);
	mdelay(10);
}

static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA,
			MVS_P4_VSR_DATA, port);
}

static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA,
			MVS_P4_VSR_DATA, port, val);
}

static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
						u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR,
			MVS_P4_VSR_ADDR, port, addr);
	mdelay(10);
}

static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT,
			MVS_P4_INT_STAT, port);
}

static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT,
			MVS_P4_INT_STAT, port, val);
}

static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK,
			MVS_P4_INT_MASK, port);
}

static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
						u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK,
			MVS_P4_INT_MASK, port, val);
}

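/*
 * PHY/SAS register tuning: adjust the PHY timer, allow up to 127 retries,
 * extend the open frame timeout, program the watchdog timer, and configure
 * error handling so operations on other ports are not halted during a
 * wide-port link change.
 */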
static inline void mvs_phy_hacks(struct mvs_info *mvi)
{
	u32 tmp;

	tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(mvi, CMD_PHY_TIMER, tmp);

	/* allow up to 127 retries */
	mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(mvi, CMD_SAS_CTL0, tmp);

	mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);

	/* do not halt other port operations during a wide-port link change */
	mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
}

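/*
 * Acknowledge pending SATA register-set (SRS) interrupts by writing the
 * status bits back, then call the chip-specific clear_active_cmds() hook.
 */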
static inline void mvs_int_sata(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;
	tmp = mr32(MVS_INT_STAT_SRS_0);
	if (tmp)
		mw32(MVS_INT_STAT_SRS_0, tmp);
	MVS_CHIP_DISP->clear_active_cmds(mvi);
}

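/*
 * Top-level interrupt handling: read the global interrupt status, process
 * completions via mvs_int_rx(), dispatch per-PHY port events, handle
 * non-specific NCQ errors and SATA SRS interrupts, then write the status
 * back to acknowledge it.
 */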
static inline void mvs_int_full(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, stat;
	int i;

	stat = mr32(MVS_INT_STAT);
	mvs_int_rx(mvi, false);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
		if (tmp)
			mvs_int_port(mvi, i, tmp);
	}

	if (stat & CINT_NON_SPEC_NCQ_ERROR)
		MVS_CHIP_DISP->non_spec_ncq_error(mvi);

	if (stat & CINT_SRS)
		mvs_int_sata(mvi);

	mw32(MVS_INT_STAT, stat);
}

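/*
 * Ring index helpers: mvs_start_delivery() writes the TX ring producer
 * index (MVS_TX_PROD_IDX); mvs_rx_update() reads the RX ring consumer
 * index register (MVS_RX_CONS_IDX).
 */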
static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
{
	void __iomem *regs = mvi->regs;
	mw32(MVS_TX_PROD_IDX, tx);
}

static inline u32 mvs_rx_update(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	return mr32(MVS_RX_CONS_IDX);
}

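/*
 * PRD (physical region descriptor) geometry: the size of one descriptor
 * and the maximum number of scatter/gather entries (MAX_SG_ENTRY).
 */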
static inline u32 mvs_get_prd_size(void)
{
	return sizeof(struct mvs_prd);
}

static inline u32 mvs_get_prd_count(void)
{
	return MAX_SG_ENTRY;
}

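/*
 * Print the negotiated PCIe link width and speed (1 = 2.5 Gbps,
 * 2 = 5.0 Gbps); skipped on SoC variants and on secondary controller
 * instances (mvi->id > 0).
 */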
static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
{
	u16 link_stat, link_spd;
	const char *spd[] = {
		"Unknown",
		"2.5",
		"5.0",
	};
	if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
		return;

	pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
	link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
	if (link_spd >= 3)
		link_spd = 0;
	dev_printk(KERN_INFO, mvi->dev,
		"mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
		(link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
		spd[link_spd]);
}

static inline u32 mvs_hw_max_link_rate(void)
{
	return MAX_LINK_RATE;
}

#endif /* _MV_CHIPS_H_ */