// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#include "mv_sas.h"
#include "mv_64xx.h"
#include "mv_chips.h"

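/*
 * Look up the global port type register and flag PHY @i as SAS or
 * SATA; MVS_GBL_PORT_TYPE carries one mode bit per PHY.
 */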
static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	reg = mr32(MVS_GBL_PORT_TYPE);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}

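/*
 * Enable command transmission for @phy_id.  Parts with more than
 * MVS_SOC_PORTS PHYs keep the enable bits at a second shift
 * (PCS_EN_PORT_XMT_SHIFT2) within MVS_PCS.
 */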
static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	if (mvi->chip->n_phy <= MVS_SOC_PORTS)
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}

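/*
 * 64xx-specific SERDES tuning on top of the common mvs_phy_hacks().
 * The VSR mode values are undocumented magic, presumably lifted from
 * Marvell reference code.
 */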
static void mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;

	mvs_phy_hacks(mvi);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		for (i = 0; i < MVS_SOC_PORTS; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
			mvs_write_port_vsr_data(mvi, i, 0x2F0);
		}
	} else {
		/* disable auto port detection */
		mw32(MVS_GBL_PORT_TYPE, 0);
		for (i = 0; i < mvi->chip->n_phy; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
			mvs_write_port_vsr_data(mvi, i, 0x90000000);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
			mvs_write_port_vsr_data(mvi, i, 0x50f2);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
			mvs_write_port_vsr_data(mvi, i, 0x0e);
		}
	}
}

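/*
 * Pulse the per-PHY link reset bit: set it, hold for 10 ms, then
 * restore the saved register value.  PCI(e) parts expose the control
 * in config space (PCR_PHY_CTL/PCR_PHY_CTL2), SoC parts in the
 * memory-mapped MVS_PHY_CTL register.
 */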
static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 reg, tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < MVS_SOC_PORTS)
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
		else
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
	} else
		reg = mr32(MVS_PHY_CTL);

	tmp = reg;
	if (phy_id < MVS_SOC_PORTS)
		tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
	else
		tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < MVS_SOC_PORTS) {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
		} else {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
		}
	} else {
		mw32(MVS_PHY_CTL, tmp);
		mdelay(10);
		mw32(MVS_PHY_CTL, reg);
	}
}

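/*
 * Hard or soft PHY reset.  Note the hard-reset path busy-waits,
 * without a timeout, for PHY_RST_HARD to self-clear.
 */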
static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;
	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	tmp = mvs_read_phy_ctl(mvi, phy_id);
	if (hard == MVS_HARD_RESET)
		tmp |= PHY_RST_HARD;
	else if (hard == MVS_SOFT_RESET)
		tmp |= PHY_RST;
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	if (hard) {
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
		} while (tmp & PHY_RST_HARD);
	}
}

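/* Acknowledge SATA register set (SRS) interrupts, one or all of them. */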
static void
mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			printk(KERN_DEBUG "check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
	} else {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp & (1 << (reg_set % 32))) {
			printk(KERN_DEBUG "register set 0x%x was stopped.\n",
			       reg_set);
			mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}

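/*
 * Full HBA reset: mask interrupts, apply the PHY power fix where
 * needed, assert the self-clearing HBA_RST bit and poll for up to
 * roughly ten seconds for it to drop.
 */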
static int mvs_64xx_chip_reset(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	int i;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}
	}

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(MVS_GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(MVS_GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(MVS_GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
		return -EBUSY;
	}
	return 0;
}

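/*
 * Set the per-PHY disable bit.  PHYs 0-3 are controlled through
 * PCR_PHY_CTL, PHYs 4-7 through PCR_PHY_CTL2; SoC parts use the
 * memory-mapped MVS_PHY_CTL instead.
 */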
static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;
		if (phy_id < 4)
			offs = PCR_PHY_CTL;
		else {
			offs = PCR_PHY_CTL2;
			phy_id -= 4;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		mw32(MVS_PHY_CTL, tmp);
	}
}

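/* Counterpart of mvs_64xx_phy_disable(): clear the disable bit again. */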
static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;
		if (phy_id < 4)
			offs = PCR_PHY_CTL;
		else {
			offs = PCR_PHY_CTL2;
			phy_id -= 4;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		mw32(MVS_PHY_CTL, tmp);
	}
}

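/*
 * One-time controller bring-up: reset the chip, apply the PHY hacks,
 * program the command/FIS/TX/RX ring bases, reset and probe each PHY,
 * then unmask the interrupt sources the driver handles.
 */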
static int mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* write to device control _AND_ device status register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		tmp &= PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */
	/* init phys */
	mvs_64xx_phy_hacks(mvi);

	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);

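	/*
	 * Ring bases are split into lo/hi words.  The high half is
	 * written as (addr >> 16) >> 16 rather than addr >> 32 so the
	 * shift stays well defined when dma_addr_t is 32 bits wide.
	 */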
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* set phy local SAS address */
		/* should set little endian SAS address to 64xx chip */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * The coalescing count field maxes out at 0x1ff; our slot count
	 * (0x200) would truncate it to 0, so clamp it.
	 */
	tmp = 0;
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}

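/*
 * Map the register windows; the constants pick which PCI BARs hold the
 * main and the extended register space on 64xx parts.
 */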
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 4, 2))
		return 0;
	return -1;
}

static void mvs_64xx_iounmap(struct mvs_info *mvi)
{
	mvs_iounmap(mvi->regs);
	mvs_iounmap(mvi->regs_ex);
}

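/* Global interrupt gate: toggle INT_EN in MVS_GBL_CTL. */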
static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp | INT_EN);
}

static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp & ~INT_EN);
}

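/*
 * Decide whether an interrupt is ours.  Zero means it is not; all-ones
 * usually indicates the device has dropped off the bus.  SoC parts
 * lack a global status register, so the interrupt is always claimed.
 */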
static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs;
	u32 stat;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (stat == 0 || stat == 0xffffffff)
			return 0;
	} else
		stat = 1;
	return stat;
}

static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	/* clear CMD_CMPLT ASAP */
	mw32_f(MVS_INT_STAT, CINT_DONE);

	spin_lock(&mvi->lock);
	mvs_int_full(mvi);
	spin_unlock(&mvi->lock);

	return IRQ_HANDLED;
}

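/*
 * Write the slot's bit into both command-active registers and
 * busy-wait, without a timeout, until the hardware clears it in each.
 */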
static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;
	mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
	mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
}

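/*
 * Stop command issue: for SATA first acknowledge the SRS interrupt of
 * register set @tfs, then ack CINT_CI_STOP and set the per-port stop
 * bits (0xFF00) in MVS_PCS.
 */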
static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (type == PORT_TYPE_SATA) {
		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
		mw32(MVS_INT_STAT_SRS_0, tmp);
	}
	mw32(MVS_INT_STAT, CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}

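/*
 * Release the SATA register set in *@tfs.  Sets 0-15 are tracked in
 * MVS_PCS and higher ones in MVS_CTL; any pending SRS interrupt for
 * the set is acknowledged before it is marked unmapped.
 */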
static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(MVS_PCS);
		mw32(MVS_PCS, tmp & ~offs);
	} else {
		tmp = mr32(MVS_CTL);
		mw32(MVS_CTL, tmp & ~offs);
	}

	tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
	if (tmp)
		mw32(MVS_INT_STAT_SRS_0, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}

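/*
 * Claim a free SATA register set and store its index in *@tfs.
 * Returns 0 on success, or MVS_ID_NOT_MAPPED once all
 * mvi->chip->srs_sz sets are busy.
 */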
static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(MVS_PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(MVS_CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			*tfs = i;

			if (i < 16)
				mw32(MVS_PCS, tmp | offs);
			else
				mw32(MVS_CTL, tmp | offs);
			tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
			if (tmp)
				mw32(MVS_INT_STAT_SRS_0, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}

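/* Translate a scatterlist into the hardware PRD (physical region descriptor) table. */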
static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;
	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}
}

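/* Return 1 once out-of-band (OOB) signalling has been detected on PHY @i. */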
static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;
	mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
	phy_st = mvs_read_port_cfg_data(mvi, i);
	if (phy_st & PHY_OOB_DTCTD)
		return 1;
	return 0;
}

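/*
 * Refresh cached PHY state: negotiated and min/max link rates from the
 * PHY status word, plus identify/attached-device info and the attached
 * SAS address from the port configuration registers.
 */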
static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
				  struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

	phy->minimum_linkrate =
		(phy->phy_status &
			PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
	phy->maximum_linkrate =
		(phy->phy_status &
			PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
	phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
	phy->att_dev_sas_addr =
		(u64) mvs_read_port_cfg_data(mvi, i) << 32;
	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
	phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
	phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
}

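/*
 * Link work-around: select the PHY_MODE6 late-clock setting according
 * to whether the PHY negotiated 1.5 Gbps.
 */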
static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
	tmp = mvs_read_port_vsr_data(mvi, i);
	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
	     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
	    SAS_LINK_RATE_1_5_GBPS)
		tmp &= ~PHY_MODE6_LATECLK;
	else
		tmp |= PHY_MODE6_LATECLK;
	mvs_write_port_vsr_data(mvi, i, tmp);
}

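/*
 * Program the minimum/maximum link rate fields (bits 8-11 and 12-15 of
 * the PHY control word) and hard-reset the PHY to renegotiate.
 */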
static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
				       struct sas_phy_linkrates *rates)
{
	u32 lrmin = 0, lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmin = (rates->minimum_linkrate << 8);
	lrmax = (rates->maximum_linkrate << 12);

	if (lrmin) {
		tmp &= ~(0xf << 8);
		tmp |= lrmin;
	}
	if (lrmax) {
		tmp &= ~(0xf << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
}

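/*
 * Briefly drop the upper halves of MVS_PCS and MVS_CTL, then restore
 * them, clearing whatever commands the controller had active.
 */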
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;
	tmp = mr32(MVS_PCS);
	mw32(MVS_PCS, tmp & 0xFFFF);
	mw32(MVS_PCS, tmp);
	tmp = mr32(MVS_CTL);
	mw32(MVS_CTL, tmp & 0xFFFF);
	mw32(MVS_CTL, tmp);
}

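/*
 * Accessors for the vendor SPI controller in the extended register
 * window, used by the core driver to reach the attached flash part.
 */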
static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	return ior32(SPI_DATA_REG_64XX);
}

static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex;

	iow32(SPI_DATA_REG_64XX, data);
}

static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
				 u32 *dwCmd,
				 u8 cmd,
				 u8 read,
				 u8 length,
				 u32 addr)
{
	u32 dwTmp;

	dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
	if (read)
		dwTmp |= 1U << 23;

	if (addr != MV_MAX_U32) {
		dwTmp |= 1U << 22;
		dwTmp |= (addr & 0x0003FFFF);
	}

	*dwCmd = dwTmp;
	return 0;
}

static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex;
	int retry;

	for (retry = 0; retry < 1; retry++) {
		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
		iow32(SPI_CMD_REG_64XX, cmd);
		iow32(SPI_CTRL_REG_64XX,
		      SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
	}

	return 0;
}

static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex;
	u32 i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = ior32(SPI_CTRL_REG_64XX);
		if (!(dwTmp & SPI_CTRL_SPISTART))
			return 0;
		msleep(10);
	}

	return -1;
}

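/*
 * Pad the tail of a PRD table: point every unused entry at the shared
 * bulk buffer so any hardware access past the real entries lands in a
 * harmless scratch area.
 */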
static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
			     int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma = mvi->bulk_buffer_dma;

	buf_prd += from;
	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
		buf_prd->addr = cpu_to_le64(buf_dma);
		buf_prd->len = cpu_to_le32(buf_len);
		++buf_prd;
	}
}

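/*
 * Runtime interrupt coalescing: @time == 0 disables coalescing,
 * otherwise program the count (clamped to the 0x1ff hardware limit)
 * and the timeout.
 */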
static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;
	u32 tmp = 0;
	/*
	 * The coalescing count field maxes out at 0x1ff; our slot count
	 * (0x200) would truncate it to 0, so clamp it.
	 */
	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
	} else {
		if (MVS_CHIP_SLOT_SZ > 0x1ff)
			mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
		else
			mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);

		tmp = 0x10000 | time;
		mw32(MVS_INT_COAL_TMOUT, tmp);
	}
}

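/* Dispatch table wiring the 64xx-specific implementations into the core driver. */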
const struct mvs_dispatch mvs_64xx_dispatch = {
	"mv64xx",
	mvs_64xx_init,
	NULL,
	mvs_64xx_ioremap,
	mvs_64xx_iounmap,
	mvs_64xx_isr,
	mvs_64xx_isr_status,
	mvs_64xx_interrupt_enable,
	mvs_64xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_64xx_command_active,
	mvs_64xx_clear_srs_irq,
	mvs_64xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_64xx_assign_reg_set,
	mvs_64xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_64xx_make_prd,
	mvs_64xx_detect_porttype,
	mvs_64xx_oob_done,
	mvs_64xx_fix_phy_info,
	mvs_64xx_phy_work_around,
	mvs_64xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_64xx_phy_disable,
	mvs_64xx_phy_enable,
	mvs_64xx_phy_reset,
	mvs_64xx_stp_reset,
	mvs_64xx_clear_active_cmds,
	mvs_64xx_spi_read_data,
	mvs_64xx_spi_write_data,
	mvs_64xx_spi_buildcmd,
	mvs_64xx_spi_issuecmd,
	mvs_64xx_spi_waitdataready,
	mvs_64xx_fix_dma,
	mvs_64xx_tune_interrupt,
	NULL,
};