// SPDX-License-Identifier: GPL-2.0-only
/*
 * Calxeda Highbank AHCI SATA platform driver
 * Copyright 2012 Calxeda, Inc.
 *
 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>

#include "ahci.h"

#define CPHY_MAP(dev, addr)	((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr)		(((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL		0x80a0
#define SERDES_CR_ADDR		0x80a1
#define SERDES_CR_DATA		0x80a2
#define CR_BUSY			0x0001
#define CR_START		0x0001
#define CR_WR_RDN		0x0002
#define CPHY_TX_INPUT_STS	0x2001
#define CPHY_RX_INPUT_STS	0x2002
#define CPHY_SATA_TX_OVERRIDE	0x8000
#define CPHY_SATA_RX_OVERRIDE	0x4000
#define CPHY_TX_OVERRIDE	0x2004
#define CPHY_RX_OVERRIDE	0x2005
#define SPHY_LANE		0x100
#define SPHY_HALF_RATE		0x0001
#define CPHY_SATA_DPLL_MODE	0x0700
#define CPHY_SATA_DPLL_SHIFT	8
#define CPHY_SATA_DPLL_RESET	(1 << 11)
#define CPHY_SATA_TX_ATTEN	0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT	10
#define CPHY_PHY_COUNT		6
#define CPHY_LANE_COUNT		4
#define CPHY_PORT_COUNT		(CPHY_PHY_COUNT * CPHY_LANE_COUNT)

static DEFINE_SPINLOCK(cphy_lock);
/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys
 */
struct phy_lane_info {
	void __iomem *phy_base;
	u8 lane_mapping;
	u8 phy_devs;
	u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK		0
#define SLOAD		1
#define SDATA		2
#define SGPIO_PINS	3
#define SGPIO_PORTS	8

struct ecx_plat_data {
	u32		n_ports;
	/* number of extra clocks that the SGPIO PIC controller expects */
	u32		pre_clocks;
	u32		post_clocks;
	struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
	u32		sgpio_pattern;
	u32		port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS	3
#define ECX_ACTIVITY_BITS	0x300000
#define ECX_ACTIVITY_SHIFT	0
#define ECX_LOCATE_BITS		0x80000
#define ECX_LOCATE_SHIFT	1
#define ECX_FAULT_BITS		0x400000
#define ECX_FAULT_SHIFT		2
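
/*
 * Each SATA port drives three SGPIO signals (activity, locate, fault).
 * Return the bit, within the 32-bit shift pattern, for signal @shift of the
 * LED group that @port is wired to.
 */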
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

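/*
 * Update the driver's cached SGPIO bit pattern for @port from the LED
 * message @state: set or clear the port's activity, locate and fault bits.
 */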
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	if (state & ECX_ACTIVITY_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	if (state & ECX_LOCATE_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	if (state & ECX_FAULT_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
}

/*
 * Tell the LED controller that the signal has changed by raising the clock
 * line for 50 us and then lowering it for 50 us.
 */
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
	udelay(50);
	gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
	udelay(50);
}

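/*
 * libata enclosure-management transmit_led_message() callback: fold the new
 * LED state for this port into the cached SGPIO pattern, then bit-bang the
 * whole pattern out over the SCLOCK/SLOAD/SDATA GPIO lines, framed by the
 * DT-configured number of pre- and post-clocks.
 */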
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);
	/*
	 * bit-bang out the SGPIO pattern, by consuming a bit and then
	 * clocking it out.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}

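/*
 * Claim the three "calxeda,sgpio" GPIO lines, read the LED wiring order and
 * the extra pre/post clock counts from the device tree, and advertise
 * LED-type enclosure management to libata.
 */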
static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;

	for (i = 0; i < SGPIO_PINS; i++) {
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
					     GPIOD_OUT_HIGH);
		if (IS_ERR(gpiod)) {
			dev_err(dev, "failed to get GPIO %d\n", i);
			continue;
		}
		gpiod_set_consumer_name(gpiod, "CX SGPIO");

		pdata->sgpio_gpiod[i] = gpiod;
	}
	of_property_read_u32_array(np, "calxeda,led-order",
				   pdata->port_to_sgpio,
				   pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				 &pdata->post_clocks))
		pdata->post_clocks = 0;

	/* store em_loc */
	hpriv->em_loc = 0;
	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

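/*
 * Raw combo PHY register access: program the map register at offset 0x800 to
 * select the PHY device and register page, then access the register through
 * its mapped window. cphy_lock serialises the map/access pair.
 */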
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	u32 data;
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}

static void combo_phy_wait_for_ready(u8 sata_port)
{
	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
		udelay(5);
}

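/*
 * Indirect SERDES "CR" register access: wait for any previous operation to
 * finish, latch the target address, then trigger the read or write through
 * SERDES_CR_CTL.
 */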
static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

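/*
 * Force the lane's TX attenuation to the DT-supplied value. A value with
 * bit 3 set is treated as "no override" and leaves the hardware default in
 * place.
 */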
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

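/*
 * Take control of the lane's RX settings, program the requested DPLL mode,
 * then pulse the DPLL reset bit and give the PHY time to settle.
 */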
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	msleep(15);
}

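/*
 * Poll (with a bounded number of reads) for the lane's half-rate status bit
 * to clear, then apply the RX DPLL mode override and the per-port TX
 * attenuation override.
 */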
static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
					lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

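/*
 * Walk the "calxeda,port-phys" phandle list: map each referenced combo PHY
 * the first time it is seen, and record each port's lane number, PHY device
 * index ("phydev") and register base. Finally read the optional per-port
 * "calxeda,tx-atten" values.
 */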
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp;
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL) {
				of_node_put(phy_data.np);
				return 0;
			}
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				   tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}

/*
 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
 * drives. Retrying the phy hard reset can work around the issue, but the
 * drive may fail again. In less than 150 out of 15000 test runs, it took
 * more than 10 tries for the link to be established (but never more than
 * 35). Triple the maximum observed retry count to provide plenty of margin
 * for rare events and to guarantee that the link is established.
 *
 * Also, the default 2 second time-out on a failed drive is too long in
 * this situation. The uboot implementation of the same driver function
 * uses a much shorter time-out period and never experiences a time out
 * issue. Reducing the time-out to 500ms improves the responsiveness.
 * The other timing constants were kept the same as the stock AHCI driver.
 * This change was also tested 15000 times on 24 drives and none of them
 * experienced a time out.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	static const unsigned int timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	hpriv->stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.status = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* If the status is 1, we are connected, but the link did not
		 * come up. So retry resetting the link again.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message	= ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags			= AHCI_FLAG_COMMON,
	.pio_mask		= ATA_PIO4,
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &ahci_highbank_ops,
};

static const struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	if (!irq)
		return -EINVAL;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}
	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->irq = irq;
	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;

	ahci_save_initial_config(dev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ahci_host_activate(host, &ahci_highbank_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	return rc;
}

#ifdef CONFIG_PM_SLEEP
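/*
 * Refuse to suspend on firmware that cannot cope with it
 * (AHCI_HFLAG_NO_SUSPEND); otherwise mask HBA interrupts and let libata
 * suspend the host.
 */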
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	ata_host_suspend(host, PMSG_SUSPEND);
	return 0;
}

static int ahci_highbank_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	int rc;

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			 ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");