xref: /linux/drivers/ata/acard-ahci.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /*
4  *  acard-ahci.c - ACard AHCI SATA support
5  *
6  *  Maintained by:  Tejun Heo <tj@kernel.org>
7  *		    Please ALWAYS copy linux-ide@vger.kernel.org
8  *		    on emails.
9  *
10  *  Copyright 2010 Red Hat, Inc.
11  *
12  * libata documentation is available via 'make {ps|pdf}docs',
13  * as Documentation/driver-api/libata.rst
14  *
15  * AHCI hardware documentation:
16  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
17  * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/pci.h>
23 #include <linux/blkdev.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/device.h>
28 #include <linux/dmi.h>
29 #include <linux/gfp.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_cmnd.h>
32 #include <linux/libata.h>
33 #include "ahci.h"
34 
35 #define DRV_NAME	"acard-ahci"
36 #define DRV_VERSION	"1.0"
37 
38 /*
39   The received FIS structure is limited to 80h (128 bytes).
40 */
41 
42 #define ACARD_AHCI_RX_FIS_SZ 128
43 
44 enum {
45 	AHCI_PCI_BAR		= 5,
46 };
47 
48 enum board_ids {
49 	board_acard_ahci,
50 };
51 
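/*
 * Scatter/gather descriptor format consumed by the ATP8620.  This appears
 * to replace the standard AHCI PRD entry: a 64-bit buffer address split
 * into low/high dwords, a reserved dword, and a byte count whose bit 31
 * marks the final entry (EOT); a single segment may not exceed 64k.
 */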
52 struct acard_sg {
53 	__le32			addr;
54 	__le32			addr_hi;
55 	__le32			reserved;
56 	__le32			size;	 /* bit 31 (EOT) max==0x10000 (64k) */
57 };
58 
59 static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
60 static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
61 static int acard_ahci_port_start(struct ata_port *ap);
62 static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
63 
64 #ifdef CONFIG_PM_SLEEP
65 static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
66 static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
67 #endif
68 
69 static const struct scsi_host_template acard_ahci_sht = {
70 	AHCI_SHT("acard-ahci"),
71 };
72 
73 static struct ata_port_operations acard_ops = {
74 	.inherits		= &ahci_ops,
75 	.qc_prep		= acard_ahci_qc_prep,
76 	.qc_fill_rtf		= acard_ahci_qc_fill_rtf,
77 	.port_start             = acard_ahci_port_start,
78 };
79 
80 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
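/*
 * AHCI_HFLAGS() stashes host flags in ata_port_info.private_data;
 * acard_ahci_init_one() copies them into hpriv->flags, as ahci.c does.
 */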
81 
82 static const struct ata_port_info acard_ahci_port_info[] = {
83 	[board_acard_ahci] =
84 	{
85 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
86 		.flags		= AHCI_FLAG_COMMON,
87 		.pio_mask	= ATA_PIO4,
88 		.udma_mask	= ATA_UDMA6,
89 		.port_ops	= &acard_ops,
90 	},
91 };
92 
93 static const struct pci_device_id acard_ahci_pci_tbl[] = {
94 	/* ACard */
95 	{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
96 
97 	{ }    /* terminate list */
98 };
99 
100 static struct pci_driver acard_ahci_pci_driver = {
101 	.name			= DRV_NAME,
102 	.id_table		= acard_ahci_pci_tbl,
103 	.probe			= acard_ahci_init_one,
104 	.remove			= ata_pci_remove_one,
105 #ifdef CONFIG_PM_SLEEP
106 	.suspend		= acard_ahci_pci_device_suspend,
107 	.resume			= acard_ahci_pci_device_resume,
108 #endif
109 };
110 
111 #ifdef CONFIG_PM_SLEEP
112 static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
113 {
114 	struct ata_host *host = pci_get_drvdata(pdev);
115 	struct ahci_host_priv *hpriv = host->private_data;
116 	void __iomem *mmio = hpriv->mmio;
117 	u32 ctl;
118 
119 	if (mesg.event & PM_EVENT_SUSPEND &&
120 	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
121 		dev_err(&pdev->dev,
122 			"BIOS update required for suspend/resume\n");
123 		return -EIO;
124 	}
125 
126 	if (mesg.event & PM_EVENT_SLEEP) {
127 		/* AHCI spec rev1.1 section 8.3.3:
128 		 * Software must disable interrupts prior to requesting a
129 		 * transition of the HBA to D3 state.
130 		 */
131 		ctl = readl(mmio + HOST_CTL);
132 		ctl &= ~HOST_IRQ_EN;
133 		writel(ctl, mmio + HOST_CTL);
134 		readl(mmio + HOST_CTL); /* flush */
135 	}
136 
137 	return ata_pci_device_suspend(pdev, mesg);
138 }
139 
140 static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
141 {
142 	struct ata_host *host = pci_get_drvdata(pdev);
143 	int rc;
144 
145 	rc = ata_pci_device_do_resume(pdev);
146 	if (rc)
147 		return rc;
148 
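	/*
	 * A controller that actually went through system suspend lost its
	 * state, so it needs a full reset and re-init before the ports
	 * are resumed.
	 */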
149 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
150 		rc = ahci_reset_controller(host);
151 		if (rc)
152 			return rc;
153 
154 		ahci_init_controller(host);
155 	}
156 
157 	ata_host_resume(host);
158 
159 	return 0;
160 }
161 #endif
162 
163 static void acard_ahci_pci_print_info(struct ata_host *host)
164 {
165 	struct pci_dev *pdev = to_pci_dev(host->dev);
166 	u16 cc;
167 	const char *scc_s;
168 
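	/*
	 * Config word at offset 0x0a is PCI_CLASS_DEVICE: sub-class in the
	 * low byte, base class in the high byte.
	 */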
169 	pci_read_config_word(pdev, 0x0a, &cc);
170 	if (cc == PCI_CLASS_STORAGE_IDE)
171 		scc_s = "IDE";
172 	else if (cc == PCI_CLASS_STORAGE_SATA)
173 		scc_s = "SATA";
174 	else if (cc == PCI_CLASS_STORAGE_RAID)
175 		scc_s = "RAID";
176 	else
177 		scc_s = "unknown";
178 
179 	ahci_print_info(host, scc_s);
180 }
181 
182 static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
183 {
184 	struct scatterlist *sg;
185 	struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
186 	unsigned int si, last_si = 0;
187 
188 	/*
189 	 * Next, the S/G list.
190 	 */
191 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
192 		dma_addr_t addr = sg_dma_address(sg);
193 		u32 sg_len = sg_dma_len(sg);
194 
195 		/*
196 		 * ACard note:
197 		 * We must set an end-of-table (EOT) bit,
198 		 * and the segment cannot exceed 64k (0x10000)
199 		 */
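		/*
		 * Split the DMA address into low/high dwords; the double
		 * 16-bit shift keeps the expression valid even when
		 * dma_addr_t is only 32 bits wide.
		 */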
200 		acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
201 		acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
202 		acard_sg[si].size = cpu_to_le32(sg_len);
203 		last_si = si;
204 	}
205 
206 	acard_sg[last_si].size |= cpu_to_le32(1 << 31);	/* set EOT */
207 
208 	return si;
209 }
210 
211 static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
212 {
213 	struct ata_port *ap = qc->ap;
214 	struct ahci_port_priv *pp = ap->private_data;
215 	int is_atapi = ata_is_atapi(qc->tf.protocol);
216 	void *cmd_tbl;
217 	u32 opts;
218 	const u32 cmd_fis_len = 5; /* five dwords */
219 
220 	/*
221 	 * Fill in command table information.  First, the header,
222 	 * a SATA Register - Host to Device command FIS.
223 	 */
224 	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
225 
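	/*
	 * ata_tf_to_fis() builds the 5-dword (20 byte) Host-to-Device
	 * Register FIS at the head of the command table.
	 */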
226 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
227 	if (is_atapi) {
228 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
229 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
230 	}
231 
232 	if (qc->flags & ATA_QCFLAG_DMAMAP)
233 		acard_ahci_fill_sg(qc, cmd_tbl);
234 
235 	/*
236 	 * Fill in command slot information.
237 	 *
238 	 * ACard note: prd table length not filled in
239 	 */
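	/*
	 * Command header DW0: bits 4:0 carry the FIS length in dwords and
	 * bits 15:12 the target port multiplier port; AHCI_CMD_WRITE,
	 * AHCI_CMD_ATAPI and AHCI_CMD_PREFETCH set the W, A and P bits.
	 */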
240 	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
241 	if (qc->tf.flags & ATA_TFLAG_WRITE)
242 		opts |= AHCI_CMD_WRITE;
243 	if (is_atapi)
244 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
245 
246 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
247 
248 	return AC_ERR_OK;
249 }
250 
251 static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
252 {
253 	struct ahci_port_priv *pp = qc->ap->private_data;
254 	u8 *rx_fis = pp->rx_fis;
255 
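	/* with FBS, each PMP device number has its own RX FIS slot */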
256 	if (pp->fbs_enabled)
257 		rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
258 
259 	/*
260 	 * After a successful execution of an ATA PIO data-in command,
261 	 * the device doesn't send D2H Reg FIS to update the TF and
262 	 * the host should take TF and E_Status from the preceding PIO
263 	 * Setup FIS.
264 	 */
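	/* E_Status is byte 15 of the PIO Setup FIS, hence the [15] below */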
265 	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
266 	    !(qc->flags & ATA_QCFLAG_EH)) {
267 		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
268 		qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
269 	} else
270 		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
271 }
272 
273 static int acard_ahci_port_start(struct ata_port *ap)
274 {
275 	struct ahci_host_priv *hpriv = ap->host->private_data;
276 	struct device *dev = ap->host->dev;
277 	struct ahci_port_priv *pp;
278 	void *mem;
279 	dma_addr_t mem_dma;
280 	size_t dma_sz, rx_fis_sz;
281 
282 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
283 	if (!pp)
284 		return -ENOMEM;
285 
286 	/* check FBS capability */
287 	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
288 		void __iomem *port_mmio = ahci_port_base(ap);
289 		u32 cmd = readl(port_mmio + PORT_CMD);
290 		if (cmd & PORT_CMD_FBSCP)
291 			pp->fbs_supported = true;
292 		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
293 			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
294 				 ap->port_no);
295 			pp->fbs_supported = true;
296 		} else
297 			dev_warn(dev, "port %d is not capable of FBS\n",
298 				 ap->port_no);
299 	}
300 
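	/*
	 * With FBS the port needs one 128-byte received-FIS slot for each
	 * of the 16 possible PMP device numbers, so size the DMA area
	 * accordingly.
	 */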
301 	if (pp->fbs_supported) {
302 		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
303 		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
304 	} else {
305 		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
306 		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
307 	}
308 
309 	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
310 	if (!mem)
311 		return -ENOMEM;
312 
313 	/*
314 	 * First item in chunk of DMA memory: 32-slot command table,
315 	 * 32 bytes each in size
316 	 */
317 	pp->cmd_slot = mem;
318 	pp->cmd_slot_dma = mem_dma;
319 
320 	mem += AHCI_CMD_SLOT_SZ;
321 	mem_dma += AHCI_CMD_SLOT_SZ;
322 
323 	/*
324 	 * Second item: Received-FIS area
325 	 */
326 	pp->rx_fis = mem;
327 	pp->rx_fis_dma = mem_dma;
328 
329 	mem += rx_fis_sz;
330 	mem_dma += rx_fis_sz;
331 
332 	/*
333 	 * Third item: data area for storing a single command
334 	 * and its scatter-gather table
335 	 */
336 	pp->cmd_tbl = mem;
337 	pp->cmd_tbl_dma = mem_dma;
338 
339 	/*
340 	 * Save off initial list of interrupts to be enabled.
341 	 * This could be changed later
342 	 */
343 	pp->intr_mask = DEF_PORT_IRQ;
344 
345 	ap->private_data = pp;
346 
347 	/* engage engines, captain */
348 	return ahci_port_resume(ap);
349 }
350 
351 static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
352 {
353 	unsigned int board_id = ent->driver_data;
354 	struct ata_port_info pi = acard_ahci_port_info[board_id];
355 	const struct ata_port_info *ppi[] = { &pi, NULL };
356 	struct device *dev = &pdev->dev;
357 	struct ahci_host_priv *hpriv;
358 	struct ata_host *host;
359 	int n_ports, i, rc;
360 
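	/* libata's queue depth must fit into the HBA's 32 command slots */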
361 	WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
362 
363 	ata_print_version_once(&pdev->dev, DRV_VERSION);
364 
365 	/* acquire resources */
366 	rc = pcim_enable_device(pdev);
367 	if (rc)
368 		return rc;
369 
370 	/* AHCI controllers often implement an SFF compatible interface.
371 	 * Grab all PCI BARs just in case.
372 	 */
373 	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
374 	if (rc == -EBUSY)
375 		pcim_pin_device(pdev);
376 	if (rc)
377 		return rc;
378 
379 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
380 	if (!hpriv)
381 		return -ENOMEM;
382 
383 	hpriv->irq = pdev->irq;
384 	hpriv->flags |= (unsigned long)pi.private_data;
385 
386 	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
387 		pci_enable_msi(pdev);
388 
389 	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
390 
391 	/* save initial config */
392 	ahci_save_initial_config(&pdev->dev, hpriv);
393 
394 	/* prepare host */
395 	if (hpriv->cap & HOST_CAP_NCQ)
396 		pi.flags |= ATA_FLAG_NCQ;
397 
398 	if (hpriv->cap & HOST_CAP_PMP)
399 		pi.flags |= ATA_FLAG_PMP;
400 
401 	ahci_set_em_messages(hpriv, &pi);
402 
403 	/* CAP.NP sometimes indicates the index of the last enabled
404 	 * port, at other times, that of the last possible port, so
405 	 * determining the maximum port number requires looking at
406 	 * both CAP.NP and port_map.
407 	 */
408 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
409 
410 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
411 	if (!host)
412 		return -ENOMEM;
413 	host->private_data = hpriv;
414 
415 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
416 		host->flags |= ATA_HOST_PARALLEL_SCAN;
417 	else
418 		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
419 
420 	for (i = 0; i < host->n_ports; i++) {
421 		struct ata_port *ap = host->ports[i];
422 
423 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
424 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
425 				   0x100 + ap->port_no * 0x80, "port");
426 
427 		/* set initial link pm policy */
428 		/*
429 		ap->pm_policy = NOT_AVAILABLE;
430 		*/
431 		/* disabled/not-implemented port */
432 		if (!(hpriv->port_map & (1 << i)))
433 			ap->ops = &ata_dummy_port_ops;
434 	}
435 
436 	/* initialize adapter */
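	/* CAP.S64A (HOST_CAP_64) says whether the HBA can DMA above 4 GiB */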
437 	rc = dma_set_mask_and_coherent(&pdev->dev,
438 			DMA_BIT_MASK((hpriv->cap & HOST_CAP_64) ? 64 : 32));
439 	if (rc) {
440 		dev_err(&pdev->dev, "DMA enable failed\n");
441 		return rc;
442 	}
443 
444 	rc = ahci_reset_controller(host);
445 	if (rc)
446 		return rc;
447 
448 	ahci_init_controller(host);
449 	acard_ahci_pci_print_info(host);
450 
451 	pci_set_master(pdev);
452 	return ahci_host_activate(host, &acard_ahci_sht);
453 }
454 
455 module_pci_driver(acard_ahci_pci_driver);
456 
457 MODULE_AUTHOR("Jeff Garzik");
458 MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
459 MODULE_LICENSE("GPL");
460 MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
461 MODULE_VERSION(DRV_VERSION);
462