/*
 *  sata_qstor.c - Pacific Digital Corporation QStor SATA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Pacific Digital Corporation.
 *  (OSL/GPL code release authorized by Jalil Fadavi).
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_qstor"
#define DRV_VERSION	"0.09"

enum {
	QS_MMIO_BAR		= 4,

	QS_PORTS		= 4,
	QS_MAX_PRD		= LIBATA_MAX_PRD,
	QS_CPB_ORDER		= 6,
	QS_CPB_BYTES		= (1 << QS_CPB_ORDER),
	QS_PRD_BYTES		= QS_MAX_PRD * 16,
	QS_PKT_BYTES		= QS_CPB_BYTES + QS_PRD_BYTES,

	/* global register offsets */
	QS_HCF_CNFG3		= 0x0003, /* host configuration offset */
	QS_HID_HPHY		= 0x0004, /* host physical interface info */
	QS_HCT_CTRL		= 0x00e4, /* global interrupt mask offset */
	QS_HST_SFF		= 0x0100, /* host status fifo offset */
	QS_HVS_SERD3		= 0x0393, /* PHY enable offset */

	/* global control bits */
	QS_HPHY_64BIT		= (1 << 1), /* 64-bit bus detected */
	QS_CNFG3_GSRST		= 0x01,     /* global chip reset */
	QS_SERD3_PHY_ENA	= 0xf0,     /* PHY detection ENAble */

	/* per-channel register offsets */
	QS_CCF_CPBA		= 0x0710, /* chan CPB base address */
	QS_CCF_CSEP		= 0x0718, /* chan CPB separation factor */
	QS_CFC_HUFT		= 0x0800, /* host upstream fifo threshold */
	QS_CFC_HDFT		= 0x0804, /* host downstream fifo threshold */
	QS_CFC_DUFT		= 0x0808, /* dev upstream fifo threshold */
	QS_CFC_DDFT		= 0x080c, /* dev downstream fifo threshold */
	QS_CCT_CTR0		= 0x0900, /* chan control-0 offset */
	QS_CCT_CTR1		= 0x0901, /* chan control-1 offset */
	QS_CCT_CFF		= 0x0a00, /* chan command fifo offset */

	/* channel control bits */
	QS_CTR0_REG		= (1 << 1),   /* register mode (vs. pkt mode) */
	QS_CTR0_CLER		= (1 << 2),   /* clear channel errors */
	QS_CTR1_RDEV		= (1 << 1),   /* sata phy/comms reset */
	QS_CTR1_RCHN		= (1 << 4),   /* reset channel logic */
	QS_CCF_RUN_PKT		= 0x107,      /* RUN a new dma PKT */

	/* pkt sub-field headers */
	QS_HCB_HDR		= 0x01,   /* Host Control Block header */
	QS_DCB_HDR		= 0x02,   /* Device Control Block header */

	/* pkt HCB flag bits */
	QS_HF_DIRO		= (1 << 0),   /* data DIRection Out */
	QS_HF_DAT		= (1 << 3),   /* DATa pkt */
	QS_HF_IEN		= (1 << 4),   /* Interrupt ENable */
	QS_HF_VLD		= (1 << 5),   /* VaLiD pkt */

	/* pkt DCB flag bits */
	QS_DF_PORD		= (1 << 2),   /* Pio OR Dma */
	QS_DF_ELBA		= (1 << 3),   /* Extended LBA (lba48) */

	/* PCI device IDs */
	board_2068_idx		= 0,	/* QStor 4-port SATA/RAID */
};

enum {
	QS_DMA_BOUNDARY		= ~0UL
};

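/*
 * Each channel runs in one of two modes.  In "register" (mmio) mode
 * the standard SFF taskfile registers are used and libata's SFF
 * helpers do the work; in "packet" mode the hardware executes a
 * pre-built command packet via DMA and reports completion through
 * the host status FIFO instead (see qs_qc_issue() and qs_intr_pkt()
 * below).
 */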
typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;

struct qs_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	qs_state_t		state;
};
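
/*
 * pkt is a per-port DMA-coherent buffer of QS_PKT_BYTES: a
 * QS_CPB_BYTES control block (HCB, DCB and FIS, built by
 * qs_qc_prep()) followed by a PRD table of up to QS_MAX_PRD
 * 16-byte entries, filled in by qs_fill_sg().
 */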

static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);

static struct scsi_host_template qs_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= QS_MAX_PRD,
	.dma_boundary		= QS_DMA_BOUNDARY,
};

static struct ata_port_operations qs_ata_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= qs_check_atapi_dma,
	.qc_prep		= qs_qc_prep,
	.qc_issue		= qs_qc_issue,

	.freeze			= qs_freeze,
	.thaw			= qs_thaw,
	.prereset		= qs_prereset,
	.softreset		= ATA_OP_NULL,
	.error_handler		= qs_error_handler,
	.lost_interrupt		= ATA_OP_NULL,

	.scr_read		= qs_scr_read,
	.scr_write		= qs_scr_write,

	.port_start		= qs_port_start,
	.host_stop		= qs_host_stop,
};

static const struct ata_port_info qs_port_info[] = {
	/* board_2068_idx */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4_ONLY,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &qs_ata_ops,
	},
};

static const struct pci_device_id qs_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },

	{ }	/* terminate list */
};

static struct pci_driver qs_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= qs_ata_pci_tbl,
	.probe			= qs_ata_init_one,
	.remove			= ata_pci_remove_one,
};

static void __iomem *qs_mmio_base(struct ata_host *host)
{
	return host->iomap[QS_MMIO_BAR];
}

static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not supported */
}

static inline void qs_enter_reg_mode(struct ata_port *ap)
{
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
	struct qs_port_priv *pp = ap->private_data;

	pp->state = qs_state_mmio;
	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
	readb(chan + QS_CCT_CTR0);        /* flush */
}

static inline void qs_reset_channel_logic(struct ata_port *ap)
{
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
	readb(chan + QS_CCT_CTR0);        /* flush */
	qs_enter_reg_mode(ap);
}

static void qs_freeze(struct ata_port *ap)
{
	u8 __iomem *mmio_base = qs_mmio_base(ap->host);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	qs_enter_reg_mode(ap);
}

static void qs_thaw(struct ata_port *ap)
{
	u8 __iomem *mmio_base = qs_mmio_base(ap->host);

	qs_enter_reg_mode(ap);
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}

static int qs_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;

	qs_reset_channel_logic(ap);
	return ata_sff_prereset(link, deadline);
}

static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
	return 0;
}

static void qs_error_handler(struct ata_port *ap)
{
	qs_enter_reg_mode(ap);
	ata_sff_error_handler(ap);
}

static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
	return 0;
}

static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct qs_port_priv *pp = ap->private_data;
	u8 *prd = pp->pkt + QS_CPB_BYTES;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u64 addr;
		u32 len;

		addr = sg_dma_address(sg);
		*(__le64 *)prd = cpu_to_le64(addr);
		prd += sizeof(u64);

		len = sg_dma_len(sg);
		*(__le32 *)prd = cpu_to_le32(len);
		prd += sizeof(u64);
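		/*
		 * Each PRD entry is 16 bytes (QS_PRD_BYTES is
		 * QS_MAX_PRD * 16), so the 8-byte advance above after
		 * the 32-bit length deliberately skips 4 bytes,
		 * presumably reserved by the hardware (they stay
		 * zeroed from port_start's memset).
		 */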

		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
					(unsigned long long)addr, len);
	}

	return si;
}

static void qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
}

static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb();                             /* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF);          /* flush */
}

static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = qs_state_pkt;
		qs_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
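		/*
		 * Should be unreachable: qs_check_atapi_dma() tells
		 * libata that ATAPI DMA is unsupported, so no qc with
		 * this protocol should ever be issued.
		 */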
		BUG();
		break;

	default:
		break;
	}

	pp->state = qs_state_mmio;
	return ata_sff_qc_issue(qc);
}

static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
{
	qc->err_mask |= ac_err_mask(status);

	if (!qc->err_mask) {
		ata_qc_complete(qc);
	} else {
		struct ata_port    *ap  = qc->ap;
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "status 0x%02X", status);

		if (qc->err_mask == AC_ERR_DEV)
			ata_port_abort(ap);
		else
			ata_port_freeze(ap);
	}
}

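/*
 * Drain the host status FIFO.  Each entry is a pair of 32-bit words:
 * sff0 carries the device status in bits 16-23; sff1 carries the host
 * status (bits 0-5), the channel number (bits 8-9), a "valid" flag
 * (bit 30) and an "empty" flag (bit 31).  Entries are popped until
 * the empty flag is seen.  (Field layout as implied by the shifts
 * and masks used below.)
 */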
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = qs_mmio_base(host);

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */

		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host->ports[port_no];
			struct qs_port_priv *pp = ap->private_data;
			struct ata_queued_cmd *qc;

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			if (!pp || pp->state != qs_state_pkt)
				continue;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
				switch (sHST) {
				case 0: /* successful CPB */
				case 3: /* device error */
					qs_enter_reg_mode(qc->ap);
					qs_do_or_die(qc, sDST);
					break;
				default:
					break;
				}
			}
		}
	} while (!sFFE);
	return handled;
}

static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct qs_port_priv *pp = ap->private_data;
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (!qc) {
			/*
			 * The qstor hardware generates spurious
			 * interrupts from time to time when switching
			 * in and out of packet mode.  There's no
			 * obvious way to know if we're here now due
			 * to that, so just ack the irq and pretend we
			 * knew it was ours.. (ugh).  This does not
			 * affect packet mode.
			 */
			ata_sff_check_status(ap);
			handled = 1;
			continue;
		}

		if (!pp || pp->state != qs_state_mmio)
			continue;
		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= ata_sff_port_intr(ap, qc);
	}
	return handled;
}

static irqreturn_t qs_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;
	unsigned long flags;

	VPRINTK("ENTER\n");

	spin_lock_irqsave(&host->lock, flags);
	handled  = qs_intr_pkt(host) | qs_intr_mmio(host);
	spin_unlock_irqrestore(&host->lock, flags);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

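/*
 * The SFF taskfile registers live in each channel's mmio window,
 * spaced 8 bytes apart, with the lba48 "hob" shadow of each register
 * at the following byte (per the hob_* offsets noted below).
 */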
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect   = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal    = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam    = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah    = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}

static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = qs_mmio_base(ap->host);
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	memset(pp->pkt, 0, QS_PKT_BYTES);
	ap->private_data = pp;

	qs_enter_reg_mode(ap);
	addr = (u64)pp->pkt_dma;
	writel((u32) addr,        chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;
}

static void qs_host_stop(struct ata_host *host)
{
	void __iomem *mmio_base = qs_mmio_base(host);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
}

static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);        /* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to the same settings as the Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER,    chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}

/*
 * The QStor understands 64-bit buses, and uses 64-bit fields
 * for DMA pointers regardless of bus width.  We just have to
 * make sure our DMA masks are set appropriately for whatever
 * bridge lies between us and the QStor, and then the DMA mapping
 * code will ensure we only ever "see" appropriate buffer addresses.
 * If we're 32-bit limited somewhere, then our 64-bit fields will
 * just end up with zeros in the upper 32-bits, without any special
 * logic required outside of this routine (below).
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

static int qs_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
	struct ata_host *host;
	int rc, port_no;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
	if (rc)
		return rc;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		unsigned int offset = port_no * 0x4000;
		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;

		qs_ata_setup_port(&ap->ioaddr, chan);

		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	qs_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
				 &qs_ata_sht);
}

static int __init qs_ata_init(void)
{
	return pci_register_driver(&qs_ata_pci_driver);
}

static void __exit qs_ata_exit(void)
{
	pci_unregister_driver(&qs_ata_pci_driver);
}

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(qs_ata_init);
module_exit(qs_ata_exit);