xref: /linux/drivers/ata/libata-sff.c (revision ba6ec09911b805778a2fed6d626bfe77b011a717)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  libata-sff.c - helper library for PCI IDE BMDMA
4  *
5  *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
6  *  Copyright 2003-2006 Jeff Garzik
7  *
8  *  libata documentation is available via 'make {ps|pdf}docs',
9  *  as Documentation/driver-api/libata.rst
10  *
11  *  Hardware documentation available from http://www.t13.org/ and
12  *  http://www.sata-io.org/
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/gfp.h>
17 #include <linux/pci.h>
18 #include <linux/module.h>
19 #include <linux/libata.h>
20 #include <linux/highmem.h>
21 #include <trace/events/libata.h>
22 #include "libata.h"
23 
24 static struct workqueue_struct *ata_sff_wq;
25 
26 const struct ata_port_operations ata_sff_port_ops = {
27 	.inherits		= &ata_base_port_ops,
28 
29 	.qc_issue		= ata_sff_qc_issue,
30 	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
31 
32 	.freeze			= ata_sff_freeze,
33 	.thaw			= ata_sff_thaw,
34 	.prereset		= ata_sff_prereset,
35 	.softreset		= ata_sff_softreset,
36 	.hardreset		= sata_sff_hardreset,
37 	.postreset		= ata_sff_postreset,
38 	.error_handler		= ata_sff_error_handler,
39 
40 	.sff_dev_select		= ata_sff_dev_select,
41 	.sff_check_status	= ata_sff_check_status,
42 	.sff_tf_load		= ata_sff_tf_load,
43 	.sff_tf_read		= ata_sff_tf_read,
44 	.sff_exec_command	= ata_sff_exec_command,
45 	.sff_data_xfer		= ata_sff_data_xfer,
46 	.sff_drain_fifo		= ata_sff_drain_fifo,
47 
48 	.lost_interrupt		= ata_sff_lost_interrupt,
49 };
50 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
51 
52 /**
53  *	ata_sff_check_status - Read device status reg & clear interrupt
54  *	@ap: port where the device is
55  *
56  *	Reads ATA taskfile status register for currently-selected device
57  *	and return its value. This also clears pending interrupts
58  *      from this device
59  *
60  *	LOCKING:
61  *	Inherited from caller.
62  */
ata_sff_check_status(struct ata_port * ap)63 u8 ata_sff_check_status(struct ata_port *ap)
64 {
65 	return ioread8(ap->ioaddr.status_addr);
66 }
67 EXPORT_SYMBOL_GPL(ata_sff_check_status);
68 
69 /**
70  *	ata_sff_altstatus - Read device alternate status reg
71  *	@ap: port where the device is
72  *	@status: pointer to a status value
73  *
74  *	Reads ATA alternate status register for currently-selected device
75  *	and return its value.
76  *
77  *	RETURN:
78  *	true if the register exists, false if not.
79  *
80  *	LOCKING:
81  *	Inherited from caller.
82  */
ata_sff_altstatus(struct ata_port * ap,u8 * status)83 static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
84 {
85 	u8 tmp;
86 
87 	if (ap->ops->sff_check_altstatus) {
88 		tmp = ap->ops->sff_check_altstatus(ap);
89 		goto read;
90 	}
91 	if (ap->ioaddr.altstatus_addr) {
92 		tmp = ioread8(ap->ioaddr.altstatus_addr);
93 		goto read;
94 	}
95 	return false;
96 
97 read:
98 	if (status)
99 		*status = tmp;
100 	return true;
101 }
102 
103 /**
104  *	ata_sff_irq_status - Check if the device is busy
105  *	@ap: port where the device is
106  *
107  *	Determine if the port is currently busy. Uses altstatus
108  *	if available in order to avoid clearing shared IRQ status
109  *	when finding an IRQ source. Non ctl capable devices don't
110  *	share interrupt lines fortunately for us.
111  *
112  *	LOCKING:
113  *	Inherited from caller.
114  */
ata_sff_irq_status(struct ata_port * ap)115 static u8 ata_sff_irq_status(struct ata_port *ap)
116 {
117 	u8 status;
118 
119 	/* Not us: We are busy */
120 	if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
121 		return status;
122 	/* Clear INTRQ latch */
123 	status = ap->ops->sff_check_status(ap);
124 	return status;
125 }
126 
127 /**
128  *	ata_sff_sync - Flush writes
129  *	@ap: Port to wait for.
130  *
131  *	CAUTION:
132  *	If we have an mmio device with no ctl and no altstatus
133  *	method this will fail. No such devices are known to exist.
134  *
135  *	LOCKING:
136  *	Inherited from caller.
137  */
138 
ata_sff_sync(struct ata_port * ap)139 static void ata_sff_sync(struct ata_port *ap)
140 {
141 	ata_sff_altstatus(ap, NULL);
142 }
143 
144 /**
145  *	ata_sff_pause		-	Flush writes and wait 400nS
146  *	@ap: Port to pause for.
147  *
148  *	CAUTION:
149  *	If we have an mmio device with no ctl and no altstatus
150  *	method this will fail. No such devices are known to exist.
151  *
152  *	LOCKING:
153  *	Inherited from caller.
154  */
155 
ata_sff_pause(struct ata_port * ap)156 void ata_sff_pause(struct ata_port *ap)
157 {
158 	ata_sff_sync(ap);
159 	ndelay(400);
160 }
161 EXPORT_SYMBOL_GPL(ata_sff_pause);
162 
163 /**
164  *	ata_sff_dma_pause	-	Pause before commencing DMA
165  *	@ap: Port to pause for.
166  *
167  *	Perform I/O fencing and ensure sufficient cycle delays occur
168  *	for the HDMA1:0 transition
169  */
170 
ata_sff_dma_pause(struct ata_port * ap)171 void ata_sff_dma_pause(struct ata_port *ap)
172 {
173 	/*
174 	 * An altstatus read will cause the needed delay without
175 	 * messing up the IRQ status
176 	 */
177 	if (ata_sff_altstatus(ap, NULL))
178 		return;
179 	/* There are no DMA controllers without ctl. BUG here to ensure
180 	   we never violate the HDMA1:0 transition timing and risk
181 	   corruption. */
182 	BUG();
183 }
184 EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
185 
ata_sff_check_ready(struct ata_link * link)186 static int ata_sff_check_ready(struct ata_link *link)
187 {
188 	u8 status = link->ap->ops->sff_check_status(link->ap);
189 
190 	return ata_check_ready(status);
191 }
192 
193 /**
194  *	ata_sff_wait_ready - sleep until BSY clears, or timeout
195  *	@link: SFF link to wait ready status for
196  *	@deadline: deadline jiffies for the operation
197  *
198  *	Sleep until ATA Status register bit BSY clears, or timeout
199  *	occurs.
200  *
201  *	LOCKING:
202  *	Kernel thread context (may sleep).
203  *
204  *	RETURNS:
205  *	0 on success, -errno otherwise.
206  */
ata_sff_wait_ready(struct ata_link * link,unsigned long deadline)207 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
208 {
209 	return ata_wait_ready(link, deadline, ata_sff_check_ready);
210 }
211 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
212 
213 /**
214  *	ata_sff_set_devctl - Write device control reg
215  *	@ap: port where the device is
216  *	@ctl: value to write
217  *
218  *	Writes ATA device control register.
219  *
220  *	RETURN:
221  *	true if the register exists, false if not.
222  *
223  *	LOCKING:
224  *	Inherited from caller.
225  */
ata_sff_set_devctl(struct ata_port * ap,u8 ctl)226 static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
227 {
228 	if (ap->ops->sff_set_devctl) {
229 		ap->ops->sff_set_devctl(ap, ctl);
230 		return true;
231 	}
232 	if (ap->ioaddr.ctl_addr) {
233 		iowrite8(ctl, ap->ioaddr.ctl_addr);
234 		return true;
235 	}
236 
237 	return false;
238 }
239 
240 /**
241  *	ata_sff_dev_select - Select device 0/1 on ATA bus
242  *	@ap: ATA channel to manipulate
243  *	@device: ATA device (numbered from zero) to select
244  *
245  *	Use the method defined in the ATA specification to
246  *	make either device 0, or device 1, active on the
247  *	ATA channel.  Works with both PIO and MMIO.
248  *
249  *	May be used as the dev_select() entry in ata_port_operations.
250  *
251  *	LOCKING:
252  *	caller.
253  */
ata_sff_dev_select(struct ata_port * ap,unsigned int device)254 void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
255 {
256 	u8 tmp;
257 
258 	if (device == 0)
259 		tmp = ATA_DEVICE_OBS;
260 	else
261 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
262 
263 	iowrite8(tmp, ap->ioaddr.device_addr);
264 	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
265 }
266 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
267 
268 /**
269  *	ata_dev_select - Select device 0/1 on ATA bus
270  *	@ap: ATA channel to manipulate
271  *	@device: ATA device (numbered from zero) to select
272  *	@wait: non-zero to wait for Status register BSY bit to clear
273  *	@can_sleep: non-zero if context allows sleeping
274  *
275  *	Use the method defined in the ATA specification to
276  *	make either device 0, or device 1, active on the
277  *	ATA channel.
278  *
279  *	This is a high-level version of ata_sff_dev_select(), which
280  *	additionally provides the services of inserting the proper
281  *	pauses and status polling, where needed.
282  *
283  *	LOCKING:
284  *	caller.
285  */
ata_dev_select(struct ata_port * ap,unsigned int device,unsigned int wait,unsigned int can_sleep)286 static void ata_dev_select(struct ata_port *ap, unsigned int device,
287 			   unsigned int wait, unsigned int can_sleep)
288 {
289 	if (wait)
290 		ata_wait_idle(ap);
291 
292 	ap->ops->sff_dev_select(ap, device);
293 
294 	if (wait) {
295 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
296 			ata_msleep(ap, 150);
297 		ata_wait_idle(ap);
298 	}
299 }
300 
301 /**
302  *	ata_sff_irq_on - Enable interrupts on a port.
303  *	@ap: Port on which interrupts are enabled.
304  *
305  *	Enable interrupts on a legacy IDE device using MMIO or PIO,
306  *	wait for idle, clear any pending interrupts.
307  *
308  *	Note: may NOT be used as the sff_irq_on() entry in
309  *	ata_port_operations.
310  *
311  *	LOCKING:
312  *	Inherited from caller.
313  */
ata_sff_irq_on(struct ata_port * ap)314 void ata_sff_irq_on(struct ata_port *ap)
315 {
316 	if (ap->ops->sff_irq_on) {
317 		ap->ops->sff_irq_on(ap);
318 		return;
319 	}
320 
321 	ap->ctl &= ~ATA_NIEN;
322 	ap->last_ctl = ap->ctl;
323 
324 	ata_sff_set_devctl(ap, ap->ctl);
325 	ata_wait_idle(ap);
326 
327 	if (ap->ops->sff_irq_clear)
328 		ap->ops->sff_irq_clear(ap);
329 }
330 EXPORT_SYMBOL_GPL(ata_sff_irq_on);
331 
332 /**
333  *	ata_sff_tf_load - send taskfile registers to host controller
334  *	@ap: Port to which output is sent
335  *	@tf: ATA taskfile register set
336  *
337  *	Outputs ATA taskfile to standard ATA host controller.
338  *
339  *	LOCKING:
340  *	Inherited from caller.
341  */
ata_sff_tf_load(struct ata_port * ap,const struct ata_taskfile * tf)342 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
343 {
344 	struct ata_ioports *ioaddr = &ap->ioaddr;
345 	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
346 
347 	if (tf->ctl != ap->last_ctl) {
348 		if (ioaddr->ctl_addr)
349 			iowrite8(tf->ctl, ioaddr->ctl_addr);
350 		ap->last_ctl = tf->ctl;
351 		ata_wait_idle(ap);
352 	}
353 
354 	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
355 		WARN_ON_ONCE(!ioaddr->ctl_addr);
356 		iowrite8(tf->hob_feature, ioaddr->feature_addr);
357 		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
358 		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
359 		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
360 		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
361 	}
362 
363 	if (is_addr) {
364 		iowrite8(tf->feature, ioaddr->feature_addr);
365 		iowrite8(tf->nsect, ioaddr->nsect_addr);
366 		iowrite8(tf->lbal, ioaddr->lbal_addr);
367 		iowrite8(tf->lbam, ioaddr->lbam_addr);
368 		iowrite8(tf->lbah, ioaddr->lbah_addr);
369 	}
370 
371 	if (tf->flags & ATA_TFLAG_DEVICE)
372 		iowrite8(tf->device, ioaddr->device_addr);
373 
374 	ata_wait_idle(ap);
375 }
376 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
377 
378 /**
379  *	ata_sff_tf_read - input device's ATA taskfile shadow registers
380  *	@ap: Port from which input is read
381  *	@tf: ATA taskfile register set for storing input
382  *
383  *	Reads ATA taskfile registers for currently-selected device
384  *	into @tf. Assumes the device has a fully SFF compliant task file
385  *	layout and behaviour. If you device does not (eg has a different
386  *	status method) then you will need to provide a replacement tf_read
387  *
388  *	LOCKING:
389  *	Inherited from caller.
390  */
ata_sff_tf_read(struct ata_port * ap,struct ata_taskfile * tf)391 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
392 {
393 	struct ata_ioports *ioaddr = &ap->ioaddr;
394 
395 	tf->status = ata_sff_check_status(ap);
396 	tf->error = ioread8(ioaddr->error_addr);
397 	tf->nsect = ioread8(ioaddr->nsect_addr);
398 	tf->lbal = ioread8(ioaddr->lbal_addr);
399 	tf->lbam = ioread8(ioaddr->lbam_addr);
400 	tf->lbah = ioread8(ioaddr->lbah_addr);
401 	tf->device = ioread8(ioaddr->device_addr);
402 
403 	if (tf->flags & ATA_TFLAG_LBA48) {
404 		if (likely(ioaddr->ctl_addr)) {
405 			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
406 			tf->hob_feature = ioread8(ioaddr->error_addr);
407 			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
408 			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
409 			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
410 			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
411 			iowrite8(tf->ctl, ioaddr->ctl_addr);
412 			ap->last_ctl = tf->ctl;
413 		} else
414 			WARN_ON_ONCE(1);
415 	}
416 }
417 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
418 
419 /**
420  *	ata_sff_exec_command - issue ATA command to host controller
421  *	@ap: port to which command is being issued
422  *	@tf: ATA taskfile register set
423  *
424  *	Issues ATA command, with proper synchronization with interrupt
425  *	handler / other threads.
426  *
427  *	LOCKING:
428  *	spin_lock_irqsave(host lock)
429  */
ata_sff_exec_command(struct ata_port * ap,const struct ata_taskfile * tf)430 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
431 {
432 	iowrite8(tf->command, ap->ioaddr.command_addr);
433 	ata_sff_pause(ap);
434 }
435 EXPORT_SYMBOL_GPL(ata_sff_exec_command);
436 
437 /**
438  *	ata_tf_to_host - issue ATA taskfile to host controller
439  *	@ap: port to which command is being issued
440  *	@tf: ATA taskfile register set
441  *	@tag: tag of the associated command
442  *
443  *	Issues ATA taskfile register set to ATA host controller,
444  *	with proper synchronization with interrupt handler and
445  *	other threads.
446  *
447  *	LOCKING:
448  *	spin_lock_irqsave(host lock)
449  */
ata_tf_to_host(struct ata_port * ap,const struct ata_taskfile * tf,unsigned int tag)450 static inline void ata_tf_to_host(struct ata_port *ap,
451 				  const struct ata_taskfile *tf,
452 				  unsigned int tag)
453 {
454 	trace_ata_tf_load(ap, tf);
455 	ap->ops->sff_tf_load(ap, tf);
456 	trace_ata_exec_command(ap, tf, tag);
457 	ap->ops->sff_exec_command(ap, tf);
458 }
459 
460 /**
461  *	ata_sff_data_xfer - Transfer data by PIO
462  *	@qc: queued command
463  *	@buf: data buffer
464  *	@buflen: buffer length
465  *	@rw: read/write
466  *
467  *	Transfer data from/to the device data register by PIO.
468  *
469  *	LOCKING:
470  *	Inherited from caller.
471  *
472  *	RETURNS:
473  *	Bytes consumed.
474  */
ata_sff_data_xfer(struct ata_queued_cmd * qc,unsigned char * buf,unsigned int buflen,int rw)475 unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
476 			       unsigned int buflen, int rw)
477 {
478 	struct ata_port *ap = qc->dev->link->ap;
479 	void __iomem *data_addr = ap->ioaddr.data_addr;
480 	unsigned int words = buflen >> 1;
481 
482 	/* Transfer multiple of 2 bytes */
483 	if (rw == READ)
484 		ioread16_rep(data_addr, buf, words);
485 	else
486 		iowrite16_rep(data_addr, buf, words);
487 
488 	/* Transfer trailing byte, if any. */
489 	if (unlikely(buflen & 0x01)) {
490 		unsigned char pad[2] = { };
491 
492 		/* Point buf to the tail of buffer */
493 		buf += buflen - 1;
494 
495 		/*
496 		 * Use io*16_rep() accessors here as well to avoid pointlessly
497 		 * swapping bytes to and from on the big endian machines...
498 		 */
499 		if (rw == READ) {
500 			ioread16_rep(data_addr, pad, 1);
501 			*buf = pad[0];
502 		} else {
503 			pad[0] = *buf;
504 			iowrite16_rep(data_addr, pad, 1);
505 		}
506 		words++;
507 	}
508 
509 	return words << 1;
510 }
511 EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
512 
513 /**
514  *	ata_sff_data_xfer32 - Transfer data by PIO
515  *	@qc: queued command
516  *	@buf: data buffer
517  *	@buflen: buffer length
518  *	@rw: read/write
519  *
520  *	Transfer data from/to the device data register by PIO using 32bit
521  *	I/O operations.
522  *
523  *	LOCKING:
524  *	Inherited from caller.
525  *
526  *	RETURNS:
527  *	Bytes consumed.
528  */
529 
ata_sff_data_xfer32(struct ata_queued_cmd * qc,unsigned char * buf,unsigned int buflen,int rw)530 unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
531 			       unsigned int buflen, int rw)
532 {
533 	struct ata_device *dev = qc->dev;
534 	struct ata_port *ap = dev->link->ap;
535 	void __iomem *data_addr = ap->ioaddr.data_addr;
536 	unsigned int words = buflen >> 2;
537 	int slop = buflen & 3;
538 
539 	if (!(ap->pflags & ATA_PFLAG_PIO32))
540 		return ata_sff_data_xfer(qc, buf, buflen, rw);
541 
542 	/* Transfer multiple of 4 bytes */
543 	if (rw == READ)
544 		ioread32_rep(data_addr, buf, words);
545 	else
546 		iowrite32_rep(data_addr, buf, words);
547 
548 	/* Transfer trailing bytes, if any */
549 	if (unlikely(slop)) {
550 		unsigned char pad[4] = { };
551 
552 		/* Point buf to the tail of buffer */
553 		buf += buflen - slop;
554 
555 		/*
556 		 * Use io*_rep() accessors here as well to avoid pointlessly
557 		 * swapping bytes to and from on the big endian machines...
558 		 */
559 		if (rw == READ) {
560 			if (slop < 3)
561 				ioread16_rep(data_addr, pad, 1);
562 			else
563 				ioread32_rep(data_addr, pad, 1);
564 			memcpy(buf, pad, slop);
565 		} else {
566 			memcpy(pad, buf, slop);
567 			if (slop < 3)
568 				iowrite16_rep(data_addr, pad, 1);
569 			else
570 				iowrite32_rep(data_addr, pad, 1);
571 		}
572 	}
573 	return (buflen + 1) & ~1;
574 }
575 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
576 
ata_pio_xfer(struct ata_queued_cmd * qc,struct page * page,unsigned int offset,size_t xfer_size)577 static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
578 		unsigned int offset, size_t xfer_size)
579 {
580 	bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
581 	unsigned char *buf;
582 
583 	buf = kmap_atomic(page);
584 	qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
585 	kunmap_atomic(buf);
586 
587 	if (!do_write && !PageSlab(page))
588 		flush_dcache_page(page);
589 }
590 
591 /**
592  *	ata_pio_sector - Transfer a sector of data.
593  *	@qc: Command on going
594  *
595  *	Transfer qc->sect_size bytes of data from/to the ATA device.
596  *
597  *	LOCKING:
598  *	Inherited from caller.
599  */
ata_pio_sector(struct ata_queued_cmd * qc)600 static void ata_pio_sector(struct ata_queued_cmd *qc)
601 {
602 	struct ata_port *ap = qc->ap;
603 	struct page *page;
604 	unsigned int offset, count;
605 
606 	if (!qc->cursg) {
607 		qc->curbytes = qc->nbytes;
608 		return;
609 	}
610 	if (qc->curbytes == qc->nbytes - qc->sect_size)
611 		ap->hsm_task_state = HSM_ST_LAST;
612 
613 	page = sg_page(qc->cursg);
614 	offset = qc->cursg->offset + qc->cursg_ofs;
615 
616 	/* get the current page and offset */
617 	page = nth_page(page, (offset >> PAGE_SHIFT));
618 	offset %= PAGE_SIZE;
619 
620 	/* don't overrun current sg */
621 	count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
622 
623 	trace_ata_sff_pio_transfer_data(qc, offset, count);
624 
625 	/*
626 	 * Split the transfer when it splits a page boundary.  Note that the
627 	 * split still has to be dword aligned like all ATA data transfers.
628 	 */
629 	WARN_ON_ONCE(offset % 4);
630 	if (offset + count > PAGE_SIZE) {
631 		unsigned int split_len = PAGE_SIZE - offset;
632 
633 		ata_pio_xfer(qc, page, offset, split_len);
634 		ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
635 	} else {
636 		ata_pio_xfer(qc, page, offset, count);
637 	}
638 
639 	qc->curbytes += count;
640 	qc->cursg_ofs += count;
641 
642 	if (qc->cursg_ofs == qc->cursg->length) {
643 		qc->cursg = sg_next(qc->cursg);
644 		if (!qc->cursg)
645 			ap->hsm_task_state = HSM_ST_LAST;
646 		qc->cursg_ofs = 0;
647 	}
648 }
649 
650 /**
651  *	ata_pio_sectors - Transfer one or many sectors.
652  *	@qc: Command on going
653  *
654  *	Transfer one or many sectors of data from/to the
655  *	ATA device for the DRQ request.
656  *
657  *	LOCKING:
658  *	Inherited from caller.
659  */
ata_pio_sectors(struct ata_queued_cmd * qc)660 static void ata_pio_sectors(struct ata_queued_cmd *qc)
661 {
662 	if (is_multi_taskfile(&qc->tf)) {
663 		/* READ/WRITE MULTIPLE */
664 		unsigned int nsect;
665 
666 		WARN_ON_ONCE(qc->dev->multi_count == 0);
667 
668 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
669 			    qc->dev->multi_count);
670 		while (nsect--)
671 			ata_pio_sector(qc);
672 	} else
673 		ata_pio_sector(qc);
674 
675 	ata_sff_sync(qc->ap); /* flush */
676 }
677 
678 /**
679  *	atapi_send_cdb - Write CDB bytes to hardware
680  *	@ap: Port to which ATAPI device is attached.
681  *	@qc: Taskfile currently active
682  *
683  *	When device has indicated its readiness to accept
684  *	a CDB, this function is called.  Send the CDB.
685  *
686  *	LOCKING:
687  *	caller.
688  */
atapi_send_cdb(struct ata_port * ap,struct ata_queued_cmd * qc)689 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
690 {
691 	/* send SCSI cdb */
692 	trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
693 	WARN_ON_ONCE(qc->dev->cdb_len < 12);
694 
695 	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
696 	ata_sff_sync(ap);
697 	/* FIXME: If the CDB is for DMA do we need to do the transition delay
698 	   or is bmdma_start guaranteed to do it ? */
699 	switch (qc->tf.protocol) {
700 	case ATAPI_PROT_PIO:
701 		ap->hsm_task_state = HSM_ST;
702 		break;
703 	case ATAPI_PROT_NODATA:
704 		ap->hsm_task_state = HSM_ST_LAST;
705 		break;
706 #ifdef CONFIG_ATA_BMDMA
707 	case ATAPI_PROT_DMA:
708 		ap->hsm_task_state = HSM_ST_LAST;
709 		/* initiate bmdma */
710 		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
711 		ap->ops->bmdma_start(qc);
712 		break;
713 #endif /* CONFIG_ATA_BMDMA */
714 	default:
715 		BUG();
716 	}
717 }
718 
719 /**
720  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
721  *	@qc: Command on going
722  *	@bytes: number of bytes
723  *
724  *	Transfer data from/to the ATAPI device.
725  *
726  *	LOCKING:
727  *	Inherited from caller.
728  *
729  */
__atapi_pio_bytes(struct ata_queued_cmd * qc,unsigned int bytes)730 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
731 {
732 	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
733 	struct ata_port *ap = qc->ap;
734 	struct ata_device *dev = qc->dev;
735 	struct ata_eh_info *ehi = &dev->link->eh_info;
736 	struct scatterlist *sg;
737 	struct page *page;
738 	unsigned char *buf;
739 	unsigned int offset, count, consumed;
740 
741 next_sg:
742 	sg = qc->cursg;
743 	if (unlikely(!sg)) {
744 		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
745 				  "buf=%u cur=%u bytes=%u",
746 				  qc->nbytes, qc->curbytes, bytes);
747 		return -1;
748 	}
749 
750 	page = sg_page(sg);
751 	offset = sg->offset + qc->cursg_ofs;
752 
753 	/* get the current page and offset */
754 	page = nth_page(page, (offset >> PAGE_SHIFT));
755 	offset %= PAGE_SIZE;
756 
757 	/* don't overrun current sg */
758 	count = min(sg->length - qc->cursg_ofs, bytes);
759 
760 	/* don't cross page boundaries */
761 	count = min(count, (unsigned int)PAGE_SIZE - offset);
762 
763 	trace_atapi_pio_transfer_data(qc, offset, count);
764 
765 	/* do the actual data transfer */
766 	buf = kmap_atomic(page);
767 	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
768 	kunmap_atomic(buf);
769 
770 	bytes -= min(bytes, consumed);
771 	qc->curbytes += count;
772 	qc->cursg_ofs += count;
773 
774 	if (qc->cursg_ofs == sg->length) {
775 		qc->cursg = sg_next(qc->cursg);
776 		qc->cursg_ofs = 0;
777 	}
778 
779 	/*
780 	 * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
781 	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
782 	 * check correctly as it doesn't know if it is the last request being
783 	 * made. Somebody should implement a proper sanity check.
784 	 */
785 	if (bytes)
786 		goto next_sg;
787 	return 0;
788 }
789 
790 /**
791  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
792  *	@qc: Command on going
793  *
794  *	Transfer Transfer data from/to the ATAPI device.
795  *
796  *	LOCKING:
797  *	Inherited from caller.
798  */
atapi_pio_bytes(struct ata_queued_cmd * qc)799 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
800 {
801 	struct ata_port *ap = qc->ap;
802 	struct ata_device *dev = qc->dev;
803 	struct ata_eh_info *ehi = &dev->link->eh_info;
804 	unsigned int ireason, bc_lo, bc_hi, bytes;
805 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
806 
807 	/* Abuse qc->result_tf for temp storage of intermediate TF
808 	 * here to save some kernel stack usage.
809 	 * For normal completion, qc->result_tf is not relevant. For
810 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
811 	 * So, the correctness of qc->result_tf is not affected.
812 	 */
813 	ap->ops->sff_tf_read(ap, &qc->result_tf);
814 	ireason = qc->result_tf.nsect;
815 	bc_lo = qc->result_tf.lbam;
816 	bc_hi = qc->result_tf.lbah;
817 	bytes = (bc_hi << 8) | bc_lo;
818 
819 	/* shall be cleared to zero, indicating xfer of data */
820 	if (unlikely(ireason & ATAPI_COD))
821 		goto atapi_check;
822 
823 	/* make sure transfer direction matches expected */
824 	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
825 	if (unlikely(do_write != i_write))
826 		goto atapi_check;
827 
828 	if (unlikely(!bytes))
829 		goto atapi_check;
830 
831 	if (unlikely(__atapi_pio_bytes(qc, bytes)))
832 		goto err_out;
833 	ata_sff_sync(ap); /* flush */
834 
835 	return;
836 
837  atapi_check:
838 	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
839 			  ireason, bytes);
840  err_out:
841 	qc->err_mask |= AC_ERR_HSM;
842 	ap->hsm_task_state = HSM_ST_ERR;
843 }
844 
845 /**
846  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
847  *	@ap: the target ata_port
848  *	@qc: qc on going
849  *
850  *	RETURNS:
851  *	1 if ok in workqueue, 0 otherwise.
852  */
ata_hsm_ok_in_wq(struct ata_port * ap,struct ata_queued_cmd * qc)853 static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
854 						struct ata_queued_cmd *qc)
855 {
856 	if (qc->tf.flags & ATA_TFLAG_POLLING)
857 		return 1;
858 
859 	if (ap->hsm_task_state == HSM_ST_FIRST) {
860 		if (qc->tf.protocol == ATA_PROT_PIO &&
861 		   (qc->tf.flags & ATA_TFLAG_WRITE))
862 		    return 1;
863 
864 		if (ata_is_atapi(qc->tf.protocol) &&
865 		   !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
866 			return 1;
867 	}
868 
869 	return 0;
870 }
871 
872 /**
873  *	ata_hsm_qc_complete - finish a qc running on standard HSM
874  *	@qc: Command to complete
875  *	@in_wq: 1 if called from workqueue, 0 otherwise
876  *
877  *	Finish @qc which is running on standard HSM.
878  *
879  *	LOCKING:
880  *	If @in_wq is zero, spin_lock_irqsave(host lock).
881  *	Otherwise, none on entry and grabs host lock.
882  */
ata_hsm_qc_complete(struct ata_queued_cmd * qc,int in_wq)883 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
884 {
885 	struct ata_port *ap = qc->ap;
886 
887 	if (in_wq) {
888 		/* EH might have kicked in while host lock is released. */
889 		qc = ata_qc_from_tag(ap, qc->tag);
890 		if (qc) {
891 			if (likely(!(qc->err_mask & AC_ERR_HSM))) {
892 				ata_sff_irq_on(ap);
893 				ata_qc_complete(qc);
894 			} else
895 				ata_port_freeze(ap);
896 		}
897 	} else {
898 		if (likely(!(qc->err_mask & AC_ERR_HSM)))
899 			ata_qc_complete(qc);
900 		else
901 			ata_port_freeze(ap);
902 	}
903 }
904 
905 /**
906  *	ata_sff_hsm_move - move the HSM to the next state.
907  *	@ap: the target ata_port
908  *	@qc: qc on going
909  *	@status: current device status
910  *	@in_wq: 1 if called from workqueue, 0 otherwise
911  *
912  *	RETURNS:
913  *	1 when poll next status needed, 0 otherwise.
914  */
ata_sff_hsm_move(struct ata_port * ap,struct ata_queued_cmd * qc,u8 status,int in_wq)915 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
916 		     u8 status, int in_wq)
917 {
918 	struct ata_link *link = qc->dev->link;
919 	struct ata_eh_info *ehi = &link->eh_info;
920 	int poll_next;
921 
922 	lockdep_assert_held(ap->lock);
923 
924 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
925 
926 	/* Make sure ata_sff_qc_issue() does not throw things
927 	 * like DMA polling into the workqueue. Notice that
928 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
929 	 */
930 	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
931 
932 fsm_start:
933 	trace_ata_sff_hsm_state(qc, status);
934 
935 	switch (ap->hsm_task_state) {
936 	case HSM_ST_FIRST:
937 		/* Send first data block or PACKET CDB */
938 
939 		/* If polling, we will stay in the work queue after
940 		 * sending the data. Otherwise, interrupt handler
941 		 * takes over after sending the data.
942 		 */
943 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
944 
945 		/* check device status */
946 		if (unlikely((status & ATA_DRQ) == 0)) {
947 			/* handle BSY=0, DRQ=0 as error */
948 			if (likely(status & (ATA_ERR | ATA_DF)))
949 				/* device stops HSM for abort/error */
950 				qc->err_mask |= AC_ERR_DEV;
951 			else {
952 				/* HSM violation. Let EH handle this */
953 				ata_ehi_push_desc(ehi,
954 					"ST_FIRST: !(DRQ|ERR|DF)");
955 				qc->err_mask |= AC_ERR_HSM;
956 			}
957 
958 			ap->hsm_task_state = HSM_ST_ERR;
959 			goto fsm_start;
960 		}
961 
962 		/* Device should not ask for data transfer (DRQ=1)
963 		 * when it finds something wrong.
964 		 * We ignore DRQ here and stop the HSM by
965 		 * changing hsm_task_state to HSM_ST_ERR and
966 		 * let the EH abort the command or reset the device.
967 		 */
968 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
969 			/* Some ATAPI tape drives forget to clear the ERR bit
970 			 * when doing the next command (mostly request sense).
971 			 * We ignore ERR here to workaround and proceed sending
972 			 * the CDB.
973 			 */
974 			if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) {
975 				ata_ehi_push_desc(ehi, "ST_FIRST: "
976 					"DRQ=1 with device error, "
977 					"dev_stat 0x%X", status);
978 				qc->err_mask |= AC_ERR_HSM;
979 				ap->hsm_task_state = HSM_ST_ERR;
980 				goto fsm_start;
981 			}
982 		}
983 
984 		if (qc->tf.protocol == ATA_PROT_PIO) {
985 			/* PIO data out protocol.
986 			 * send first data block.
987 			 */
988 
989 			/* ata_pio_sectors() might change the state
990 			 * to HSM_ST_LAST. so, the state is changed here
991 			 * before ata_pio_sectors().
992 			 */
993 			ap->hsm_task_state = HSM_ST;
994 			ata_pio_sectors(qc);
995 		} else
996 			/* send CDB */
997 			atapi_send_cdb(ap, qc);
998 
999 		/* if polling, ata_sff_pio_task() handles the rest.
1000 		 * otherwise, interrupt handler takes over from here.
1001 		 */
1002 		break;
1003 
1004 	case HSM_ST:
1005 		/* complete command or read/write the data register */
1006 		if (qc->tf.protocol == ATAPI_PROT_PIO) {
1007 			/* ATAPI PIO protocol */
1008 			if ((status & ATA_DRQ) == 0) {
1009 				/* No more data to transfer or device error.
1010 				 * Device error will be tagged in HSM_ST_LAST.
1011 				 */
1012 				ap->hsm_task_state = HSM_ST_LAST;
1013 				goto fsm_start;
1014 			}
1015 
1016 			/* Device should not ask for data transfer (DRQ=1)
1017 			 * when it finds something wrong.
1018 			 * We ignore DRQ here and stop the HSM by
1019 			 * changing hsm_task_state to HSM_ST_ERR and
1020 			 * let the EH abort the command or reset the device.
1021 			 */
1022 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
1023 				ata_ehi_push_desc(ehi, "ST-ATAPI: "
1024 					"DRQ=1 with device error, "
1025 					"dev_stat 0x%X", status);
1026 				qc->err_mask |= AC_ERR_HSM;
1027 				ap->hsm_task_state = HSM_ST_ERR;
1028 				goto fsm_start;
1029 			}
1030 
1031 			atapi_pio_bytes(qc);
1032 
1033 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1034 				/* bad ireason reported by device */
1035 				goto fsm_start;
1036 
1037 		} else {
1038 			/* ATA PIO protocol */
1039 			if (unlikely((status & ATA_DRQ) == 0)) {
1040 				/* handle BSY=0, DRQ=0 as error */
1041 				if (likely(status & (ATA_ERR | ATA_DF))) {
1042 					/* device stops HSM for abort/error */
1043 					qc->err_mask |= AC_ERR_DEV;
1044 
1045 					/* If diagnostic failed and this is
1046 					 * IDENTIFY, it's likely a phantom
1047 					 * device.  Mark hint.
1048 					 */
1049 					if (qc->dev->quirks &
1050 					    ATA_QUIRK_DIAGNOSTIC)
1051 						qc->err_mask |=
1052 							AC_ERR_NODEV_HINT;
1053 				} else {
1054 					/* HSM violation. Let EH handle this.
1055 					 * Phantom devices also trigger this
1056 					 * condition.  Mark hint.
1057 					 */
1058 					ata_ehi_push_desc(ehi, "ST-ATA: "
1059 						"DRQ=0 without device error, "
1060 						"dev_stat 0x%X", status);
1061 					qc->err_mask |= AC_ERR_HSM |
1062 							AC_ERR_NODEV_HINT;
1063 				}
1064 
1065 				ap->hsm_task_state = HSM_ST_ERR;
1066 				goto fsm_start;
1067 			}
1068 
1069 			/* For PIO reads, some devices may ask for
1070 			 * data transfer (DRQ=1) alone with ERR=1.
1071 			 * We respect DRQ here and transfer one
1072 			 * block of junk data before changing the
1073 			 * hsm_task_state to HSM_ST_ERR.
1074 			 *
1075 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
1076 			 * sense since the data block has been
1077 			 * transferred to the device.
1078 			 */
1079 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
1080 				/* data might be corrputed */
1081 				qc->err_mask |= AC_ERR_DEV;
1082 
1083 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1084 					ata_pio_sectors(qc);
1085 					status = ata_wait_idle(ap);
1086 				}
1087 
1088 				if (status & (ATA_BUSY | ATA_DRQ)) {
1089 					ata_ehi_push_desc(ehi, "ST-ATA: "
1090 						"BUSY|DRQ persists on ERR|DF, "
1091 						"dev_stat 0x%X", status);
1092 					qc->err_mask |= AC_ERR_HSM;
1093 				}
1094 
1095 				/* There are oddball controllers with
1096 				 * status register stuck at 0x7f and
1097 				 * lbal/m/h at zero which makes it
1098 				 * pass all other presence detection
1099 				 * mechanisms we have.  Set NODEV_HINT
1100 				 * for it.  Kernel bz#7241.
1101 				 */
1102 				if (status == 0x7f)
1103 					qc->err_mask |= AC_ERR_NODEV_HINT;
1104 
1105 				/* ata_pio_sectors() might change the
1106 				 * state to HSM_ST_LAST. so, the state
1107 				 * is changed after ata_pio_sectors().
1108 				 */
1109 				ap->hsm_task_state = HSM_ST_ERR;
1110 				goto fsm_start;
1111 			}
1112 
1113 			ata_pio_sectors(qc);
1114 
1115 			if (ap->hsm_task_state == HSM_ST_LAST &&
1116 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1117 				/* all data read */
1118 				status = ata_wait_idle(ap);
1119 				goto fsm_start;
1120 			}
1121 		}
1122 
1123 		poll_next = 1;
1124 		break;
1125 
1126 	case HSM_ST_LAST:
1127 		if (unlikely(!ata_ok(status))) {
1128 			qc->err_mask |= __ac_err_mask(status);
1129 			ap->hsm_task_state = HSM_ST_ERR;
1130 			goto fsm_start;
1131 		}
1132 
1133 		/* no more data to transfer */
1134 		trace_ata_sff_hsm_command_complete(qc, status);
1135 
1136 		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1137 
1138 		ap->hsm_task_state = HSM_ST_IDLE;
1139 
1140 		/* complete taskfile transaction */
1141 		ata_hsm_qc_complete(qc, in_wq);
1142 
1143 		poll_next = 0;
1144 		break;
1145 
1146 	case HSM_ST_ERR:
1147 		ap->hsm_task_state = HSM_ST_IDLE;
1148 
1149 		/* complete taskfile transaction */
1150 		ata_hsm_qc_complete(qc, in_wq);
1151 
1152 		poll_next = 0;
1153 		break;
1154 	default:
1155 		poll_next = 0;
1156 		WARN(true, "ata%d: SFF host state machine in invalid state %d",
1157 		     ap->print_id, ap->hsm_task_state);
1158 	}
1159 
1160 	return poll_next;
1161 }
1162 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1163 
ata_sff_queue_work(struct work_struct * work)1164 void ata_sff_queue_work(struct work_struct *work)
1165 {
1166 	queue_work(ata_sff_wq, work);
1167 }
1168 EXPORT_SYMBOL_GPL(ata_sff_queue_work);
1169 
ata_sff_queue_delayed_work(struct delayed_work * dwork,unsigned long delay)1170 void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
1171 {
1172 	queue_delayed_work(ata_sff_wq, dwork, delay);
1173 }
1174 EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
1175 
ata_sff_queue_pio_task(struct ata_link * link,unsigned long delay)1176 void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1177 {
1178 	struct ata_port *ap = link->ap;
1179 
1180 	WARN_ON((ap->sff_pio_task_link != NULL) &&
1181 		(ap->sff_pio_task_link != link));
1182 	ap->sff_pio_task_link = link;
1183 
1184 	/* may fail if ata_sff_flush_pio_task() in progress */
1185 	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
1186 }
1187 EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
1188 
ata_sff_flush_pio_task(struct ata_port * ap)1189 void ata_sff_flush_pio_task(struct ata_port *ap)
1190 {
1191 	trace_ata_sff_flush_pio_task(ap);
1192 
1193 	cancel_delayed_work_sync(&ap->sff_pio_task);
1194 
1195 	/*
1196 	 * We wanna reset the HSM state to IDLE.  If we do so without
1197 	 * grabbing the port lock, critical sections protected by it which
1198 	 * expect the HSM state to stay stable may get surprised.  For
1199 	 * example, we may set IDLE in between the time
1200 	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
1201 	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
1202 	 */
1203 	spin_lock_irq(ap->lock);
1204 	ap->hsm_task_state = HSM_ST_IDLE;
1205 	spin_unlock_irq(ap->lock);
1206 
1207 	ap->sff_pio_task_link = NULL;
1208 }
1209 
ata_sff_pio_task(struct work_struct * work)1210 static void ata_sff_pio_task(struct work_struct *work)
1211 {
1212 	struct ata_port *ap =
1213 		container_of(work, struct ata_port, sff_pio_task.work);
1214 	struct ata_link *link = ap->sff_pio_task_link;
1215 	struct ata_queued_cmd *qc;
1216 	u8 status;
1217 	int poll_next;
1218 
1219 	spin_lock_irq(ap->lock);
1220 
1221 	BUG_ON(ap->sff_pio_task_link == NULL);
1222 	/* qc can be NULL if timeout occurred */
1223 	qc = ata_qc_from_tag(ap, link->active_tag);
1224 	if (!qc) {
1225 		ap->sff_pio_task_link = NULL;
1226 		goto out_unlock;
1227 	}
1228 
1229 fsm_start:
1230 	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1231 
1232 	/*
1233 	 * This is purely heuristic.  This is a fast path.
1234 	 * Sometimes when we enter, BSY will be cleared in
1235 	 * a chk-status or two.  If not, the drive is probably seeking
1236 	 * or something.  Snooze for a couple msecs, then
1237 	 * chk-status again.  If still busy, queue delayed work.
1238 	 */
1239 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1240 	if (status & ATA_BUSY) {
1241 		spin_unlock_irq(ap->lock);
1242 		ata_msleep(ap, 2);
1243 		spin_lock_irq(ap->lock);
1244 
1245 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1246 		if (status & ATA_BUSY) {
1247 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1248 			goto out_unlock;
1249 		}
1250 	}
1251 
1252 	/*
1253 	 * hsm_move() may trigger another command to be processed.
1254 	 * clean the link beforehand.
1255 	 */
1256 	ap->sff_pio_task_link = NULL;
1257 	/* move the HSM */
1258 	poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1259 
1260 	/* another command or interrupt handler
1261 	 * may be running at this point.
1262 	 */
1263 	if (poll_next)
1264 		goto fsm_start;
1265 out_unlock:
1266 	spin_unlock_irq(ap->lock);
1267 }
1268 
1269 /**
1270  *	ata_sff_qc_issue - issue taskfile to a SFF controller
1271  *	@qc: command to issue to device
1272  *
1273  *	This function issues a PIO or NODATA command to a SFF
1274  *	controller.
1275  *
1276  *	LOCKING:
1277  *	spin_lock_irqsave(host lock)
1278  *
1279  *	RETURNS:
1280  *	Zero on success, AC_ERR_* mask on failure
1281  */
ata_sff_qc_issue(struct ata_queued_cmd * qc)1282 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1283 {
1284 	struct ata_port *ap = qc->ap;
1285 	struct ata_link *link = qc->dev->link;
1286 
1287 	/* Use polling pio if the LLD doesn't handle
1288 	 * interrupt driven pio and atapi CDB interrupt.
1289 	 */
1290 	if (ap->flags & ATA_FLAG_PIO_POLLING)
1291 		qc->tf.flags |= ATA_TFLAG_POLLING;
1292 
1293 	/* select the device */
1294 	ata_dev_select(ap, qc->dev->devno, 1, 0);
1295 
1296 	/* start the command */
1297 	switch (qc->tf.protocol) {
1298 	case ATA_PROT_NODATA:
1299 		if (qc->tf.flags & ATA_TFLAG_POLLING)
1300 			ata_qc_set_polling(qc);
1301 
1302 		ata_tf_to_host(ap, &qc->tf, qc->tag);
1303 		ap->hsm_task_state = HSM_ST_LAST;
1304 
1305 		if (qc->tf.flags & ATA_TFLAG_POLLING)
1306 			ata_sff_queue_pio_task(link, 0);
1307 
1308 		break;
1309 
1310 	case ATA_PROT_PIO:
1311 		if (qc->tf.flags & ATA_TFLAG_POLLING)
1312 			ata_qc_set_polling(qc);
1313 
1314 		ata_tf_to_host(ap, &qc->tf, qc->tag);
1315 
1316 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
1317 			/* PIO data out protocol */
1318 			ap->hsm_task_state = HSM_ST_FIRST;
1319 			ata_sff_queue_pio_task(link, 0);
1320 
1321 			/* always send first data block using the
1322 			 * ata_sff_pio_task() codepath.
1323 			 */
1324 		} else {
1325 			/* PIO data in protocol */
1326 			ap->hsm_task_state = HSM_ST;
1327 
1328 			if (qc->tf.flags & ATA_TFLAG_POLLING)
1329 				ata_sff_queue_pio_task(link, 0);
1330 
1331 			/* if polling, ata_sff_pio_task() handles the
1332 			 * rest.  otherwise, interrupt handler takes
1333 			 * over from here.
1334 			 */
1335 		}
1336 
1337 		break;
1338 
1339 	case ATAPI_PROT_PIO:
1340 	case ATAPI_PROT_NODATA:
1341 		if (qc->tf.flags & ATA_TFLAG_POLLING)
1342 			ata_qc_set_polling(qc);
1343 
1344 		ata_tf_to_host(ap, &qc->tf, qc->tag);
1345 
1346 		ap->hsm_task_state = HSM_ST_FIRST;
1347 
1348 		/* send cdb by polling if no cdb interrupt */
1349 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1350 		    (qc->tf.flags & ATA_TFLAG_POLLING))
1351 			ata_sff_queue_pio_task(link, 0);
1352 		break;
1353 
1354 	default:
1355 		return AC_ERR_SYSTEM;
1356 	}
1357 
1358 	return 0;
1359 }
1360 EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
1361 
1362 /**
1363  *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1364  *	@qc: qc to fill result TF for
1365  *
1366  *	@qc is finished and result TF needs to be filled.  Fill it
1367  *	using ->sff_tf_read.
1368  *
1369  *	LOCKING:
1370  *	spin_lock_irqsave(host lock)
1371  */
ata_sff_qc_fill_rtf(struct ata_queued_cmd * qc)1372 void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1373 {
1374 	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1375 }
1376 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1377 
ata_sff_idle_irq(struct ata_port * ap)1378 static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1379 {
1380 	ap->stats.idle_irq++;
1381 
1382 #ifdef ATA_IRQ_TRAP
1383 	if ((ap->stats.idle_irq % 1000) == 0) {
1384 		ap->ops->sff_check_status(ap);
1385 		if (ap->ops->sff_irq_clear)
1386 			ap->ops->sff_irq_clear(ap);
1387 		ata_port_warn(ap, "irq trap\n");
1388 		return 1;
1389 	}
1390 #endif
1391 	return 0;	/* irq not handled */
1392 }
1393 
__ata_sff_port_intr(struct ata_port * ap,struct ata_queued_cmd * qc,bool hsmv_on_idle)1394 static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1395 					struct ata_queued_cmd *qc,
1396 					bool hsmv_on_idle)
1397 {
1398 	u8 status;
1399 
1400 	trace_ata_sff_port_intr(qc, hsmv_on_idle);
1401 
1402 	/* Check whether we are expecting interrupt in this state */
1403 	switch (ap->hsm_task_state) {
1404 	case HSM_ST_FIRST:
1405 		/* Some pre-ATAPI-4 devices assert INTRQ
1406 		 * at this state when ready to receive CDB.
1407 		 */
1408 
1409 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
1410 		 * The flag was turned on only for atapi devices.  No
1411 		 * need to check ata_is_atapi(qc->tf.protocol) again.
1412 		 */
1413 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1414 			return ata_sff_idle_irq(ap);
1415 		break;
1416 	case HSM_ST_IDLE:
1417 		return ata_sff_idle_irq(ap);
1418 	default:
1419 		break;
1420 	}
1421 
1422 	/* check main status, clearing INTRQ if needed */
1423 	status = ata_sff_irq_status(ap);
1424 	if (status & ATA_BUSY) {
1425 		if (hsmv_on_idle) {
1426 			/* BMDMA engine is already stopped, we're screwed */
1427 			qc->err_mask |= AC_ERR_HSM;
1428 			ap->hsm_task_state = HSM_ST_ERR;
1429 		} else
1430 			return ata_sff_idle_irq(ap);
1431 	}
1432 
1433 	/* clear irq events */
1434 	if (ap->ops->sff_irq_clear)
1435 		ap->ops->sff_irq_clear(ap);
1436 
1437 	ata_sff_hsm_move(ap, qc, status, 0);
1438 
1439 	return 1;	/* irq handled */
1440 }
1441 
1442 /**
1443  *	ata_sff_port_intr - Handle SFF port interrupt
1444  *	@ap: Port on which interrupt arrived (possibly...)
1445  *	@qc: Taskfile currently active in engine
1446  *
1447  *	Handle port interrupt for given queued command.
1448  *
1449  *	LOCKING:
1450  *	spin_lock_irqsave(host lock)
1451  *
1452  *	RETURNS:
1453  *	One if interrupt was handled, zero if not (shared irq).
1454  */
ata_sff_port_intr(struct ata_port * ap,struct ata_queued_cmd * qc)1455 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1456 {
1457 	return __ata_sff_port_intr(ap, qc, false);
1458 }
1459 EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1460 
__ata_sff_interrupt(int irq,void * dev_instance,unsigned int (* port_intr)(struct ata_port *,struct ata_queued_cmd *))1461 static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1462 	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1463 {
1464 	struct ata_host *host = dev_instance;
1465 	bool retried = false;
1466 	unsigned int i;
1467 	unsigned int handled, idle, polling;
1468 	unsigned long flags;
1469 
1470 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1471 	spin_lock_irqsave(&host->lock, flags);
1472 
1473 retry:
1474 	handled = idle = polling = 0;
1475 	for (i = 0; i < host->n_ports; i++) {
1476 		struct ata_port *ap = host->ports[i];
1477 		struct ata_queued_cmd *qc;
1478 
1479 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1480 		if (qc) {
1481 			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1482 				handled |= port_intr(ap, qc);
1483 			else
1484 				polling |= 1 << i;
1485 		} else
1486 			idle |= 1 << i;
1487 	}
1488 
1489 	/*
1490 	 * If no port was expecting IRQ but the controller is actually
1491 	 * asserting IRQ line, nobody cared will ensue.  Check IRQ
1492 	 * pending status if available and clear spurious IRQ.
1493 	 */
1494 	if (!handled && !retried) {
1495 		bool retry = false;
1496 
1497 		for (i = 0; i < host->n_ports; i++) {
1498 			struct ata_port *ap = host->ports[i];
1499 
1500 			if (polling & (1 << i))
1501 				continue;
1502 
1503 			if (!ap->ops->sff_irq_check ||
1504 			    !ap->ops->sff_irq_check(ap))
1505 				continue;
1506 
1507 			if (idle & (1 << i)) {
1508 				ap->ops->sff_check_status(ap);
1509 				if (ap->ops->sff_irq_clear)
1510 					ap->ops->sff_irq_clear(ap);
1511 			} else {
1512 				/* clear INTRQ and check if BUSY cleared */
1513 				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1514 					retry |= true;
1515 				/*
1516 				 * With command in flight, we can't do
1517 				 * sff_irq_clear() w/o racing with completion.
1518 				 */
1519 			}
1520 		}
1521 
1522 		if (retry) {
1523 			retried = true;
1524 			goto retry;
1525 		}
1526 	}
1527 
1528 	spin_unlock_irqrestore(&host->lock, flags);
1529 
1530 	return IRQ_RETVAL(handled);
1531 }
1532 
1533 /**
1534  *	ata_sff_interrupt - Default SFF ATA host interrupt handler
1535  *	@irq: irq line (unused)
1536  *	@dev_instance: pointer to our ata_host information structure
1537  *
1538  *	Default interrupt handler for PCI IDE devices.  Calls
1539  *	ata_sff_port_intr() for each port that is not disabled.
1540  *
1541  *	LOCKING:
1542  *	Obtains host lock during operation.
1543  *
1544  *	RETURNS:
1545  *	IRQ_NONE or IRQ_HANDLED.
1546  */
ata_sff_interrupt(int irq,void * dev_instance)1547 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1548 {
1549 	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1550 }
1551 EXPORT_SYMBOL_GPL(ata_sff_interrupt);
1552 
1553 /**
1554  *	ata_sff_lost_interrupt	-	Check for an apparent lost interrupt
1555  *	@ap: port that appears to have timed out
1556  *
1557  *	Called from the libata error handlers when the core code suspects
1558  *	an interrupt has been lost. If it has complete anything we can and
1559  *	then return. Interface must support altstatus for this faster
1560  *	recovery to occur.
1561  *
1562  *	Locking:
1563  *	Caller holds host lock
1564  */
1565 
ata_sff_lost_interrupt(struct ata_port * ap)1566 void ata_sff_lost_interrupt(struct ata_port *ap)
1567 {
1568 	u8 status = 0;
1569 	struct ata_queued_cmd *qc;
1570 
1571 	/* Only one outstanding command per SFF channel */
1572 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1573 	/* We cannot lose an interrupt on a non-existent or polled command */
1574 	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1575 		return;
1576 	/* See if the controller thinks it is still busy - if so the command
1577 	   isn't a lost IRQ but is still in progress */
1578 	if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
1579 		return;
1580 	if (status & ATA_BUSY)
1581 		return;
1582 
1583 	/* There was a command running, we are no longer busy and we have
1584 	   no interrupt. */
1585 	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
1586 	/* Run the host interrupt logic as if the interrupt had not been
1587 	   lost */
1588 	ata_sff_port_intr(ap, qc);
1589 }
1590 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1591 
1592 /**
1593  *	ata_sff_freeze - Freeze SFF controller port
1594  *	@ap: port to freeze
1595  *
1596  *	Freeze SFF controller port.
1597  *
1598  *	LOCKING:
1599  *	Inherited from caller.
1600  */
ata_sff_freeze(struct ata_port * ap)1601 void ata_sff_freeze(struct ata_port *ap)
1602 {
1603 	ap->ctl |= ATA_NIEN;
1604 	ap->last_ctl = ap->ctl;
1605 
1606 	ata_sff_set_devctl(ap, ap->ctl);
1607 
1608 	/* Under certain circumstances, some controllers raise IRQ on
1609 	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
1610 	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
1611 	 */
1612 	ap->ops->sff_check_status(ap);
1613 
1614 	if (ap->ops->sff_irq_clear)
1615 		ap->ops->sff_irq_clear(ap);
1616 }
1617 EXPORT_SYMBOL_GPL(ata_sff_freeze);
1618 
1619 /**
1620  *	ata_sff_thaw - Thaw SFF controller port
1621  *	@ap: port to thaw
1622  *
1623  *	Thaw SFF controller port.
1624  *
1625  *	LOCKING:
1626  *	Inherited from caller.
1627  */
ata_sff_thaw(struct ata_port * ap)1628 void ata_sff_thaw(struct ata_port *ap)
1629 {
1630 	/* clear & re-enable interrupts */
1631 	ap->ops->sff_check_status(ap);
1632 	if (ap->ops->sff_irq_clear)
1633 		ap->ops->sff_irq_clear(ap);
1634 	ata_sff_irq_on(ap);
1635 }
1636 EXPORT_SYMBOL_GPL(ata_sff_thaw);
1637 
1638 /**
1639  *	ata_sff_prereset - prepare SFF link for reset
1640  *	@link: SFF link to be reset
1641  *	@deadline: deadline jiffies for the operation
1642  *
1643  *	SFF link @link is about to be reset.  Initialize it.  It first
1644  *	calls ata_std_prereset() and wait for !BSY if the port is
1645  *	being softreset.
1646  *
1647  *	LOCKING:
1648  *	Kernel thread context (may sleep)
1649  *
1650  *	RETURNS:
1651  *	Always 0.
1652  */
ata_sff_prereset(struct ata_link * link,unsigned long deadline)1653 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1654 {
1655 	struct ata_eh_context *ehc = &link->eh_context;
1656 	int rc;
1657 
1658 	/* The standard prereset is best-effort and always returns 0 */
1659 	ata_std_prereset(link, deadline);
1660 
1661 	/* if we're about to do hardreset, nothing more to do */
1662 	if (ehc->i.action & ATA_EH_HARDRESET)
1663 		return 0;
1664 
1665 	/* wait for !BSY if we don't know that no device is attached */
1666 	if (!ata_link_offline(link)) {
1667 		rc = ata_sff_wait_ready(link, deadline);
1668 		if (rc && rc != -ENODEV) {
1669 			ata_link_warn(link,
1670 				      "device not ready (errno=%d), forcing hardreset\n",
1671 				      rc);
1672 			ehc->i.action |= ATA_EH_HARDRESET;
1673 		}
1674 	}
1675 
1676 	return 0;
1677 }
1678 EXPORT_SYMBOL_GPL(ata_sff_prereset);
1679 
1680 /**
1681  *	ata_devchk - PATA device presence detection
1682  *	@ap: ATA channel to examine
1683  *	@device: Device to examine (starting at zero)
1684  *
1685  *	This technique was originally described in
1686  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
1687  *	later found its way into the ATA/ATAPI spec.
1688  *
1689  *	Write a pattern to the ATA shadow registers,
1690  *	and if a device is present, it will respond by
1691  *	correctly storing and echoing back the
1692  *	ATA shadow register contents.
1693  *
1694  *	RETURN:
1695  *	true if device is present, false if not.
1696  *
1697  *	LOCKING:
1698  *	caller.
1699  */
ata_devchk(struct ata_port * ap,unsigned int device)1700 static bool ata_devchk(struct ata_port *ap, unsigned int device)
1701 {
1702 	struct ata_ioports *ioaddr = &ap->ioaddr;
1703 	u8 nsect, lbal;
1704 
1705 	ap->ops->sff_dev_select(ap, device);
1706 
1707 	iowrite8(0x55, ioaddr->nsect_addr);
1708 	iowrite8(0xaa, ioaddr->lbal_addr);
1709 
1710 	iowrite8(0xaa, ioaddr->nsect_addr);
1711 	iowrite8(0x55, ioaddr->lbal_addr);
1712 
1713 	iowrite8(0x55, ioaddr->nsect_addr);
1714 	iowrite8(0xaa, ioaddr->lbal_addr);
1715 
1716 	nsect = ioread8(ioaddr->nsect_addr);
1717 	lbal = ioread8(ioaddr->lbal_addr);
1718 
1719 	if ((nsect == 0x55) && (lbal == 0xaa))
1720 		return true;	/* we found a device */
1721 
1722 	return false;		/* nothing found */
1723 }
1724 
1725 /**
1726  *	ata_sff_dev_classify - Parse returned ATA device signature
1727  *	@dev: ATA device to classify (starting at zero)
1728  *	@present: device seems present
1729  *	@r_err: Value of error register on completion
1730  *
1731  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1732  *	an ATA/ATAPI-defined set of values is placed in the ATA
1733  *	shadow registers, indicating the results of device detection
1734  *	and diagnostics.
1735  *
1736  *	Select the ATA device, and read the values from the ATA shadow
1737  *	registers.  Then parse according to the Error register value,
1738  *	and the spec-defined values examined by ata_dev_classify().
1739  *
1740  *	LOCKING:
1741  *	caller.
1742  *
1743  *	RETURNS:
1744  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1745  */
ata_sff_dev_classify(struct ata_device * dev,int present,u8 * r_err)1746 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1747 				  u8 *r_err)
1748 {
1749 	struct ata_port *ap = dev->link->ap;
1750 	struct ata_taskfile tf;
1751 	unsigned int class;
1752 	u8 err;
1753 
1754 	ap->ops->sff_dev_select(ap, dev->devno);
1755 
1756 	memset(&tf, 0, sizeof(tf));
1757 
1758 	ap->ops->sff_tf_read(ap, &tf);
1759 	err = tf.error;
1760 	if (r_err)
1761 		*r_err = err;
1762 
1763 	/* see if device passed diags: continue and warn later */
1764 	if (err == 0)
1765 		/* diagnostic fail : do nothing _YET_ */
1766 		dev->quirks |= ATA_QUIRK_DIAGNOSTIC;
1767 	else if (err == 1)
1768 		/* do nothing */ ;
1769 	else if ((dev->devno == 0) && (err == 0x81))
1770 		/* do nothing */ ;
1771 	else
1772 		return ATA_DEV_NONE;
1773 
1774 	/* determine if device is ATA or ATAPI */
1775 	class = ata_port_classify(ap, &tf);
1776 	switch (class) {
1777 	case ATA_DEV_UNKNOWN:
1778 		/*
1779 		 * If the device failed diagnostic, it's likely to
1780 		 * have reported incorrect device signature too.
1781 		 * Assume ATA device if the device seems present but
1782 		 * device signature is invalid with diagnostic
1783 		 * failure.
1784 		 */
1785 		if (present && (dev->quirks & ATA_QUIRK_DIAGNOSTIC))
1786 			class = ATA_DEV_ATA;
1787 		else
1788 			class = ATA_DEV_NONE;
1789 		break;
1790 	case ATA_DEV_ATA:
1791 		if (ap->ops->sff_check_status(ap) == 0)
1792 			class = ATA_DEV_NONE;
1793 		break;
1794 	}
1795 	return class;
1796 }
1797 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
1798 
1799 /**
1800  *	ata_sff_wait_after_reset - wait for devices to become ready after reset
1801  *	@link: SFF link which is just reset
1802  *	@devmask: mask of present devices
1803  *	@deadline: deadline jiffies for the operation
1804  *
1805  *	Wait devices attached to SFF @link to become ready after
1806  *	reset.  It contains preceding 150ms wait to avoid accessing TF
1807  *	status register too early.
1808  *
1809  *	LOCKING:
1810  *	Kernel thread context (may sleep).
1811  *
1812  *	RETURNS:
1813  *	0 on success, -ENODEV if some or all of devices in @devmask
1814  *	don't seem to exist.  -errno on other errors.
1815  */
ata_sff_wait_after_reset(struct ata_link * link,unsigned int devmask,unsigned long deadline)1816 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1817 			     unsigned long deadline)
1818 {
1819 	struct ata_port *ap = link->ap;
1820 	struct ata_ioports *ioaddr = &ap->ioaddr;
1821 	unsigned int dev0 = devmask & (1 << 0);
1822 	unsigned int dev1 = devmask & (1 << 1);
1823 	int rc, ret = 0;
1824 
1825 	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1826 
1827 	/* always check readiness of the master device */
1828 	rc = ata_sff_wait_ready(link, deadline);
1829 	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
1830 	 * and TF status is 0xff, bail out on it too.
1831 	 */
1832 	if (rc)
1833 		return rc;
1834 
1835 	/* if device 1 was found in ata_devchk, wait for register
1836 	 * access briefly, then wait for BSY to clear.
1837 	 */
1838 	if (dev1) {
1839 		int i;
1840 
1841 		ap->ops->sff_dev_select(ap, 1);
1842 
1843 		/* Wait for register access.  Some ATAPI devices fail
1844 		 * to set nsect/lbal after reset, so don't waste too
1845 		 * much time on it.  We're gonna wait for !BSY anyway.
1846 		 */
1847 		for (i = 0; i < 2; i++) {
1848 			u8 nsect, lbal;
1849 
1850 			nsect = ioread8(ioaddr->nsect_addr);
1851 			lbal = ioread8(ioaddr->lbal_addr);
1852 			if ((nsect == 1) && (lbal == 1))
1853 				break;
1854 			ata_msleep(ap, 50);	/* give drive a breather */
1855 		}
1856 
1857 		rc = ata_sff_wait_ready(link, deadline);
1858 		if (rc) {
1859 			if (rc != -ENODEV)
1860 				return rc;
1861 			ret = rc;
1862 		}
1863 	}
1864 
1865 	/* is all this really necessary? */
1866 	ap->ops->sff_dev_select(ap, 0);
1867 	if (dev1)
1868 		ap->ops->sff_dev_select(ap, 1);
1869 	if (dev0)
1870 		ap->ops->sff_dev_select(ap, 0);
1871 
1872 	return ret;
1873 }
1874 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1875 
ata_bus_softreset(struct ata_port * ap,unsigned int devmask,unsigned long deadline)1876 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1877 			     unsigned long deadline)
1878 {
1879 	struct ata_ioports *ioaddr = &ap->ioaddr;
1880 
1881 	if (ap->ioaddr.ctl_addr) {
1882 		/* software reset.  causes dev0 to be selected */
1883 		iowrite8(ap->ctl, ioaddr->ctl_addr);
1884 		udelay(20);	/* FIXME: flush */
1885 		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1886 		udelay(20);	/* FIXME: flush */
1887 		iowrite8(ap->ctl, ioaddr->ctl_addr);
1888 		ap->last_ctl = ap->ctl;
1889 	}
1890 
1891 	/* wait the port to become ready */
1892 	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1893 }
1894 
1895 /**
1896  *	ata_sff_softreset - reset host port via ATA SRST
1897  *	@link: ATA link to reset
1898  *	@classes: resulting classes of attached devices
1899  *	@deadline: deadline jiffies for the operation
1900  *
1901  *	Reset host port using ATA SRST.
1902  *
1903  *	LOCKING:
1904  *	Kernel thread context (may sleep)
1905  *
1906  *	RETURNS:
1907  *	0 on success, -errno otherwise.
1908  */
ata_sff_softreset(struct ata_link * link,unsigned int * classes,unsigned long deadline)1909 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1910 		      unsigned long deadline)
1911 {
1912 	struct ata_port *ap = link->ap;
1913 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1914 	unsigned int devmask = 0;
1915 	int rc;
1916 	u8 err;
1917 
1918 	/* determine if device 0/1 are present */
1919 	if (ata_devchk(ap, 0))
1920 		devmask |= (1 << 0);
1921 	if (slave_possible && ata_devchk(ap, 1))
1922 		devmask |= (1 << 1);
1923 
1924 	/* select device 0 again */
1925 	ap->ops->sff_dev_select(ap, 0);
1926 
1927 	/* issue bus reset */
1928 	rc = ata_bus_softreset(ap, devmask, deadline);
1929 	/* if link is occupied, -ENODEV too is an error */
1930 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
1931 		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
1932 		return rc;
1933 	}
1934 
1935 	/* determine by signature whether we have ATA or ATAPI devices */
1936 	classes[0] = ata_sff_dev_classify(&link->device[0],
1937 					  devmask & (1 << 0), &err);
1938 	if (slave_possible && err != 0x81)
1939 		classes[1] = ata_sff_dev_classify(&link->device[1],
1940 						  devmask & (1 << 1), &err);
1941 
1942 	return 0;
1943 }
1944 EXPORT_SYMBOL_GPL(ata_sff_softreset);
1945 
1946 /**
1947  *	sata_sff_hardreset - reset host port via SATA phy reset
1948  *	@link: link to reset
1949  *	@class: resulting class of attached device
1950  *	@deadline: deadline jiffies for the operation
1951  *
1952  *	SATA phy-reset host port using DET bits of SControl register,
1953  *	wait for !BSY and classify the attached device.
1954  *
1955  *	LOCKING:
1956  *	Kernel thread context (may sleep)
1957  *
1958  *	RETURNS:
1959  *	0 on success, -errno otherwise.
1960  */
sata_sff_hardreset(struct ata_link * link,unsigned int * class,unsigned long deadline)1961 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
1962 		       unsigned long deadline)
1963 {
1964 	struct ata_eh_context *ehc = &link->eh_context;
1965 	const unsigned int *timing = sata_ehc_deb_timing(ehc);
1966 	bool online;
1967 	int rc;
1968 
1969 	rc = sata_link_hardreset(link, timing, deadline, &online,
1970 				 ata_sff_check_ready);
1971 	if (online)
1972 		*class = ata_sff_dev_classify(link->device, 1, NULL);
1973 
1974 	return rc;
1975 }
1976 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
1977 
1978 /**
1979  *	ata_sff_postreset - SFF postreset callback
1980  *	@link: the target SFF ata_link
1981  *	@classes: classes of attached devices
1982  *
1983  *	This function is invoked after a successful reset.  It first
1984  *	calls ata_std_postreset() and performs SFF specific postreset
1985  *	processing.
1986  *
1987  *	LOCKING:
1988  *	Kernel thread context (may sleep)
1989  */
ata_sff_postreset(struct ata_link * link,unsigned int * classes)1990 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
1991 {
1992 	struct ata_port *ap = link->ap;
1993 
1994 	ata_std_postreset(link, classes);
1995 
1996 	/* is double-select really necessary? */
1997 	if (classes[0] != ATA_DEV_NONE)
1998 		ap->ops->sff_dev_select(ap, 1);
1999 	if (classes[1] != ATA_DEV_NONE)
2000 		ap->ops->sff_dev_select(ap, 0);
2001 
2002 	/* bail out if no device is present */
2003 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
2004 		return;
2005 
2006 	/* set up device control */
2007 	if (ata_sff_set_devctl(ap, ap->ctl))
2008 		ap->last_ctl = ap->ctl;
2009 }
2010 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2011 
2012 /**
2013  *	ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2014  *	@qc: command
2015  *
2016  *	Drain the FIFO and device of any stuck data following a command
2017  *	failing to complete. In some cases this is necessary before a
2018  *	reset will recover the device.
2019  *
2020  */
2021 
ata_sff_drain_fifo(struct ata_queued_cmd * qc)2022 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2023 {
2024 	int count;
2025 	struct ata_port *ap;
2026 
2027 	/* We only need to flush incoming data when a command was running */
2028 	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2029 		return;
2030 
2031 	ap = qc->ap;
2032 	/* Drain up to 64K of data before we give up this recovery method */
2033 	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2034 						&& count < 65536; count += 2)
2035 		ioread16(ap->ioaddr.data_addr);
2036 
2037 	if (count)
2038 		ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
2039 
2040 }
2041 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2042 
2043 /**
2044  *	ata_sff_error_handler - Stock error handler for SFF controller
2045  *	@ap: port to handle error for
2046  *
2047  *	Stock error handler for SFF controller.  It can handle both
2048  *	PATA and SATA controllers.  Many controllers should be able to
2049  *	use this EH as-is or with some added handling before and
2050  *	after.
2051  *
2052  *	LOCKING:
2053  *	Kernel thread context (may sleep)
2054  */
ata_sff_error_handler(struct ata_port * ap)2055 void ata_sff_error_handler(struct ata_port *ap)
2056 {
2057 	ata_reset_fn_t softreset = ap->ops->softreset;
2058 	ata_reset_fn_t hardreset = ap->ops->hardreset;
2059 	struct ata_queued_cmd *qc;
2060 	unsigned long flags;
2061 
2062 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2063 	if (qc && !(qc->flags & ATA_QCFLAG_EH))
2064 		qc = NULL;
2065 
2066 	spin_lock_irqsave(ap->lock, flags);
2067 
2068 	/*
2069 	 * We *MUST* do FIFO draining before we issue a reset as
2070 	 * several devices helpfully clear their internal state and
2071 	 * will lock solid if we touch the data port post reset. Pass
2072 	 * qc in case anyone wants to do different PIO/DMA recovery or
2073 	 * has per command fixups
2074 	 */
2075 	if (ap->ops->sff_drain_fifo)
2076 		ap->ops->sff_drain_fifo(qc);
2077 
2078 	spin_unlock_irqrestore(ap->lock, flags);
2079 
2080 	/* ignore built-in hardresets if SCR access is not available */
2081 	if ((hardreset == sata_std_hardreset ||
2082 	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2083 		hardreset = NULL;
2084 
2085 	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2086 		  ap->ops->postreset);
2087 }
2088 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2089 
2090 /**
2091  *	ata_sff_std_ports - initialize ioaddr with standard port offsets.
2092  *	@ioaddr: IO address structure to be initialized
2093  *
2094  *	Utility function which initializes data_addr, error_addr,
2095  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2096  *	device_addr, status_addr, and command_addr to standard offsets
2097  *	relative to cmd_addr.
2098  *
2099  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2100  */
ata_sff_std_ports(struct ata_ioports * ioaddr)2101 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2102 {
2103 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2104 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2105 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2106 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2107 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2108 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2109 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2110 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2111 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2112 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2113 }
2114 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2115 
2116 #ifdef CONFIG_PCI
2117 
ata_resources_present(struct pci_dev * pdev,int port)2118 static bool ata_resources_present(struct pci_dev *pdev, int port)
2119 {
2120 	int i;
2121 
2122 	/* Check the PCI resources for this channel are enabled */
2123 	port *= 2;
2124 	for (i = 0; i < 2; i++) {
2125 		if (pci_resource_start(pdev, port + i) == 0 ||
2126 		    pci_resource_len(pdev, port + i) == 0)
2127 			return false;
2128 	}
2129 	return true;
2130 }
2131 
2132 /**
2133  *	ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2134  *	@host: target ATA host
2135  *
2136  *	Acquire native PCI ATA resources for @host and initialize the
2137  *	first two ports of @host accordingly.  Ports marked dummy are
2138  *	skipped and allocation failure makes the port dummy.
2139  *
2140  *	Note that native PCI resources are valid even for legacy hosts
2141  *	as we fix up pdev resources array early in boot, so this
2142  *	function can be used for both native and legacy SFF hosts.
2143  *
2144  *	LOCKING:
2145  *	Inherited from calling layer (may sleep).
2146  *
2147  *	RETURNS:
2148  *	0 if at least one port is initialized, -ENODEV if no port is
2149  *	available.
2150  */
ata_pci_sff_init_host(struct ata_host * host)2151 int ata_pci_sff_init_host(struct ata_host *host)
2152 {
2153 	struct device *gdev = host->dev;
2154 	struct pci_dev *pdev = to_pci_dev(gdev);
2155 	unsigned int mask = 0;
2156 	int i, rc;
2157 
2158 	/* request, iomap BARs and init port addresses accordingly */
2159 	for (i = 0; i < 2; i++) {
2160 		struct ata_port *ap = host->ports[i];
2161 		int base = i * 2;
2162 		void __iomem * const *iomap;
2163 
2164 		if (ata_port_is_dummy(ap))
2165 			continue;
2166 
2167 		/* Discard disabled ports.  Some controllers show
2168 		 * their unused channels this way.  Disabled ports are
2169 		 * made dummy.
2170 		 */
2171 		if (!ata_resources_present(pdev, i)) {
2172 			ap->ops = &ata_dummy_port_ops;
2173 			continue;
2174 		}
2175 
2176 		rc = pcim_iomap_regions(pdev, 0x3 << base,
2177 					dev_driver_string(gdev));
2178 		if (rc) {
2179 			dev_warn(gdev,
2180 				 "failed to request/iomap BARs for port %d (errno=%d)\n",
2181 				 i, rc);
2182 			if (rc == -EBUSY)
2183 				pcim_pin_device(pdev);
2184 			ap->ops = &ata_dummy_port_ops;
2185 			continue;
2186 		}
2187 		host->iomap = iomap = pcim_iomap_table(pdev);
2188 
2189 		ap->ioaddr.cmd_addr = iomap[base];
2190 		ap->ioaddr.altstatus_addr =
2191 		ap->ioaddr.ctl_addr = (void __iomem *)
2192 			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2193 		ata_sff_std_ports(&ap->ioaddr);
2194 
2195 		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2196 			(unsigned long long)pci_resource_start(pdev, base),
2197 			(unsigned long long)pci_resource_start(pdev, base + 1));
2198 
2199 		mask |= 1 << i;
2200 	}
2201 
2202 	if (!mask) {
2203 		dev_err(gdev, "no available native port\n");
2204 		return -ENODEV;
2205 	}
2206 
2207 	return 0;
2208 }
2209 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2210 
2211 /**
2212  *	ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2213  *	@pdev: target PCI device
2214  *	@ppi: array of port_info, must be enough for two ports
2215  *	@r_host: out argument for the initialized ATA host
2216  *
2217  *	Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2218  *	all PCI resources and initialize it accordingly in one go.
2219  *
2220  *	LOCKING:
2221  *	Inherited from calling layer (may sleep).
2222  *
2223  *	RETURNS:
2224  *	0 on success, -errno otherwise.
2225  */
ata_pci_sff_prepare_host(struct pci_dev * pdev,const struct ata_port_info * const * ppi,struct ata_host ** r_host)2226 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2227 			     const struct ata_port_info * const *ppi,
2228 			     struct ata_host **r_host)
2229 {
2230 	struct ata_host *host;
2231 	int rc;
2232 
2233 	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2234 		return -ENOMEM;
2235 
2236 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2237 	if (!host) {
2238 		dev_err(&pdev->dev, "failed to allocate ATA host\n");
2239 		rc = -ENOMEM;
2240 		goto err_out;
2241 	}
2242 
2243 	rc = ata_pci_sff_init_host(host);
2244 	if (rc)
2245 		goto err_out;
2246 
2247 	devres_remove_group(&pdev->dev, NULL);
2248 	*r_host = host;
2249 	return 0;
2250 
2251 err_out:
2252 	devres_release_group(&pdev->dev, NULL);
2253 	return rc;
2254 }
2255 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2256 
2257 /**
2258  *	ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2259  *	@host: target SFF ATA host
2260  *	@irq_handler: irq_handler used when requesting IRQ(s)
2261  *	@sht: scsi_host_template to use when registering the host
2262  *
2263  *	This is the counterpart of ata_host_activate() for SFF ATA
2264  *	hosts.  This separate helper is necessary because SFF hosts
2265  *	use two separate interrupts in legacy mode.
2266  *
2267  *	LOCKING:
2268  *	Inherited from calling layer (may sleep).
2269  *
2270  *	RETURNS:
2271  *	0 on success, -errno otherwise.
2272  */
ata_pci_sff_activate_host(struct ata_host * host,irq_handler_t irq_handler,const struct scsi_host_template * sht)2273 int ata_pci_sff_activate_host(struct ata_host *host,
2274 			      irq_handler_t irq_handler,
2275 			      const struct scsi_host_template *sht)
2276 {
2277 	struct device *dev = host->dev;
2278 	struct pci_dev *pdev = to_pci_dev(dev);
2279 	const char *drv_name = dev_driver_string(host->dev);
2280 	int legacy_mode = 0, rc;
2281 
2282 	rc = ata_host_start(host);
2283 	if (rc)
2284 		return rc;
2285 
2286 	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2287 		u8 tmp8, mask = 0;
2288 
2289 		/*
2290 		 * ATA spec says we should use legacy mode when one
2291 		 * port is in legacy mode, but disabled ports on some
2292 		 * PCI hosts appear as fixed legacy ports, e.g SB600/700
2293 		 * on which the secondary port is not wired, so
2294 		 * ignore ports that are marked as 'dummy' during
2295 		 * this check
2296 		 */
2297 		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2298 		if (!ata_port_is_dummy(host->ports[0]))
2299 			mask |= (1 << 0);
2300 		if (!ata_port_is_dummy(host->ports[1]))
2301 			mask |= (1 << 2);
2302 		if ((tmp8 & mask) != mask)
2303 			legacy_mode = 1;
2304 	}
2305 
2306 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
2307 		return -ENOMEM;
2308 
2309 	if (!legacy_mode && pdev->irq) {
2310 		int i;
2311 
2312 		rc = devm_request_irq(dev, pdev->irq, irq_handler,
2313 				      IRQF_SHARED, drv_name, host);
2314 		if (rc)
2315 			goto out;
2316 
2317 		for (i = 0; i < 2; i++) {
2318 			if (ata_port_is_dummy(host->ports[i]))
2319 				continue;
2320 			ata_port_desc_misc(host->ports[i], pdev->irq);
2321 		}
2322 	} else if (legacy_mode) {
2323 		if (!ata_port_is_dummy(host->ports[0])) {
2324 			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2325 					      irq_handler, IRQF_SHARED,
2326 					      drv_name, host);
2327 			if (rc)
2328 				goto out;
2329 
2330 			ata_port_desc_misc(host->ports[0],
2331 					   ATA_PRIMARY_IRQ(pdev));
2332 		}
2333 
2334 		if (!ata_port_is_dummy(host->ports[1])) {
2335 			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2336 					      irq_handler, IRQF_SHARED,
2337 					      drv_name, host);
2338 			if (rc)
2339 				goto out;
2340 
2341 			ata_port_desc_misc(host->ports[1],
2342 					   ATA_SECONDARY_IRQ(pdev));
2343 		}
2344 	}
2345 
2346 	rc = ata_host_register(host, sht);
2347 out:
2348 	if (rc == 0)
2349 		devres_remove_group(dev, NULL);
2350 	else
2351 		devres_release_group(dev, NULL);
2352 
2353 	return rc;
2354 }
2355 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2356 
ata_sff_find_valid_pi(const struct ata_port_info * const * ppi)2357 static const struct ata_port_info *ata_sff_find_valid_pi(
2358 					const struct ata_port_info * const *ppi)
2359 {
2360 	int i;
2361 
2362 	/* look up the first valid port_info */
2363 	for (i = 0; i < 2 && ppi[i]; i++)
2364 		if (ppi[i]->port_ops != &ata_dummy_port_ops)
2365 			return ppi[i];
2366 
2367 	return NULL;
2368 }
2369 
ata_pci_init_one(struct pci_dev * pdev,const struct ata_port_info * const * ppi,const struct scsi_host_template * sht,void * host_priv,int hflags,bool bmdma)2370 static int ata_pci_init_one(struct pci_dev *pdev,
2371 		const struct ata_port_info * const *ppi,
2372 		const struct scsi_host_template *sht, void *host_priv,
2373 		int hflags, bool bmdma)
2374 {
2375 	struct device *dev = &pdev->dev;
2376 	const struct ata_port_info *pi;
2377 	struct ata_host *host = NULL;
2378 	int rc;
2379 
2380 	pi = ata_sff_find_valid_pi(ppi);
2381 	if (!pi) {
2382 		dev_err(&pdev->dev, "no valid port_info specified\n");
2383 		return -EINVAL;
2384 	}
2385 
2386 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
2387 		return -ENOMEM;
2388 
2389 	rc = pcim_enable_device(pdev);
2390 	if (rc)
2391 		goto out;
2392 
2393 #ifdef CONFIG_ATA_BMDMA
2394 	if (bmdma)
2395 		/* prepare and activate BMDMA host */
2396 		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2397 	else
2398 #endif
2399 		/* prepare and activate SFF host */
2400 		rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2401 	if (rc)
2402 		goto out;
2403 	host->private_data = host_priv;
2404 	host->flags |= hflags;
2405 
2406 #ifdef CONFIG_ATA_BMDMA
2407 	if (bmdma) {
2408 		pci_set_master(pdev);
2409 		rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
2410 	} else
2411 #endif
2412 		rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2413 out:
2414 	if (rc == 0)
2415 		devres_remove_group(&pdev->dev, NULL);
2416 	else
2417 		devres_release_group(&pdev->dev, NULL);
2418 
2419 	return rc;
2420 }
2421 
2422 /**
2423  *	ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2424  *	@pdev: Controller to be initialized
2425  *	@ppi: array of port_info, must be enough for two ports
2426  *	@sht: scsi_host_template to use when registering the host
2427  *	@host_priv: host private_data
2428  *	@hflag: host flags
2429  *
2430  *	This is a helper function which can be called from a driver's
2431  *	xxx_init_one() probe function if the hardware uses traditional
2432  *	IDE taskfile registers and is PIO only.
2433  *
2434  *	ASSUMPTION:
2435  *	Nobody makes a single channel controller that appears solely as
2436  *	the secondary legacy port on PCI.
2437  *
2438  *	LOCKING:
2439  *	Inherited from PCI layer (may sleep).
2440  *
2441  *	RETURNS:
2442  *	Zero on success, negative on errno-based value on error.
2443  */
ata_pci_sff_init_one(struct pci_dev * pdev,const struct ata_port_info * const * ppi,const struct scsi_host_template * sht,void * host_priv,int hflag)2444 int ata_pci_sff_init_one(struct pci_dev *pdev,
2445 		 const struct ata_port_info * const *ppi,
2446 		 const struct scsi_host_template *sht, void *host_priv, int hflag)
2447 {
2448 	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
2449 }
2450 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
2451 
2452 #endif /* CONFIG_PCI */
2453 
2454 /*
2455  *	BMDMA support
2456  */
2457 
2458 #ifdef CONFIG_ATA_BMDMA
2459 
2460 const struct ata_port_operations ata_bmdma_port_ops = {
2461 	.inherits		= &ata_sff_port_ops,
2462 
2463 	.error_handler		= ata_bmdma_error_handler,
2464 	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
2465 
2466 	.qc_prep		= ata_bmdma_qc_prep,
2467 	.qc_issue		= ata_bmdma_qc_issue,
2468 
2469 	.sff_irq_clear		= ata_bmdma_irq_clear,
2470 	.bmdma_setup		= ata_bmdma_setup,
2471 	.bmdma_start		= ata_bmdma_start,
2472 	.bmdma_stop		= ata_bmdma_stop,
2473 	.bmdma_status		= ata_bmdma_status,
2474 
2475 	.port_start		= ata_bmdma_port_start,
2476 };
2477 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2478 
2479 const struct ata_port_operations ata_bmdma32_port_ops = {
2480 	.inherits		= &ata_bmdma_port_ops,
2481 
2482 	.sff_data_xfer		= ata_sff_data_xfer32,
2483 	.port_start		= ata_bmdma_port_start32,
2484 };
2485 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2486 
2487 /**
2488  *	ata_bmdma_fill_sg - Fill PCI IDE PRD table
2489  *	@qc: Metadata associated with taskfile to be transferred
2490  *
2491  *	Fill PCI IDE PRD (scatter-gather) table with segments
2492  *	associated with the current disk command.
2493  *
2494  *	LOCKING:
2495  *	spin_lock_irqsave(host lock)
2496  *
2497  */
ata_bmdma_fill_sg(struct ata_queued_cmd * qc)2498 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2499 {
2500 	struct ata_port *ap = qc->ap;
2501 	struct ata_bmdma_prd *prd = ap->bmdma_prd;
2502 	struct scatterlist *sg;
2503 	unsigned int si, pi;
2504 
2505 	pi = 0;
2506 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2507 		u32 addr, offset;
2508 		u32 sg_len, len;
2509 
2510 		/* determine if physical DMA addr spans 64K boundary.
2511 		 * Note h/w doesn't support 64-bit, so we unconditionally
2512 		 * truncate dma_addr_t to u32.
2513 		 */
2514 		addr = (u32) sg_dma_address(sg);
2515 		sg_len = sg_dma_len(sg);
2516 
2517 		while (sg_len) {
2518 			offset = addr & 0xffff;
2519 			len = sg_len;
2520 			if ((offset + sg_len) > 0x10000)
2521 				len = 0x10000 - offset;
2522 
2523 			prd[pi].addr = cpu_to_le32(addr);
2524 			prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2525 
2526 			pi++;
2527 			sg_len -= len;
2528 			addr += len;
2529 		}
2530 	}
2531 
2532 	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2533 }
2534 
2535 /**
2536  *	ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2537  *	@qc: Metadata associated with taskfile to be transferred
2538  *
2539  *	Fill PCI IDE PRD (scatter-gather) table with segments
2540  *	associated with the current disk command. Perform the fill
2541  *	so that we avoid writing any length 64K records for
2542  *	controllers that don't follow the spec.
2543  *
2544  *	LOCKING:
2545  *	spin_lock_irqsave(host lock)
2546  *
2547  */
ata_bmdma_fill_sg_dumb(struct ata_queued_cmd * qc)2548 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2549 {
2550 	struct ata_port *ap = qc->ap;
2551 	struct ata_bmdma_prd *prd = ap->bmdma_prd;
2552 	struct scatterlist *sg;
2553 	unsigned int si, pi;
2554 
2555 	pi = 0;
2556 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2557 		u32 addr, offset;
2558 		u32 sg_len, len, blen;
2559 
2560 		/* determine if physical DMA addr spans 64K boundary.
2561 		 * Note h/w doesn't support 64-bit, so we unconditionally
2562 		 * truncate dma_addr_t to u32.
2563 		 */
2564 		addr = (u32) sg_dma_address(sg);
2565 		sg_len = sg_dma_len(sg);
2566 
2567 		while (sg_len) {
2568 			offset = addr & 0xffff;
2569 			len = sg_len;
2570 			if ((offset + sg_len) > 0x10000)
2571 				len = 0x10000 - offset;
2572 
2573 			blen = len & 0xffff;
2574 			prd[pi].addr = cpu_to_le32(addr);
2575 			if (blen == 0) {
2576 				/* Some PATA chipsets like the CS5530 can't
2577 				   cope with 0x0000 meaning 64K as the spec
2578 				   says */
2579 				prd[pi].flags_len = cpu_to_le32(0x8000);
2580 				blen = 0x8000;
2581 				prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2582 			}
2583 			prd[pi].flags_len = cpu_to_le32(blen);
2584 
2585 			pi++;
2586 			sg_len -= len;
2587 			addr += len;
2588 		}
2589 	}
2590 
2591 	prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2592 }
2593 
2594 /**
2595  *	ata_bmdma_qc_prep - Prepare taskfile for submission
2596  *	@qc: Metadata associated with taskfile to be prepared
2597  *
2598  *	Prepare ATA taskfile for submission.
2599  *
2600  *	LOCKING:
2601  *	spin_lock_irqsave(host lock)
2602  */
ata_bmdma_qc_prep(struct ata_queued_cmd * qc)2603 enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2604 {
2605 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2606 		return AC_ERR_OK;
2607 
2608 	ata_bmdma_fill_sg(qc);
2609 
2610 	return AC_ERR_OK;
2611 }
2612 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2613 
2614 /**
2615  *	ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2616  *	@qc: Metadata associated with taskfile to be prepared
2617  *
2618  *	Prepare ATA taskfile for submission.
2619  *
2620  *	LOCKING:
2621  *	spin_lock_irqsave(host lock)
2622  */
ata_bmdma_dumb_qc_prep(struct ata_queued_cmd * qc)2623 enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2624 {
2625 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2626 		return AC_ERR_OK;
2627 
2628 	ata_bmdma_fill_sg_dumb(qc);
2629 
2630 	return AC_ERR_OK;
2631 }
2632 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2633 
2634 /**
2635  *	ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2636  *	@qc: command to issue to device
2637  *
2638  *	This function issues a PIO, NODATA or DMA command to a
2639  *	SFF/BMDMA controller.  PIO and NODATA are handled by
2640  *	ata_sff_qc_issue().
2641  *
2642  *	LOCKING:
2643  *	spin_lock_irqsave(host lock)
2644  *
2645  *	RETURNS:
2646  *	Zero on success, AC_ERR_* mask on failure
2647  */
ata_bmdma_qc_issue(struct ata_queued_cmd * qc)2648 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2649 {
2650 	struct ata_port *ap = qc->ap;
2651 	struct ata_link *link = qc->dev->link;
2652 
2653 	/* defer PIO handling to sff_qc_issue */
2654 	if (!ata_is_dma(qc->tf.protocol))
2655 		return ata_sff_qc_issue(qc);
2656 
2657 	/* select the device */
2658 	ata_dev_select(ap, qc->dev->devno, 1, 0);
2659 
2660 	/* start the command */
2661 	switch (qc->tf.protocol) {
2662 	case ATA_PROT_DMA:
2663 		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2664 
2665 		trace_ata_tf_load(ap, &qc->tf);
2666 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2667 		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2668 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
2669 		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
2670 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
2671 		ap->hsm_task_state = HSM_ST_LAST;
2672 		break;
2673 
2674 	case ATAPI_PROT_DMA:
2675 		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2676 
2677 		trace_ata_tf_load(ap, &qc->tf);
2678 		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
2679 		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2680 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
2681 		ap->hsm_task_state = HSM_ST_FIRST;
2682 
2683 		/* send cdb by polling if no cdb interrupt */
2684 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2685 			ata_sff_queue_pio_task(link, 0);
2686 		break;
2687 
2688 	default:
2689 		WARN_ON(1);
2690 		return AC_ERR_SYSTEM;
2691 	}
2692 
2693 	return 0;
2694 }
2695 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2696 
2697 /**
2698  *	ata_bmdma_port_intr - Handle BMDMA port interrupt
2699  *	@ap: Port on which interrupt arrived (possibly...)
2700  *	@qc: Taskfile currently active in engine
2701  *
2702  *	Handle port interrupt for given queued command.
2703  *
2704  *	LOCKING:
2705  *	spin_lock_irqsave(host lock)
2706  *
2707  *	RETURNS:
2708  *	One if interrupt was handled, zero if not (shared irq).
2709  */
ata_bmdma_port_intr(struct ata_port * ap,struct ata_queued_cmd * qc)2710 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2711 {
2712 	struct ata_eh_info *ehi = &ap->link.eh_info;
2713 	u8 host_stat = 0;
2714 	bool bmdma_stopped = false;
2715 	unsigned int handled;
2716 
2717 	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2718 		/* check status of DMA engine */
2719 		host_stat = ap->ops->bmdma_status(ap);
2720 		trace_ata_bmdma_status(ap, host_stat);
2721 
2722 		/* if it's not our irq... */
2723 		if (!(host_stat & ATA_DMA_INTR))
2724 			return ata_sff_idle_irq(ap);
2725 
2726 		/* before we do anything else, clear DMA-Start bit */
2727 		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2728 		ap->ops->bmdma_stop(qc);
2729 		bmdma_stopped = true;
2730 
2731 		if (unlikely(host_stat & ATA_DMA_ERR)) {
2732 			/* error when transferring data to/from memory */
2733 			qc->err_mask |= AC_ERR_HOST_BUS;
2734 			ap->hsm_task_state = HSM_ST_ERR;
2735 		}
2736 	}
2737 
2738 	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2739 
2740 	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2741 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2742 
2743 	return handled;
2744 }
2745 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2746 
2747 /**
2748  *	ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2749  *	@irq: irq line (unused)
2750  *	@dev_instance: pointer to our ata_host information structure
2751  *
2752  *	Default interrupt handler for PCI IDE devices.  Calls
2753  *	ata_bmdma_port_intr() for each port that is not disabled.
2754  *
2755  *	LOCKING:
2756  *	Obtains host lock during operation.
2757  *
2758  *	RETURNS:
2759  *	IRQ_NONE or IRQ_HANDLED.
2760  */
ata_bmdma_interrupt(int irq,void * dev_instance)2761 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2762 {
2763 	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2764 }
2765 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2766 
2767 /**
2768  *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
2769  *	@ap: port to handle error for
2770  *
2771  *	Stock error handler for BMDMA controller.  It can handle both
2772  *	PATA and SATA controllers.  Most BMDMA controllers should be
2773  *	able to use this EH as-is or with some added handling before
2774  *	and after.
2775  *
2776  *	LOCKING:
2777  *	Kernel thread context (may sleep)
2778  */
ata_bmdma_error_handler(struct ata_port * ap)2779 void ata_bmdma_error_handler(struct ata_port *ap)
2780 {
2781 	struct ata_queued_cmd *qc;
2782 	unsigned long flags;
2783 	bool thaw = false;
2784 
2785 	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2786 	if (qc && !(qc->flags & ATA_QCFLAG_EH))
2787 		qc = NULL;
2788 
2789 	/* reset PIO HSM and stop DMA engine */
2790 	spin_lock_irqsave(ap->lock, flags);
2791 
2792 	if (qc && ata_is_dma(qc->tf.protocol)) {
2793 		u8 host_stat;
2794 
2795 		host_stat = ap->ops->bmdma_status(ap);
2796 		trace_ata_bmdma_status(ap, host_stat);
2797 
2798 		/* BMDMA controllers indicate host bus error by
2799 		 * setting DMA_ERR bit and timing out.  As it wasn't
2800 		 * really a timeout event, adjust error mask and
2801 		 * cancel frozen state.
2802 		 */
2803 		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2804 			qc->err_mask = AC_ERR_HOST_BUS;
2805 			thaw = true;
2806 		}
2807 
2808 		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2809 		ap->ops->bmdma_stop(qc);
2810 
2811 		/* if we're gonna thaw, make sure IRQ is clear */
2812 		if (thaw) {
2813 			ap->ops->sff_check_status(ap);
2814 			if (ap->ops->sff_irq_clear)
2815 				ap->ops->sff_irq_clear(ap);
2816 		}
2817 	}
2818 
2819 	spin_unlock_irqrestore(ap->lock, flags);
2820 
2821 	if (thaw)
2822 		ata_eh_thaw_port(ap);
2823 
2824 	ata_sff_error_handler(ap);
2825 }
2826 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2827 
2828 /**
2829  *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2830  *	@qc: internal command to clean up
2831  *
2832  *	LOCKING:
2833  *	Kernel thread context (may sleep)
2834  */
ata_bmdma_post_internal_cmd(struct ata_queued_cmd * qc)2835 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2836 {
2837 	struct ata_port *ap = qc->ap;
2838 	unsigned long flags;
2839 
2840 	if (ata_is_dma(qc->tf.protocol)) {
2841 		spin_lock_irqsave(ap->lock, flags);
2842 		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2843 		ap->ops->bmdma_stop(qc);
2844 		spin_unlock_irqrestore(ap->lock, flags);
2845 	}
2846 }
2847 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2848 
2849 /**
2850  *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2851  *	@ap: Port associated with this ATA transaction.
2852  *
2853  *	Clear interrupt and error flags in DMA status register.
2854  *
2855  *	May be used as the irq_clear() entry in ata_port_operations.
2856  *
2857  *	LOCKING:
2858  *	spin_lock_irqsave(host lock)
2859  */
ata_bmdma_irq_clear(struct ata_port * ap)2860 void ata_bmdma_irq_clear(struct ata_port *ap)
2861 {
2862 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
2863 
2864 	if (!mmio)
2865 		return;
2866 
2867 	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2868 }
2869 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2870 
2871 /**
2872  *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2873  *	@qc: Info associated with this ATA transaction.
2874  *
2875  *	LOCKING:
2876  *	spin_lock_irqsave(host lock)
2877  */
ata_bmdma_setup(struct ata_queued_cmd * qc)2878 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2879 {
2880 	struct ata_port *ap = qc->ap;
2881 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2882 	u8 dmactl;
2883 
2884 	/* load PRD table addr. */
2885 	mb();	/* make sure PRD table writes are visible to controller */
2886 	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2887 
2888 	/* specify data direction, triple-check start bit is clear */
2889 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2890 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2891 	if (!rw)
2892 		dmactl |= ATA_DMA_WR;
2893 	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2894 
2895 	/* issue r/w command */
2896 	ap->ops->sff_exec_command(ap, &qc->tf);
2897 }
2898 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2899 
2900 /**
2901  *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
2902  *	@qc: Info associated with this ATA transaction.
2903  *
2904  *	LOCKING:
2905  *	spin_lock_irqsave(host lock)
2906  */
ata_bmdma_start(struct ata_queued_cmd * qc)2907 void ata_bmdma_start(struct ata_queued_cmd *qc)
2908 {
2909 	struct ata_port *ap = qc->ap;
2910 	u8 dmactl;
2911 
2912 	/* start host DMA transaction */
2913 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2914 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2915 
2916 	/* Strictly, one may wish to issue an ioread8() here, to
2917 	 * flush the mmio write.  However, control also passes
2918 	 * to the hardware at this point, and it will interrupt
2919 	 * us when we are to resume control.  So, in effect,
2920 	 * we don't care when the mmio write flushes.
2921 	 * Further, a read of the DMA status register _immediately_
2922 	 * following the write may not be what certain flaky hardware
2923 	 * is expected, so I think it is best to not add a readb()
2924 	 * without first all the MMIO ATA cards/mobos.
2925 	 * Or maybe I'm just being paranoid.
2926 	 *
2927 	 * FIXME: The posting of this write means I/O starts are
2928 	 * unnecessarily delayed for MMIO
2929 	 */
2930 }
2931 EXPORT_SYMBOL_GPL(ata_bmdma_start);
2932 
2933 /**
2934  *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2935  *	@qc: Command we are ending DMA for
2936  *
2937  *	Clears the ATA_DMA_START flag in the dma control register
2938  *
2939  *	May be used as the bmdma_stop() entry in ata_port_operations.
2940  *
2941  *	LOCKING:
2942  *	spin_lock_irqsave(host lock)
2943  */
ata_bmdma_stop(struct ata_queued_cmd * qc)2944 void ata_bmdma_stop(struct ata_queued_cmd *qc)
2945 {
2946 	struct ata_port *ap = qc->ap;
2947 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
2948 
2949 	/* clear start/stop bit */
2950 	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2951 		 mmio + ATA_DMA_CMD);
2952 
2953 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2954 	ata_sff_dma_pause(ap);
2955 }
2956 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2957 
2958 /**
2959  *	ata_bmdma_status - Read PCI IDE BMDMA status
2960  *	@ap: Port associated with this ATA transaction.
2961  *
2962  *	Read and return BMDMA status register.
2963  *
2964  *	May be used as the bmdma_status() entry in ata_port_operations.
2965  *
2966  *	LOCKING:
2967  *	spin_lock_irqsave(host lock)
2968  */
ata_bmdma_status(struct ata_port * ap)2969 u8 ata_bmdma_status(struct ata_port *ap)
2970 {
2971 	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2972 }
2973 EXPORT_SYMBOL_GPL(ata_bmdma_status);
2974 
2975 
2976 /**
2977  *	ata_bmdma_port_start - Set port up for bmdma.
2978  *	@ap: Port to initialize
2979  *
2980  *	Called just after data structures for each port are
2981  *	initialized.  Allocates space for PRD table.
2982  *
2983  *	May be used as the port_start() entry in ata_port_operations.
2984  *
2985  *	LOCKING:
2986  *	Inherited from caller.
2987  */
ata_bmdma_port_start(struct ata_port * ap)2988 int ata_bmdma_port_start(struct ata_port *ap)
2989 {
2990 	if (ap->mwdma_mask || ap->udma_mask) {
2991 		ap->bmdma_prd =
2992 			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
2993 					    &ap->bmdma_prd_dma, GFP_KERNEL);
2994 		if (!ap->bmdma_prd)
2995 			return -ENOMEM;
2996 	}
2997 
2998 	return 0;
2999 }
3000 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3001 
3002 /**
3003  *	ata_bmdma_port_start32 - Set port up for dma.
3004  *	@ap: Port to initialize
3005  *
3006  *	Called just after data structures for each port are
3007  *	initialized.  Enables 32bit PIO and allocates space for PRD
3008  *	table.
3009  *
3010  *	May be used as the port_start() entry in ata_port_operations for
3011  *	devices that are capable of 32bit PIO.
3012  *
3013  *	LOCKING:
3014  *	Inherited from caller.
3015  */
ata_bmdma_port_start32(struct ata_port * ap)3016 int ata_bmdma_port_start32(struct ata_port *ap)
3017 {
3018 	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3019 	return ata_bmdma_port_start(ap);
3020 }
3021 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3022 
3023 #ifdef CONFIG_PCI
3024 
3025 /**
3026  *	ata_pci_bmdma_clear_simplex -	attempt to kick device out of simplex
3027  *	@pdev: PCI device
3028  *
3029  *	Some PCI ATA devices report simplex mode but in fact can be told to
3030  *	enter non simplex mode. This implements the necessary logic to
3031  *	perform the task on such devices. Calling it on other devices will
3032  *	have -undefined- behaviour.
3033  */
ata_pci_bmdma_clear_simplex(struct pci_dev * pdev)3034 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3035 {
3036 #ifdef CONFIG_HAS_IOPORT
3037 	unsigned long bmdma = pci_resource_start(pdev, 4);
3038 	u8 simplex;
3039 
3040 	if (bmdma == 0)
3041 		return -ENOENT;
3042 
3043 	simplex = inb(bmdma + 0x02);
3044 	outb(simplex & 0x60, bmdma + 0x02);
3045 	simplex = inb(bmdma + 0x02);
3046 	if (simplex & 0x80)
3047 		return -EOPNOTSUPP;
3048 	return 0;
3049 #else
3050 	return -ENOENT;
3051 #endif /* CONFIG_HAS_IOPORT */
3052 }
3053 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
3054 
ata_bmdma_nodma(struct ata_host * host,const char * reason)3055 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3056 {
3057 	int i;
3058 
3059 	dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
3060 
3061 	for (i = 0; i < 2; i++) {
3062 		host->ports[i]->mwdma_mask = 0;
3063 		host->ports[i]->udma_mask = 0;
3064 	}
3065 }
3066 
3067 /**
3068  *	ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3069  *	@host: target ATA host
3070  *
3071  *	Acquire PCI BMDMA resources and initialize @host accordingly.
3072  *
3073  *	LOCKING:
3074  *	Inherited from calling layer (may sleep).
3075  */
ata_pci_bmdma_init(struct ata_host * host)3076 void ata_pci_bmdma_init(struct ata_host *host)
3077 {
3078 	struct device *gdev = host->dev;
3079 	struct pci_dev *pdev = to_pci_dev(gdev);
3080 	int i, rc;
3081 
3082 	/* No BAR4 allocation: No DMA */
3083 	if (pci_resource_start(pdev, 4) == 0) {
3084 		ata_bmdma_nodma(host, "BAR4 is zero");
3085 		return;
3086 	}
3087 
3088 	/*
3089 	 * Some controllers require BMDMA region to be initialized
3090 	 * even if DMA is not in use to clear IRQ status via
3091 	 * ->sff_irq_clear method.  Try to initialize bmdma_addr
3092 	 * regardless of dma masks.
3093 	 */
3094 	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
3095 	if (rc)
3096 		ata_bmdma_nodma(host, "failed to set dma mask");
3097 
3098 	/* request and iomap DMA region */
3099 	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3100 	if (rc) {
3101 		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3102 		return;
3103 	}
3104 	host->iomap = pcim_iomap_table(pdev);
3105 
3106 	for (i = 0; i < 2; i++) {
3107 		struct ata_port *ap = host->ports[i];
3108 		void __iomem *bmdma = host->iomap[4] + 8 * i;
3109 
3110 		if (ata_port_is_dummy(ap))
3111 			continue;
3112 
3113 		ap->ioaddr.bmdma_addr = bmdma;
3114 		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3115 		    (ioread8(bmdma + 2) & 0x80))
3116 			host->flags |= ATA_HOST_SIMPLEX;
3117 
3118 		ata_port_desc(ap, "bmdma 0x%llx",
3119 		    (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3120 	}
3121 }
3122 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3123 
3124 /**
3125  *	ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3126  *	@pdev: target PCI device
3127  *	@ppi: array of port_info, must be enough for two ports
3128  *	@r_host: out argument for the initialized ATA host
3129  *
3130  *	Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3131  *	resources and initialize it accordingly in one go.
3132  *
3133  *	LOCKING:
3134  *	Inherited from calling layer (may sleep).
3135  *
3136  *	RETURNS:
3137  *	0 on success, -errno otherwise.
3138  */
ata_pci_bmdma_prepare_host(struct pci_dev * pdev,const struct ata_port_info * const * ppi,struct ata_host ** r_host)3139 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3140 			       const struct ata_port_info * const * ppi,
3141 			       struct ata_host **r_host)
3142 {
3143 	int rc;
3144 
3145 	rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3146 	if (rc)
3147 		return rc;
3148 
3149 	ata_pci_bmdma_init(*r_host);
3150 	return 0;
3151 }
3152 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3153 
3154 /**
3155  *	ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3156  *	@pdev: Controller to be initialized
3157  *	@ppi: array of port_info, must be enough for two ports
3158  *	@sht: scsi_host_template to use when registering the host
3159  *	@host_priv: host private_data
3160  *	@hflags: host flags
3161  *
3162  *	This function is similar to ata_pci_sff_init_one() but also
3163  *	takes care of BMDMA initialization.
3164  *
3165  *	LOCKING:
3166  *	Inherited from PCI layer (may sleep).
3167  *
3168  *	RETURNS:
3169  *	Zero on success, negative on errno-based value on error.
3170  */
ata_pci_bmdma_init_one(struct pci_dev * pdev,const struct ata_port_info * const * ppi,const struct scsi_host_template * sht,void * host_priv,int hflags)3171 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3172 			   const struct ata_port_info * const * ppi,
3173 			   const struct scsi_host_template *sht, void *host_priv,
3174 			   int hflags)
3175 {
3176 	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
3177 }
3178 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
3179 
3180 #endif /* CONFIG_PCI */
3181 #endif /* CONFIG_ATA_BMDMA */
3182 
3183 /**
3184  *	ata_sff_port_init - Initialize SFF/BMDMA ATA port
3185  *	@ap: Port to initialize
3186  *
3187  *	Called on port allocation to initialize SFF/BMDMA specific
3188  *	fields.
3189  *
3190  *	LOCKING:
3191  *	None.
3192  */
ata_sff_port_init(struct ata_port * ap)3193 void ata_sff_port_init(struct ata_port *ap)
3194 {
3195 	INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3196 	ap->ctl = ATA_DEVCTL_OBS;
3197 	ap->last_ctl = 0xFF;
3198 }
3199 
ata_sff_init(void)3200 int __init ata_sff_init(void)
3201 {
3202 	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3203 	if (!ata_sff_wq)
3204 		return -ENOMEM;
3205 
3206 	return 0;
3207 }
3208 
ata_sff_exit(void)3209 void ata_sff_exit(void)
3210 {
3211 	destroy_workqueue(ata_sff_wq);
3212 }
3213