Lines matching full identifier: qc
462 * @qc: queued command
475 unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf, in ata_sff_data_xfer() argument
478 struct ata_port *ap = qc->dev->link->ap; in ata_sff_data_xfer()
515 * @qc: queued command
530 unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf, in ata_sff_data_xfer32() argument
533 struct ata_device *dev = qc->dev; in ata_sff_data_xfer32()
540 return ata_sff_data_xfer(qc, buf, buflen, rw); in ata_sff_data_xfer32()
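Note: ata_sff_data_xfer() moves PIO data through the 16-bit data register, padding an odd trailing byte out to a full word, and ata_sff_data_xfer32() falls back to it when 32-bit PIO is not available (line 540). The standalone C sketch below models only that word-at-a-time copy; fifo and dev_write_word() are invented stand-ins for the real iowrite16() port access, not kernel code.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint16_t fifo[1024];
static unsigned int fifo_pos;

static void dev_write_word(uint16_t w)
{
	fifo[fifo_pos++] = w;             /* stands in for iowrite16() on the data port */
}

static unsigned int pio_write(const unsigned char *buf, unsigned int buflen)
{
	unsigned int words = buflen >> 1;
	unsigned int i;

	for (i = 0; i < words; i++) {
		uint16_t w;

		memcpy(&w, buf + 2 * i, 2); /* avoid unaligned loads */
		dev_write_word(w);
	}

	/* an odd trailing byte still occupies a full data-register word */
	if (buflen & 1) {
		dev_write_word(buf[buflen - 1]);
		return buflen + 1;          /* one pad byte was clocked out as well */
	}
	return buflen;
}

int main(void)
{
	unsigned char sector[513];

	memset(sector, 0xab, sizeof(sector));
	printf("transferred %u bytes\n", pio_write(sector, sizeof(sector)));
	return 0;
}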
577 static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page, in ata_pio_xfer() argument
580 bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE); in ata_pio_xfer()
584 qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write); in ata_pio_xfer()
593 * @qc: Command on going
595 * Transfer qc->sect_size bytes of data from/to the ATA device.
600 static void ata_pio_sector(struct ata_queued_cmd *qc) in ata_pio_sector() argument
602 struct ata_port *ap = qc->ap; in ata_pio_sector()
606 if (!qc->cursg) { in ata_pio_sector()
607 qc->curbytes = qc->nbytes; in ata_pio_sector()
610 if (qc->curbytes == qc->nbytes - qc->sect_size) in ata_pio_sector()
613 page = sg_page(qc->cursg); in ata_pio_sector()
614 offset = qc->cursg->offset + qc->cursg_ofs; in ata_pio_sector()
620 trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size); in ata_pio_sector()
627 if (offset + qc->sect_size > PAGE_SIZE) { in ata_pio_sector()
630 ata_pio_xfer(qc, page, offset, split_len); in ata_pio_sector()
631 ata_pio_xfer(qc, nth_page(page, 1), 0, in ata_pio_sector()
632 qc->sect_size - split_len); in ata_pio_sector()
634 ata_pio_xfer(qc, page, offset, qc->sect_size); in ata_pio_sector()
637 qc->curbytes += qc->sect_size; in ata_pio_sector()
638 qc->cursg_ofs += qc->sect_size; in ata_pio_sector()
640 if (qc->cursg_ofs == qc->cursg->length) { in ata_pio_sector()
641 qc->cursg = sg_next(qc->cursg); in ata_pio_sector()
642 if (!qc->cursg) in ata_pio_sector()
644 qc->cursg_ofs = 0; in ata_pio_sector()
650 * @qc: Command on going
658 static void ata_pio_sectors(struct ata_queued_cmd *qc) in ata_pio_sectors() argument
660 if (is_multi_taskfile(&qc->tf)) { in ata_pio_sectors()
664 WARN_ON_ONCE(qc->dev->multi_count == 0); in ata_pio_sectors()
666 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, in ata_pio_sectors()
667 qc->dev->multi_count); in ata_pio_sectors()
669 ata_pio_sector(qc); in ata_pio_sectors()
671 ata_pio_sector(qc); in ata_pio_sectors()
673 ata_sff_sync(qc->ap); /* flush */ in ata_pio_sectors()
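Note: ata_pio_sector() transfers one qc->sect_size block and advances qc->curbytes, qc->cursg and qc->cursg_ofs through the scatterlist (lines 637-644), while ata_pio_sectors() repeats it for multi-sector commands. Below is a minimal userspace model of that bookkeeping; fake_qc and fake_sg are invented stand-ins for the kernel structures, and the actual data copy (ata_pio_xfer) is omitted.

#include <stdio.h>

#define SECT_SIZE 512

struct fake_sg { unsigned int length; };

struct fake_qc {
	unsigned int nbytes;      /* total payload of the command */
	unsigned int curbytes;    /* bytes transferred so far */
	struct fake_sg *cursg;    /* current scatterlist entry */
	struct fake_sg *sg_end;   /* one past the last entry */
	unsigned int cursg_ofs;   /* offset within the current entry */
};

/* bookkeeping for one sector; returns 1 when this was the last sector */
static int pio_sector(struct fake_qc *qc)
{
	int last = (qc->curbytes == qc->nbytes - SECT_SIZE);

	/* the data copy itself would happen here */

	qc->curbytes += SECT_SIZE;
	qc->cursg_ofs += SECT_SIZE;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg++;                      /* sg_next() in the kernel */
		if (qc->cursg == qc->sg_end)
			qc->cursg = NULL;
		qc->cursg_ofs = 0;
	}
	return last;
}

int main(void)
{
	struct fake_sg sg[2] = { { 1024 }, { 1024 } };
	struct fake_qc qc = { .nbytes = 2048, .cursg = sg, .sg_end = sg + 2 };

	while (qc.cursg && !pio_sector(&qc))
		;
	printf("transferred %u of %u bytes\n", qc.curbytes, qc.nbytes);
	return 0;
}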
679 * @qc: Taskfile currently active
687 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) in atapi_send_cdb() argument
690 trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len); in atapi_send_cdb()
691 WARN_ON_ONCE(qc->dev->cdb_len < 12); in atapi_send_cdb()
693 ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1); in atapi_send_cdb()
697 switch (qc->tf.protocol) { in atapi_send_cdb()
708 trace_ata_bmdma_start(ap, &qc->tf, qc->tag); in atapi_send_cdb()
709 ap->ops->bmdma_start(qc); in atapi_send_cdb()
719 * @qc: Command on going
728 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) in __atapi_pio_bytes() argument
730 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; in __atapi_pio_bytes()
731 struct ata_port *ap = qc->ap; in __atapi_pio_bytes()
732 struct ata_device *dev = qc->dev; in __atapi_pio_bytes()
740 sg = qc->cursg; in __atapi_pio_bytes()
744 qc->nbytes, qc->curbytes, bytes); in __atapi_pio_bytes()
749 offset = sg->offset + qc->cursg_ofs; in __atapi_pio_bytes()
756 count = min(sg->length - qc->cursg_ofs, bytes); in __atapi_pio_bytes()
761 trace_atapi_pio_transfer_data(qc, offset, count); in __atapi_pio_bytes()
765 consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw); in __atapi_pio_bytes()
769 qc->curbytes += count; in __atapi_pio_bytes()
770 qc->cursg_ofs += count; in __atapi_pio_bytes()
772 if (qc->cursg_ofs == sg->length) { in __atapi_pio_bytes()
773 qc->cursg = sg_next(qc->cursg); in __atapi_pio_bytes()
774 qc->cursg_ofs = 0; in __atapi_pio_bytes()
778 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed); in __atapi_pio_bytes()
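Note: for ATAPI PIO the device announces how many bytes it wants to move in the LBA mid/high registers (bc_lo/bc_hi, lines 813-814), and __atapi_pio_bytes() drains that count in chunks clamped to what is left of the current scatterlist entry (line 756). A small self-contained model of that clamping loop; the lengths and byte count are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned char bc_lo = 0x00, bc_hi = 0x08;     /* as read back from the taskfile */
	unsigned int bytes = (bc_hi << 8) | bc_lo;    /* 2048-byte transfer announced */
	unsigned int sg_len[] = { 1500, 1500 };       /* fake scatterlist entry lengths */
	unsigned int i = 0, ofs = 0;

	while (bytes && i < 2) {
		unsigned int room = sg_len[i] - ofs;
		unsigned int count = bytes < room ? bytes : room;

		/* ap->ops->sff_data_xfer() would move `count` bytes here */
		bytes -= count;
		ofs += count;
		if (ofs == sg_len[i]) {               /* entry exhausted: sg_next() */
			i++;
			ofs = 0;
		}
		printf("chunk of %u bytes, %u left\n", count, bytes);
	}
	return 0;
}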
790 * @qc: Command on going
797 static void atapi_pio_bytes(struct ata_queued_cmd *qc) in atapi_pio_bytes() argument
799 struct ata_port *ap = qc->ap; in atapi_pio_bytes()
800 struct ata_device *dev = qc->dev; in atapi_pio_bytes()
803 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; in atapi_pio_bytes()
805 /* Abuse qc->result_tf for temp storage of intermediate TF in atapi_pio_bytes()
807 * For normal completion, qc->result_tf is not relevant. For in atapi_pio_bytes()
808 * error, qc->result_tf is later overwritten by ata_qc_complete(). in atapi_pio_bytes()
809 * So, the correctness of qc->result_tf is not affected. in atapi_pio_bytes()
811 ap->ops->sff_tf_read(ap, &qc->result_tf); in atapi_pio_bytes()
812 ireason = qc->result_tf.nsect; in atapi_pio_bytes()
813 bc_lo = qc->result_tf.lbam; in atapi_pio_bytes()
814 bc_hi = qc->result_tf.lbah; in atapi_pio_bytes()
829 if (unlikely(__atapi_pio_bytes(qc, bytes))) in atapi_pio_bytes()
839 qc->err_mask |= AC_ERR_HSM; in atapi_pio_bytes()
844 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
846 * @qc: qc on going
852 struct ata_queued_cmd *qc) in ata_hsm_ok_in_wq() argument
854 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_hsm_ok_in_wq()
858 if (qc->tf.protocol == ATA_PROT_PIO && in ata_hsm_ok_in_wq()
859 (qc->tf.flags & ATA_TFLAG_WRITE)) in ata_hsm_ok_in_wq()
862 if (ata_is_atapi(qc->tf.protocol) && in ata_hsm_ok_in_wq()
863 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in ata_hsm_ok_in_wq()
871 * ata_hsm_qc_complete - finish a qc running on standard HSM
872 * @qc: Command to complete
875 * Finish @qc which is running on standard HSM.
881 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) in ata_hsm_qc_complete() argument
883 struct ata_port *ap = qc->ap; in ata_hsm_qc_complete()
887 qc = ata_qc_from_tag(ap, qc->tag); in ata_hsm_qc_complete()
888 if (qc) { in ata_hsm_qc_complete()
889 if (likely(!(qc->err_mask & AC_ERR_HSM))) { in ata_hsm_qc_complete()
891 ata_qc_complete(qc); in ata_hsm_qc_complete()
896 if (likely(!(qc->err_mask & AC_ERR_HSM))) in ata_hsm_qc_complete()
897 ata_qc_complete(qc); in ata_hsm_qc_complete()
906 * @qc: qc on going
913 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, in ata_sff_hsm_move() argument
916 struct ata_link *link = qc->dev->link; in ata_sff_hsm_move()
922 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); in ata_sff_hsm_move()
926 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). in ata_sff_hsm_move()
928 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); in ata_sff_hsm_move()
931 trace_ata_sff_hsm_state(qc, status); in ata_sff_hsm_move()
941 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); in ata_sff_hsm_move()
948 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
953 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
972 if (!(qc->dev->quirks & ATA_QUIRK_STUCK_ERR)) { in ata_sff_hsm_move()
976 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
982 if (qc->tf.protocol == ATA_PROT_PIO) { in ata_sff_hsm_move()
992 ata_pio_sectors(qc); in ata_sff_hsm_move()
995 atapi_send_cdb(ap, qc); in ata_sff_hsm_move()
1004 if (qc->tf.protocol == ATAPI_PROT_PIO) { in ata_sff_hsm_move()
1024 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1029 atapi_pio_bytes(qc); in ata_sff_hsm_move()
1041 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1047 if (qc->dev->quirks & in ata_sff_hsm_move()
1049 qc->err_mask |= in ata_sff_hsm_move()
1059 qc->err_mask |= AC_ERR_HSM | in ata_sff_hsm_move()
1079 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1081 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { in ata_sff_hsm_move()
1082 ata_pio_sectors(qc); in ata_sff_hsm_move()
1090 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1101 qc->err_mask |= AC_ERR_NODEV_HINT; in ata_sff_hsm_move()
1111 ata_pio_sectors(qc); in ata_sff_hsm_move()
1114 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { in ata_sff_hsm_move()
1126 qc->err_mask |= __ac_err_mask(status); in ata_sff_hsm_move()
1132 trace_ata_sff_hsm_command_complete(qc, status); in ata_sff_hsm_move()
1134 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); in ata_sff_hsm_move()
1139 ata_hsm_qc_complete(qc, in_wq); in ata_sff_hsm_move()
1148 ata_hsm_qc_complete(qc, in_wq); in ata_sff_hsm_move()
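Note: ata_sff_hsm_move() advances the SFF host state machine on every interrupt or polling pass: HSM_ST_FIRST pushes the first data block or ATAPI CDB, HSM_ST keeps transferring while the device requests data, and HSM_ST_LAST completes the command. The toy model below only illustrates that progression; the error state, status checks and the polling/workqueue split are deliberately left out.

#include <stdio.h>

enum hsm_state { HSM_ST_IDLE, HSM_ST_FIRST, HSM_ST, HSM_ST_LAST };

int main(void)
{
	enum hsm_state st = HSM_ST_FIRST;
	unsigned int blocks_left = 3;         /* pretend the command moves 3 blocks */

	while (st != HSM_ST_IDLE) {
		switch (st) {
		case HSM_ST_FIRST:            /* device asked for the first block/CDB */
			printf("send first data block\n");
			blocks_left--;
			st = blocks_left ? HSM_ST : HSM_ST_LAST;
			break;
		case HSM_ST:                  /* keep feeding or draining data */
			printf("transfer data block\n");
			blocks_left--;
			st = blocks_left ? HSM_ST : HSM_ST_LAST;
			break;
		case HSM_ST_LAST:             /* device done: complete the qc */
			printf("command complete\n");
			st = HSM_ST_IDLE;
			break;
		default:
			st = HSM_ST_IDLE;
			break;
		}
	}
	return 0;
}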
1213 struct ata_queued_cmd *qc; in ata_sff_pio_task() local
1220 /* qc can be NULL if timeout occurred */ in ata_sff_pio_task()
1221 qc = ata_qc_from_tag(ap, link->active_tag); in ata_sff_pio_task()
1222 if (!qc) { in ata_sff_pio_task()
1256 poll_next = ata_sff_hsm_move(ap, qc, status, 1); in ata_sff_pio_task()
1269 * @qc: command to issue to device
1280 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) in ata_sff_qc_issue() argument
1282 struct ata_port *ap = qc->ap; in ata_sff_qc_issue()
1283 struct ata_link *link = qc->dev->link; in ata_sff_qc_issue()
1289 qc->tf.flags |= ATA_TFLAG_POLLING; in ata_sff_qc_issue()
1292 ata_dev_select(ap, qc->dev->devno, 1, 0); in ata_sff_qc_issue()
1295 switch (qc->tf.protocol) { in ata_sff_qc_issue()
1297 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1298 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1300 ata_tf_to_host(ap, &qc->tf, qc->tag); in ata_sff_qc_issue()
1303 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1309 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1310 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1312 ata_tf_to_host(ap, &qc->tf, qc->tag); in ata_sff_qc_issue()
1314 if (qc->tf.flags & ATA_TFLAG_WRITE) { in ata_sff_qc_issue()
1326 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1339 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1340 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1342 ata_tf_to_host(ap, &qc->tf, qc->tag); in ata_sff_qc_issue()
1347 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || in ata_sff_qc_issue()
1348 (qc->tf.flags & ATA_TFLAG_POLLING)) in ata_sff_qc_issue()
1362 * @qc: qc to fill result TF for
1364 * @qc is finished and result TF needs to be filled. Fill it
1370 void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) in ata_sff_qc_fill_rtf() argument
1372 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); in ata_sff_qc_fill_rtf()
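Note: ata_sff_qc_issue() mainly decides, per taskfile protocol, which HSM state the command starts in and whether the polling task has to be scheduled (PIO writes and ATAPI CDB delivery always go through it, see lines 1314 and 1347-1348). The sketch below is a simplified reading of that switch, not the kernel function; the enum values and helper are invented for illustration, and the DMA cases are handled in ata_bmdma_qc_issue() instead.

#include <stdio.h>
#include <stdbool.h>

enum proto { PROT_NODATA, PROT_PIO_READ, PROT_PIO_WRITE, PROT_ATAPI };

static const char *issue(enum proto p, bool polling, bool cdb_intr)
{
	switch (p) {
	case PROT_NODATA:
		return polling ? "HSM_ST_LAST, poll" : "HSM_ST_LAST, wait for irq";
	case PROT_PIO_READ:
		return polling ? "HSM_ST, poll" : "HSM_ST, wait for irq";
	case PROT_PIO_WRITE:
		/* PIO writes push the first block from the pio task */
		return "HSM_ST_FIRST, schedule pio task";
	case PROT_ATAPI:
		/* the CDB goes out from the pio task unless the device
		 * interrupts for it (ATA_DFLAG_CDB_INTR) */
		return (!cdb_intr || polling) ?
			"HSM_ST_FIRST, schedule pio task" :
			"HSM_ST_FIRST, wait for irq";
	}
	return "?";
}

int main(void)
{
	printf("PIO read, irq driven : %s\n", issue(PROT_PIO_READ, false, false));
	printf("PIO write            : %s\n", issue(PROT_PIO_WRITE, false, false));
	printf("ATAPI, CDB_INTR      : %s\n", issue(PROT_ATAPI, false, true));
	return 0;
}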
1393 struct ata_queued_cmd *qc, in __ata_sff_port_intr() argument
1398 trace_ata_sff_port_intr(qc, hsmv_on_idle); in __ata_sff_port_intr()
1409 * need to check ata_is_atapi(qc->tf.protocol) again. in __ata_sff_port_intr()
1411 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in __ata_sff_port_intr()
1425 qc->err_mask |= AC_ERR_HSM; in __ata_sff_port_intr()
1435 ata_sff_hsm_move(ap, qc, status, 0); in __ata_sff_port_intr()
1443 * @qc: Taskfile currently active in engine
1453 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) in ata_sff_port_intr() argument
1455 return __ata_sff_port_intr(ap, qc, false); in ata_sff_port_intr()
1475 struct ata_queued_cmd *qc; in __ata_sff_interrupt() local
1477 qc = ata_qc_from_tag(ap, ap->link.active_tag); in __ata_sff_interrupt()
1478 if (qc) { in __ata_sff_interrupt()
1479 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) in __ata_sff_interrupt()
1480 handled |= port_intr(ap, qc); in __ata_sff_interrupt()
1567 struct ata_queued_cmd *qc; in ata_sff_lost_interrupt() local
1570 qc = ata_qc_from_tag(ap, ap->link.active_tag); in ata_sff_lost_interrupt()
1572 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_lost_interrupt()
1586 ata_sff_port_intr(ap, qc); in ata_sff_lost_interrupt()
2012 * @qc: command
2020 void ata_sff_drain_fifo(struct ata_queued_cmd *qc) in ata_sff_drain_fifo() argument
2026 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) in ata_sff_drain_fifo()
2029 ap = qc->ap; in ata_sff_drain_fifo()
2057 struct ata_queued_cmd *qc; in ata_sff_error_handler() local
2060 qc = __ata_qc_from_tag(ap, ap->link.active_tag); in ata_sff_error_handler()
2061 if (qc && !(qc->flags & ATA_QCFLAG_EH)) in ata_sff_error_handler()
2062 qc = NULL; in ata_sff_error_handler()
2070 * qc in case anyone wants to do different PIO/DMA recovery or in ata_sff_error_handler()
2074 ap->ops->sff_drain_fifo(qc); in ata_sff_error_handler()
2487 * @qc: Metadata associated with taskfile to be transferred
2496 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) in ata_bmdma_fill_sg() argument
2498 struct ata_port *ap = qc->ap; in ata_bmdma_fill_sg()
2504 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ata_bmdma_fill_sg()
2535 * @qc: Metadata associated with taskfile to be transferred
2546 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) in ata_bmdma_fill_sg_dumb() argument
2548 struct ata_port *ap = qc->ap; in ata_bmdma_fill_sg_dumb()
2554 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ata_bmdma_fill_sg_dumb()
2594 * @qc: Metadata associated with taskfile to be prepared
2601 enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) in ata_bmdma_qc_prep() argument
2603 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in ata_bmdma_qc_prep()
2606 ata_bmdma_fill_sg(qc); in ata_bmdma_qc_prep()
2614 * @qc: Metadata associated with taskfile to be prepared
2621 enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) in ata_bmdma_dumb_qc_prep() argument
2623 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in ata_bmdma_dumb_qc_prep()
2626 ata_bmdma_fill_sg_dumb(qc); in ata_bmdma_dumb_qc_prep()
2634 * @qc: command to issue to device
2646 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) in ata_bmdma_qc_issue() argument
2648 struct ata_port *ap = qc->ap; in ata_bmdma_qc_issue()
2649 struct ata_link *link = qc->dev->link; in ata_bmdma_qc_issue()
2652 if (!ata_is_dma(qc->tf.protocol)) in ata_bmdma_qc_issue()
2653 return ata_sff_qc_issue(qc); in ata_bmdma_qc_issue()
2656 ata_dev_select(ap, qc->dev->devno, 1, 0); in ata_bmdma_qc_issue()
2659 switch (qc->tf.protocol) { in ata_bmdma_qc_issue()
2661 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); in ata_bmdma_qc_issue()
2663 trace_ata_tf_load(ap, &qc->tf); in ata_bmdma_qc_issue()
2664 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in ata_bmdma_qc_issue()
2665 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); in ata_bmdma_qc_issue()
2666 ap->ops->bmdma_setup(qc); /* set up bmdma */ in ata_bmdma_qc_issue()
2667 trace_ata_bmdma_start(ap, &qc->tf, qc->tag); in ata_bmdma_qc_issue()
2668 ap->ops->bmdma_start(qc); /* initiate bmdma */ in ata_bmdma_qc_issue()
2673 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); in ata_bmdma_qc_issue()
2675 trace_ata_tf_load(ap, &qc->tf); in ata_bmdma_qc_issue()
2676 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in ata_bmdma_qc_issue()
2677 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); in ata_bmdma_qc_issue()
2678 ap->ops->bmdma_setup(qc); /* set up bmdma */ in ata_bmdma_qc_issue()
2682 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in ata_bmdma_qc_issue()
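Note: ata_bmdma_qc_issue() hands non-DMA protocols back to ata_sff_qc_issue() (line 2653); for DMA it loads the taskfile, programs the BMDMA engine and, for plain ATA DMA, starts it right away, while ATAPI DMA defers bmdma_start() until atapi_send_cdb() has pushed the packet (line 709). A stub-based sketch of that ordering; the helper names are stand-ins for the ata_port ops, not the real callbacks.

#include <stdio.h>
#include <stdbool.h>

static void sff_tf_load(void) { puts("load taskfile registers"); }
static void sff_exec(void)    { puts("write the command register"); }
static void bmdma_start(void) { puts("set the DMA start bit"); }

static void bmdma_setup(void)
{
	puts("program PRD table address and direction");
	sff_exec();   /* ata_bmdma_setup() issues the command itself (line 2894) */
}

static void bmdma_issue(bool atapi)
{
	sff_tf_load();
	bmdma_setup();
	if (!atapi)
		bmdma_start();
	else
		puts("defer: CDB goes out first, then atapi_send_cdb() starts DMA");
}

int main(void)
{
	puts("-- ATA_PROT_DMA --");
	bmdma_issue(false);
	puts("-- ATAPI_PROT_DMA --");
	bmdma_issue(true);
	return 0;
}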
2698 * @qc: Taskfile currently active in engine
2708 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) in ata_bmdma_port_intr() argument
2715 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { in ata_bmdma_port_intr()
2725 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); in ata_bmdma_port_intr()
2726 ap->ops->bmdma_stop(qc); in ata_bmdma_port_intr()
2731 qc->err_mask |= AC_ERR_HOST_BUS; in ata_bmdma_port_intr()
2736 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); in ata_bmdma_port_intr()
2738 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) in ata_bmdma_port_intr()
2779 struct ata_queued_cmd *qc; in ata_bmdma_error_handler() local
2783 qc = __ata_qc_from_tag(ap, ap->link.active_tag); in ata_bmdma_error_handler()
2784 if (qc && !(qc->flags & ATA_QCFLAG_EH)) in ata_bmdma_error_handler()
2785 qc = NULL; in ata_bmdma_error_handler()
2790 if (qc && ata_is_dma(qc->tf.protocol)) { in ata_bmdma_error_handler()
2801 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { in ata_bmdma_error_handler()
2802 qc->err_mask = AC_ERR_HOST_BUS; in ata_bmdma_error_handler()
2806 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); in ata_bmdma_error_handler()
2807 ap->ops->bmdma_stop(qc); in ata_bmdma_error_handler()
2828 * @qc: internal command to clean up
2833 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) in ata_bmdma_post_internal_cmd() argument
2835 struct ata_port *ap = qc->ap; in ata_bmdma_post_internal_cmd()
2838 if (ata_is_dma(qc->tf.protocol)) { in ata_bmdma_post_internal_cmd()
2840 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); in ata_bmdma_post_internal_cmd()
2841 ap->ops->bmdma_stop(qc); in ata_bmdma_post_internal_cmd()
2871 * @qc: Info associated with this ATA transaction.
2876 void ata_bmdma_setup(struct ata_queued_cmd *qc) in ata_bmdma_setup() argument
2878 struct ata_port *ap = qc->ap; in ata_bmdma_setup()
2879 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); in ata_bmdma_setup()
2894 ap->ops->sff_exec_command(ap, &qc->tf); in ata_bmdma_setup()
2900 * @qc: Info associated with this ATA transaction.
2905 void ata_bmdma_start(struct ata_queued_cmd *qc) in ata_bmdma_start() argument
2907 struct ata_port *ap = qc->ap; in ata_bmdma_start()
2933 * @qc: Command we are ending DMA for
2942 void ata_bmdma_stop(struct ata_queued_cmd *qc) in ata_bmdma_stop() argument
2944 struct ata_port *ap = qc->ap; in ata_bmdma_stop()
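Note: ata_bmdma_setup() points the engine at the PRD table and sets the transfer direction, ata_bmdma_start() sets the start bit, and ata_bmdma_stop() clears it again. The model below pokes a fake register file laid out like a conventional BMDMA block (command at offset 0, status at 2, PRD pointer at 4); the offsets and bits follow that convention, but the code itself is only an illustration, not the kernel implementation.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DMA_CMD     0        /* command register: start bit + direction */
#define DMA_STATUS  2        /* status register (unused in this model) */
#define DMA_TABLE   4        /* 32-bit PRD table base address */

#define DMA_CMD_START  0x01
#define DMA_CMD_WRITE  0x08  /* set: engine writes host memory (device read) */

static uint8_t regs[8];      /* stand-in for the BMDMA I/O ports */

static void bmdma_setup(uint32_t prd_phys, int write_to_device)
{
	memcpy(&regs[DMA_TABLE], &prd_phys, sizeof(prd_phys));
	regs[DMA_CMD] = write_to_device ? 0 : DMA_CMD_WRITE;
}

static void bmdma_start(void)
{
	regs[DMA_CMD] |= DMA_CMD_START;
}

static void bmdma_stop(void)
{
	regs[DMA_CMD] &= (uint8_t)~DMA_CMD_START;
}

int main(void)
{
	uint32_t table;

	bmdma_setup(0x1000u, 0);     /* a read from the device */
	bmdma_start();
	memcpy(&table, &regs[DMA_TABLE], sizeof(table));
	printf("cmd=%#x table=%#x\n", (unsigned)regs[DMA_CMD], (unsigned)table);
	bmdma_stop();
	return 0;
}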