Matching lines from drivers/scsi/ipr.c (IBM Power Linux RAID adapter driver)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ipr.c -- driver for IBM Power Linux RAID adapters
17 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18 * PCI-X Dual Channel Ultra 320 SCSI Adapter
19 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
23 * - Ultra 320 SCSI controller
24 * - PCI-X host interface
25 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26 * - Non-Volatile Write Cache
27 * - Supports attachment of non-RAID disks, tape, and optical devices
28 * - RAID Levels 0, 5, 10
29 * - Hot spare
30 * - Background Parity Checking
31 * - Background Data Scrubbing
32 * - Ability to increase the capacity of an existing RAID 5 disk array
36 * - Tagged command queuing
37 * - Adapter microcode download
38 * - PCI hot plug
39 * - SCSI device hot plug
91 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
192 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2…
194 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
207 …ESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (…
243 "FFF9: Device sector reassign successful"},
247 "7001: IOA sector reassignment successful"},
249 "FFF9: Soft media error. Sector reassignment recommended"},
375 "9073: Invalid multi-adapter configuration"},
397 "Illegal request, command not allowed to a non-optimized resource"},
413 "9031: Array protection temporarily suspended, protection resuming"},
415 "9040: Array protection temporarily suspended, protection resuming"},
447 "9074: Asymmetric advanced function disk configuration"},
469 "9041: Array protection temporarily suspended"},
521 "9092: Disk unit requires initialization before use"},
547 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
548 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
553 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
554 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
574 * ipr_trc_hook - Add a trace entry to the driver trace
586 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook()
589 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
590 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
591 trace_entry->time = jiffies; in ipr_trc_hook()
592 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; in ipr_trc_hook()
593 trace_entry->type = type; in ipr_trc_hook()
594 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; in ipr_trc_hook()
595 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; in ipr_trc_hook()
596 trace_entry->u.add_data = add_data; in ipr_trc_hook()
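
The trace hook above relies on a lock-free ring buffer: the index is bumped atomically and masked into a power-of-two table, so concurrent completions never serialize on a lock. A minimal userspace sketch of the same trick (TRACE_SIZE, trace_entry, and trc_hook are illustrative names, not the driver's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define TRACE_SIZE 4096                     /* must be a power of two */
#define TRACE_INDEX_MASK (TRACE_SIZE - 1)

struct trace_entry {
        uint64_t time;
        uint8_t op_code;
        uint8_t type;
        uint32_t add_data;
};

static struct trace_entry trace[TRACE_SIZE];
static atomic_uint trace_index;

static void trc_hook(uint8_t op_code, uint8_t type, uint32_t add_data)
{
        /* fetch_add returns the old value; +1 mirrors atomic_add_return */
        unsigned int idx =
                (atomic_fetch_add(&trace_index, 1) + 1) & TRACE_INDEX_MASK;
        struct trace_entry *e = &trace[idx];

        e->time = (uint64_t)time(NULL);
        e->op_code = op_code;
        e->type = type;
        e->add_data = add_data;
}

int main(void)
{
        trc_hook(0x28 /* READ(10) */, 1, 0);
        printf("slot 1 op=0x%02x\n", trace[1].op_code);
        return 0;
}

Masking only works because the table size is a power of two; the driver's IPR_TRACE_INDEX_MASK encodes the same constraint.
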
604 * ipr_lock_and_done - Acquire lock and complete command
613 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done()
615 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
616 ipr_cmd->done(ipr_cmd); in ipr_lock_and_done()
617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
621 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
629 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd()
630 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd()
631 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd()
634 hrrq_id = ioarcb->cmd_pkt.hrrq_id; in ipr_reinit_ipr_cmnd()
635 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd()
636 ioarcb->cmd_pkt.hrrq_id = hrrq_id; in ipr_reinit_ipr_cmnd()
637 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
638 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
639 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd()
640 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd()
642 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
643 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd()
646 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd()
648 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd()
651 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd()
652 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd()
653 ipr_cmd->scsi_cmd = NULL; in ipr_reinit_ipr_cmnd()
654 ipr_cmd->sense_buffer[0] = 0; in ipr_reinit_ipr_cmnd()
655 ipr_cmd->dma_use_sg = 0; in ipr_reinit_ipr_cmnd()
659 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
661 * @fast_done: fast done function call-back
670 ipr_cmd->u.scratch = 0; in ipr_init_ipr_cmnd()
671 ipr_cmd->sibling = NULL; in ipr_init_ipr_cmnd()
672 ipr_cmd->eh_comp = NULL; in ipr_init_ipr_cmnd()
673 ipr_cmd->fast_done = fast_done; in ipr_init_ipr_cmnd()
674 timer_setup(&ipr_cmd->timer, NULL, 0); in ipr_init_ipr_cmnd()
678 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
689 if (likely(!list_empty(&hrrq->hrrq_free_q))) { in __ipr_get_free_ipr_cmnd()
690 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, in __ipr_get_free_ipr_cmnd()
692 list_del(&ipr_cmd->queue); in __ipr_get_free_ipr_cmnd()
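
The free-queue pop above is the kernel's intrusive-list idiom: the command embeds a list node, list_entry() (i.e. container_of) recovers the command from the node, and list_del() unlinks it in O(1). A self-contained sketch with assumed names:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

struct cmd { int index; struct list_head queue; };

int main(void)
{
        struct list_head free_q;
        struct cmd c = { .index = 7 };

        list_init(&free_q);
        list_add_tail(&c.queue, &free_q);

        if (free_q.next != &free_q) {           /* !list_empty() */
                struct cmd *got = container_of(free_q.next, struct cmd, queue);
                list_del(&got->queue);
                printf("popped cmd %d\n", got->index);
        }
        return 0;
}
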
700 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
710 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
716 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
732 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
733 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
734 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
735 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
739 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
740 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
742 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
745 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
746 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
747 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
748 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
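
Note the trailing readl() of the sense register: PCI writes are posted, so the driver reads a register back to force the mask and clear writes out to the adapter before returning. A compilable sketch of the idiom (the register layout and the write32/read32 helpers are stand-ins):

#include <stdint.h>

static inline void write32(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static inline uint32_t read32(volatile uint32_t *reg) { return *reg; }

static void mask_and_clear(volatile uint32_t *set_mask,
                           volatile uint32_t *clr_status,
                           volatile uint32_t *sense)
{
        write32(set_mask, ~0u);     /* mask everything */
        write32(clr_status, ~0u);   /* clear latched interrupt bits */
        (void)read32(sense);        /* read back to flush posted writes */
}

int main(void)
{
        static uint32_t regs[3];    /* fake register file for the sketch */

        mask_and_clear(&regs[0], &regs[1], &regs[2]);
        return 0;
}
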
752 * ipr_save_pcix_cmd_reg - Save PCI-X command register
756 * 0 on success / -EIO on failure
760 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
766 rc = pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
767 &ioa_cfg->saved_pcix_cmd_reg); in ipr_save_pcix_cmd_reg()
769 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
770 return -EIO; in ipr_save_pcix_cmd_reg()
773 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
778 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
782 * 0 on success / -EIO on failure
786 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
790 rc = pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
791 ioa_cfg->saved_pcix_cmd_reg); in ipr_set_pcix_cmd_reg()
793 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
794 return -EIO; in ipr_set_pcix_cmd_reg()
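
The two PCI-X helpers form a save / modify / restore pair: read the command register once, OR in the parity-recovery and relaxed-ordering bits, and write the result back, failing with -EIO if config space access fails. A hedged userspace sketch (read_cfg/write_cfg stand in for pci_read_config_word/pci_write_config_word; the flag values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CMD_DPERR_E 0x0001   /* enable data parity error recovery (illustrative) */
#define CMD_ERO     0x0002   /* enable relaxed ordering (illustrative) */

static uint16_t fake_reg = 0x0100;

static int read_cfg(uint16_t *val)  { *val = fake_reg; return 0; }
static int write_cfg(uint16_t val)  { fake_reg = val; return 0; }

int main(void)
{
        uint16_t saved;

        if (read_cfg(&saved) != 0)
                return -1;                    /* -EIO in the driver */

        saved |= CMD_DPERR_E | CMD_ERO;       /* force desired bits on */

        if (write_cfg(saved) != 0)
                return -1;

        printf("cmd reg now 0x%04x\n", fake_reg);
        return 0;
}
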
803 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
807 * ops generated by the SCSI mid-layer which are being aborted.
814 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_scsi_eh_done()
816 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_scsi_eh_done()
818 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_scsi_eh_done()
820 if (ipr_cmd->eh_comp) in __ipr_scsi_eh_done()
821 complete(ipr_cmd->eh_comp); in __ipr_scsi_eh_done()
822 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_scsi_eh_done()
826 * ipr_scsi_eh_done - mid-layer done function for aborted ops
830 * ops generated by the SCSI mid-layer which are being aborted.
838 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_scsi_eh_done()
840 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
842 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
846 * ipr_fail_all_ops - Fails all outstanding ops.
861 spin_lock(&hrrq->_lock); in ipr_fail_all_ops()
863 temp, &hrrq->hrrq_pending_q, queue) { in ipr_fail_all_ops()
864 list_del(&ipr_cmd->queue); in ipr_fail_all_ops()
866 ipr_cmd->s.ioasa.hdr.ioasc = in ipr_fail_all_ops()
868 ipr_cmd->s.ioasa.hdr.ilid = in ipr_fail_all_ops()
871 if (ipr_cmd->scsi_cmd) in ipr_fail_all_ops()
872 ipr_cmd->done = __ipr_scsi_eh_done; in ipr_fail_all_ops()
876 timer_delete(&ipr_cmd->timer); in ipr_fail_all_ops()
877 ipr_cmd->done(ipr_cmd); in ipr_fail_all_ops()
879 spin_unlock(&hrrq->_lock); in ipr_fail_all_ops()
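
ipr_fail_all_ops() walks hrrq_pending_q with list_for_each_entry_safe() because each done() callback can free or requeue the command it is handed; the next pointer must be saved before the callback runs. A minimal sketch of that safe-traversal shape (struct op and op_done are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct op {
        int id;
        struct op *next;
        void (*done)(struct op *);
};

static void op_done(struct op *op)
{
        printf("failing op %d as if IOA was reset\n", op->id);
        free(op);
}

static void fail_all_ops(struct op **pending)
{
        struct op *op = *pending, *temp;

        while (op) {
                temp = op->next;      /* save: op may be freed by done() */
                op->done = op_done;
                op->done(op);
                op = temp;
        }
        *pending = NULL;
}

int main(void)
{
        struct op *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct op *n = malloc(sizeof(*n));
                n->id = i; n->next = head; head = n;
        }
        fail_all_ops(&head);
        return 0;
}
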
885 * ipr_send_command - Send driver initiated requests.
897 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command()
898 dma_addr_t send_dma_addr = ipr_cmd->dma_addr; in ipr_send_command()
900 if (ioa_cfg->sis64) { in ipr_send_command()
905 then use a 512 byte ioarcb */ in ipr_send_command()
906 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) in ipr_send_command()
908 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
910 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
914 * ipr_do_req - Send driver initiated requests.
930 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_do_req()
932 ipr_cmd->done = done; in ipr_do_req()
934 ipr_cmd->timer.expires = jiffies + timeout; in ipr_do_req()
935 ipr_cmd->timer.function = timeout_func; in ipr_do_req()
937 add_timer(&ipr_cmd->timer); in ipr_do_req()
945 * ipr_internal_cmd_done - Op done function for an internally generated op.
956 if (ipr_cmd->sibling) in ipr_internal_cmd_done()
957 ipr_cmd->sibling = NULL; in ipr_internal_cmd_done()
959 complete(&ipr_cmd->completion); in ipr_internal_cmd_done()
963 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
978 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_init_ioadl()
979 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_init_ioadl()
981 ipr_cmd->dma_use_sg = 1; in ipr_init_ioadl()
983 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
984 ioadl64->flags = cpu_to_be32(flags); in ipr_init_ioadl()
985 ioadl64->data_len = cpu_to_be32(len); in ipr_init_ioadl()
986 ioadl64->address = cpu_to_be64(dma_addr); in ipr_init_ioadl()
988 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
990 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
992 ioadl->flags_and_data_len = cpu_to_be32(flags | len); in ipr_init_ioadl()
993 ioadl->address = cpu_to_be32(dma_addr); in ipr_init_ioadl()
996 ipr_cmd->ioarcb.read_ioadl_len = in ipr_init_ioadl()
998 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1000 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1002 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
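
The adapter consumes IOADL descriptors in big-endian byte order, which is why every field above passes through cpu_to_be32()/cpu_to_be64(). A portable sketch of packing one 4+4+8 byte descriptor (the flag and address values are made up):

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint8_t *p, uint32_t v)
{
        for (int i = 0; i < 4; i++)
                p[i] = (uint8_t)(v >> (24 - 8 * i));
}

static void put_be64(uint8_t *p, uint64_t v)
{
        for (int i = 0; i < 8; i++)
                p[i] = (uint8_t)(v >> (56 - 8 * i));
}

int main(void)
{
        uint8_t desc[16];                     /* flags | data_len | address */

        put_be32(desc + 0, 0x48000000);       /* e.g. a READ_LAST-style flag */
        put_be32(desc + 4, 4096);             /* transfer length */
        put_be64(desc + 8, 0x1234567890ULL);  /* DMA address */

        printf("len bytes on wire: %02x %02x %02x %02x\n",
               desc[4], desc[5], desc[6], desc[7]);
        return 0;
}
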
1008 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1020 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd()
1022 init_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1025 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1026 wait_for_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1027 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
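
ipr_send_blocking_cmd() shows the canonical drop-the-lock-to-sleep shape: the caller enters holding host_lock, releases it before wait_for_completion(), and retakes it before returning. A pthread sketch of the same flow (all names assumed; a condvar plays the completion):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t comp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t comp_cond = PTHREAD_COND_INITIALIZER;
static int completed;

static void *irq_side(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&comp_lock);
        completed = 1;                       /* complete(&ipr_cmd->completion) */
        pthread_cond_signal(&comp_cond);
        pthread_mutex_unlock(&comp_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_mutex_lock(&host_lock);      /* caller context */
        pthread_create(&t, NULL, irq_side, NULL);

        pthread_mutex_unlock(&host_lock);    /* cannot sleep holding it */
        pthread_mutex_lock(&comp_lock);
        while (!completed)                   /* wait_for_completion() */
                pthread_cond_wait(&comp_cond, &comp_lock);
        pthread_mutex_unlock(&comp_lock);
        pthread_mutex_lock(&host_lock);      /* retake before returning */

        pthread_mutex_unlock(&host_lock);
        pthread_join(t, NULL);
        printf("command completed\n");
        return 0;
}
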
1034 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1037 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1038 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
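
ipr_get_hrrq_index() reserves queue 0 for internal commands and round-robins I/O across queues 1..hrrq_num-1 via an atomic counter. Sketch (the hrrq_num value is illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint hrrq_index;

static unsigned int get_hrrq_index(unsigned int hrrq_num)
{
        unsigned int hrrq;

        if (hrrq_num == 1)
                return 0;                     /* only the reserved queue exists */

        hrrq = atomic_fetch_add(&hrrq_index, 1) + 1;
        return (hrrq % (hrrq_num - 1)) + 1;   /* always lands in 1..hrrq_num-1 */
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("op %d -> hrrq %u\n", i, get_hrrq_index(4));
        return 0;
}
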
1044 * ipr_send_hcam - Send an HCAM to the adapter.
1062 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1064 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_send_hcam()
1065 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1067 ipr_cmd->u.hostrcb = hostrcb; in ipr_send_hcam()
1068 ioarcb = &ipr_cmd->ioarcb; in ipr_send_hcam()
1070 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_send_hcam()
1071 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; in ipr_send_hcam()
1072 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; in ipr_send_hcam()
1073 ioarcb->cmd_pkt.cdb[1] = type; in ipr_send_hcam()
1074 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; in ipr_send_hcam()
1075 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; in ipr_send_hcam()
1077 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, in ipr_send_hcam()
1078 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); in ipr_send_hcam()
1081 ipr_cmd->done = ipr_process_ccn; in ipr_send_hcam()
1083 ipr_cmd->done = ipr_process_error; in ipr_send_hcam()
1089 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
1094 * ipr_init_res_entry - Initialize a resource entry struct.
1105 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry()
1108 res->needs_sync_complete = 0; in ipr_init_res_entry()
1109 res->in_erp = 0; in ipr_init_res_entry()
1110 res->add_to_ml = 0; in ipr_init_res_entry()
1111 res->del_from_ml = 0; in ipr_init_res_entry()
1112 res->resetting_device = 0; in ipr_init_res_entry()
1113 res->reset_occurred = 0; in ipr_init_res_entry()
1114 res->sdev = NULL; in ipr_init_res_entry()
1116 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1117 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_init_res_entry()
1118 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_init_res_entry()
1119 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_init_res_entry()
1120 res->type = cfgtew->u.cfgte64->res_type; in ipr_init_res_entry()
1122 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_init_res_entry()
1123 sizeof(res->res_path)); in ipr_init_res_entry()
1125 res->bus = 0; in ipr_init_res_entry()
1126 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_init_res_entry()
1127 sizeof(res->dev_lun.scsi_lun)); in ipr_init_res_entry()
1128 res->lun = scsilun_to_int(&res->dev_lun); in ipr_init_res_entry()
1130 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_init_res_entry()
1131 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1132 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { in ipr_init_res_entry()
1134 res->target = gscsi_res->target; in ipr_init_res_entry()
1139 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1140 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1141 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1143 } else if (res->type == IPR_RES_TYPE_IOAFP) { in ipr_init_res_entry()
1144 res->bus = IPR_IOAFP_VIRTUAL_BUS; in ipr_init_res_entry()
1145 res->target = 0; in ipr_init_res_entry()
1146 } else if (res->type == IPR_RES_TYPE_ARRAY) { in ipr_init_res_entry()
1147 res->bus = IPR_ARRAY_VIRTUAL_BUS; in ipr_init_res_entry()
1148 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1149 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1150 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1151 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { in ipr_init_res_entry()
1152 res->bus = IPR_VSET_VIRTUAL_BUS; in ipr_init_res_entry()
1153 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1154 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1155 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1157 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1158 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1159 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1162 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_init_res_entry()
1163 res->flags = cfgtew->u.cfgte->flags; in ipr_init_res_entry()
1164 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_init_res_entry()
1165 res->type = IPR_RES_TYPE_IOAFP; in ipr_init_res_entry()
1167 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_init_res_entry()
1169 res->bus = cfgtew->u.cfgte->res_addr.bus; in ipr_init_res_entry()
1170 res->target = cfgtew->u.cfgte->res_addr.target; in ipr_init_res_entry()
1171 res->lun = cfgtew->u.cfgte->res_addr.lun; in ipr_init_res_entry()
1172 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); in ipr_init_res_entry()
1177 * ipr_is_same_device - Determine if two devices are the same.
1187 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1188 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, in ipr_is_same_device()
1189 sizeof(cfgtew->u.cfgte64->dev_id)) && in ipr_is_same_device()
1190 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_is_same_device()
1191 sizeof(cfgtew->u.cfgte64->lun))) { in ipr_is_same_device()
1195 if (res->bus == cfgtew->u.cfgte->res_addr.bus && in ipr_is_same_device()
1196 res->target == cfgtew->u.cfgte->res_addr.target && in ipr_is_same_device()
1197 res->lun == cfgtew->u.cfgte->res_addr.lun) in ipr_is_same_device()
1205 * __ipr_format_res_path - Format the resource path for printing.
1219 p += scnprintf(p, buffer + len - p, "%02X", res_path[0]); in __ipr_format_res_path()
1221 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]); in __ipr_format_res_path()
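
__ipr_format_res_path() appends one "-%02X" element at a time, advancing p by the return value of scnprintf(), which (unlike snprintf) reports bytes actually stored and so can never push p past the buffer. A sketch including that clamping behavior (scnprintf_sketch and the 0xff-terminated path are assumptions for illustration):

#include <stdarg.h>
#include <stdio.h>

/* scnprintf returns bytes actually stored, which makes "p += ..." safe */
static int scnprintf_sketch(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        return i < (int)size ? i : (int)size - 1;
}

static char *format_res_path(const unsigned char *res_path, int n,
                             char *buffer, int len)
{
        char *p = buffer;

        p += scnprintf_sketch(p, buffer + len - p, "%02X", res_path[0]);
        for (int i = 1; i < n && res_path[i] != 0xff; i++)
                p += scnprintf_sketch(p, buffer + len - p, "-%02X", res_path[i]);
        return buffer;
}

int main(void)
{
        unsigned char path[] = { 0x00, 0x02, 0x0a, 0xff };
        char buf[32];

        printf("%s\n", format_res_path(path, 4, buf, sizeof(buf)));
        return 0;
}
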
1227 * ipr_format_res_path - Format the resource path for printing.
1242 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
1243 __ipr_format_res_path(res_path, p, len - (p - buffer)); in ipr_format_res_path()
1248 * ipr_update_res_entry - Update the resource entry.
1261 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1262 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_update_res_entry()
1263 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_update_res_entry()
1264 res->type = cfgtew->u.cfgte64->res_type; in ipr_update_res_entry()
1266 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, in ipr_update_res_entry()
1269 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_update_res_entry()
1270 res->res_handle = cfgtew->u.cfgte64->res_handle; in ipr_update_res_entry()
1271 res->dev_id = cfgtew->u.cfgte64->dev_id; in ipr_update_res_entry()
1273 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_update_res_entry()
1274 sizeof(res->dev_lun.scsi_lun)); in ipr_update_res_entry()
1276 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1277 sizeof(res->res_path))) { in ipr_update_res_entry()
1278 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1279 sizeof(res->res_path)); in ipr_update_res_entry()
1283 if (res->sdev && new_path) in ipr_update_res_entry()
1284 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", in ipr_update_res_entry()
1285 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1286 res->res_path, buffer, sizeof(buffer))); in ipr_update_res_entry()
1288 res->flags = cfgtew->u.cfgte->flags; in ipr_update_res_entry()
1289 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_update_res_entry()
1290 res->type = IPR_RES_TYPE_IOAFP; in ipr_update_res_entry()
1292 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_update_res_entry()
1294 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, in ipr_update_res_entry()
1297 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_update_res_entry()
1298 res->res_handle = cfgtew->u.cfgte->res_handle; in ipr_update_res_entry()
1303 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1313 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target()
1315 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1318 if (res->bus == IPR_ARRAY_VIRTUAL_BUS) in ipr_clear_res_target()
1319 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1320 else if (res->bus == IPR_VSET_VIRTUAL_BUS) in ipr_clear_res_target()
1321 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1322 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_clear_res_target()
1323 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1324 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) in ipr_clear_res_target()
1326 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1328 } else if (res->bus == 0) in ipr_clear_res_target()
1329 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
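
Target, array, and vset IDs in the sis64 path are handed out from bitmaps: find_first_zero_bit() plus set_bit() allocates the lowest free ID (as in ipr_init_res_entry earlier), and clear_bit() here returns it. A 64-ID sketch of that allocator (the driver sizes its bitmaps by max_devs_supported):

#include <stdint.h>
#include <stdio.h>

static uint64_t target_ids;

static int alloc_id(void)
{
        for (int i = 0; i < 64; i++) {             /* find_first_zero_bit */
                if (!(target_ids & (1ULL << i))) {
                        target_ids |= 1ULL << i;   /* set_bit */
                        return i;
                }
        }
        return -1;
}

static void free_id(int id)
{
        target_ids &= ~(1ULL << id);               /* clear_bit */
}

int main(void)
{
        int a = alloc_id(), b = alloc_id();

        printf("allocated %d and %d\n", a, b);
        free_id(a);
        printf("next: %d (lowest free again)\n", alloc_id());
        return 0;
}
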
1333 * ipr_handle_config_change - Handle a config change from the adapter
1349 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1350 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; in ipr_handle_config_change()
1351 cc_res_handle = cfgtew.u.cfgte64->res_handle; in ipr_handle_config_change()
1353 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; in ipr_handle_config_change()
1354 cc_res_handle = cfgtew.u.cfgte->res_handle; in ipr_handle_config_change()
1357 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1358 if (res->res_handle == cc_res_handle) { in ipr_handle_config_change()
1365 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1372 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1375 list_del(&res->queue); in ipr_handle_config_change()
1377 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1382 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { in ipr_handle_config_change()
1383 if (res->sdev) { in ipr_handle_config_change()
1384 res->del_from_ml = 1; in ipr_handle_config_change()
1385 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_handle_config_change()
1386 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1389 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1391 } else if (!res->sdev || res->del_from_ml) { in ipr_handle_config_change()
1392 res->add_to_ml = 1; in ipr_handle_config_change()
1393 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1400 * ipr_process_ccn - Op done function for a CCN.
1411 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn()
1412 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_ccn()
1413 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_ccn()
1415 list_del_init(&hostrcb->queue); in ipr_process_ccn()
1416 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_ccn()
1421 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1431 * strip_whitespace - Strip and pad trailing whitespace.
1443 i--; in strip_whitespace()
1445 i--; in strip_whitespace()
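
Only the two i-- lines of strip_whitespace() survive in this listing; per its description it walks back over trailing blanks before the buffer is logged. A guess at the overall shape, hedged accordingly (the signature and padding behavior are assumed):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void strip_whitespace_sketch(int i, char *buf)
{
        if (i < 1)
                return;
        i--;                      /* index of last character */
        while (i >= 0 && isspace((unsigned char)buf[i]))
                i--;
        buf[i + 1] = '\0';
}

int main(void)
{
        char id[] = "IBM     ";

        strip_whitespace_sketch((int)strlen(id), id);
        printf("[%s]\n", id);
        return 0;
}
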
1450 * ipr_log_vpd_compact - Log the passed VPD compactly.
1465 memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd_compact()
1468 memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN); in ipr_log_vpd_compact()
1471 memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd_compact()
1479 * ipr_log_vpd - Log the passed VPD to the error log.
1490 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd()
1491 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, in ipr_log_vpd()
1496 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd()
1502 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1513 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); in ipr_log_ext_vpd_compact()
1515 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd_compact()
1519 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1527 ipr_log_vpd(&vpd->vpd); in ipr_log_ext_vpd()
1528 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), in ipr_log_ext_vpd()
1529 be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd()
1533 * ipr_log_enhanced_cache_error - Log a cache error.
1545 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1546 error = &hostrcb->hcam.u.error64.u.type_12_error; in ipr_log_enhanced_cache_error()
1548 error = &hostrcb->hcam.u.error.u.type_12_error; in ipr_log_enhanced_cache_error()
1550 ipr_err("-----Current Configuration-----\n"); in ipr_log_enhanced_cache_error()
1552 ipr_log_ext_vpd(&error->ioa_vpd); in ipr_log_enhanced_cache_error()
1554 ipr_log_ext_vpd(&error->cfc_vpd); in ipr_log_enhanced_cache_error()
1556 ipr_err("-----Expected Configuration-----\n"); in ipr_log_enhanced_cache_error()
1558 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_enhanced_cache_error()
1560 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_enhanced_cache_error()
1563 be32_to_cpu(error->ioa_data[0]), in ipr_log_enhanced_cache_error()
1564 be32_to_cpu(error->ioa_data[1]), in ipr_log_enhanced_cache_error()
1565 be32_to_cpu(error->ioa_data[2])); in ipr_log_enhanced_cache_error()
1569 * ipr_log_cache_error - Log a cache error.
1580 &hostrcb->hcam.u.error.u.type_02_error; in ipr_log_cache_error()
1582 ipr_err("-----Current Configuration-----\n"); in ipr_log_cache_error()
1584 ipr_log_vpd(&error->ioa_vpd); in ipr_log_cache_error()
1586 ipr_log_vpd(&error->cfc_vpd); in ipr_log_cache_error()
1588 ipr_err("-----Expected Configuration-----\n"); in ipr_log_cache_error()
1590 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_cache_error()
1592 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_cache_error()
1595 be32_to_cpu(error->ioa_data[0]), in ipr_log_cache_error()
1596 be32_to_cpu(error->ioa_data[1]), in ipr_log_cache_error()
1597 be32_to_cpu(error->ioa_data[2])); in ipr_log_cache_error()
1601 * ipr_log_enhanced_config_error - Log a configuration error.
1615 error = &hostrcb->hcam.u.error.u.type_13_error; in ipr_log_enhanced_config_error()
1616 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_enhanced_config_error()
1619 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_enhanced_config_error()
1621 dev_entry = error->dev; in ipr_log_enhanced_config_error()
1626 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1627 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_enhanced_config_error()
1629 ipr_err("-----New Device Information-----\n"); in ipr_log_enhanced_config_error()
1630 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_enhanced_config_error()
1633 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1636 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1641 * ipr_log_sis64_config_error - Log a device error.
1656 error = &hostrcb->hcam.u.error64.u.type_23_error; in ipr_log_sis64_config_error()
1657 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_sis64_config_error()
1660 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_sis64_config_error()
1662 dev_entry = error->dev; in ipr_log_sis64_config_error()
1668 __ipr_format_res_path(dev_entry->res_path, in ipr_log_sis64_config_error()
1670 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_sis64_config_error()
1672 ipr_err("-----New Device Information-----\n"); in ipr_log_sis64_config_error()
1673 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_sis64_config_error()
1676 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_sis64_config_error()
1679 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_sis64_config_error()
1684 * ipr_log_config_error - Log a configuration error.
1698 error = &hostrcb->hcam.u.error.u.type_03_error; in ipr_log_config_error()
1699 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_config_error()
1702 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_config_error()
1704 dev_entry = error->dev; in ipr_log_config_error()
1709 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1710 ipr_log_vpd(&dev_entry->vpd); in ipr_log_config_error()
1712 ipr_err("-----New Device Information-----\n"); in ipr_log_config_error()
1713 ipr_log_vpd(&dev_entry->new_vpd); in ipr_log_config_error()
1716 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_config_error()
1719 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_config_error()
1722 be32_to_cpu(dev_entry->ioa_data[0]), in ipr_log_config_error()
1723 be32_to_cpu(dev_entry->ioa_data[1]), in ipr_log_config_error()
1724 be32_to_cpu(dev_entry->ioa_data[2]), in ipr_log_config_error()
1725 be32_to_cpu(dev_entry->ioa_data[3]), in ipr_log_config_error()
1726 be32_to_cpu(dev_entry->ioa_data[4])); in ipr_log_config_error()
1731 * ipr_log_enhanced_array_error - Log an array configuration error.
1744 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_enhanced_array_error()
1746 error = &hostrcb->hcam.u.error.u.type_14_error; in ipr_log_enhanced_array_error()
1751 error->protection_level, in ipr_log_enhanced_array_error()
1752 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1753 error->last_func_vset_res_addr.bus, in ipr_log_enhanced_array_error()
1754 error->last_func_vset_res_addr.target, in ipr_log_enhanced_array_error()
1755 error->last_func_vset_res_addr.lun); in ipr_log_enhanced_array_error()
1759 array_entry = error->array_member; in ipr_log_enhanced_array_error()
1760 num_entries = min_t(u32, be32_to_cpu(error->num_entries), in ipr_log_enhanced_array_error()
1761 ARRAY_SIZE(error->array_member)); in ipr_log_enhanced_array_error()
1764 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_enhanced_array_error()
1767 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_enhanced_array_error()
1772 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_enhanced_array_error()
1773 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1774 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1782 * ipr_log_array_error - Log an array configuration error.
1795 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_array_error()
1797 error = &hostrcb->hcam.u.error.u.type_04_error; in ipr_log_array_error()
1802 error->protection_level, in ipr_log_array_error()
1803 ioa_cfg->host->host_no, in ipr_log_array_error()
1804 error->last_func_vset_res_addr.bus, in ipr_log_array_error()
1805 error->last_func_vset_res_addr.target, in ipr_log_array_error()
1806 error->last_func_vset_res_addr.lun); in ipr_log_array_error()
1810 array_entry = error->array_member; in ipr_log_array_error()
1813 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_array_error()
1816 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_array_error()
1821 ipr_log_vpd(&array_entry->vpd); in ipr_log_array_error()
1823 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1824 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1830 array_entry = error->array_member2; in ipr_log_array_error()
1837 * ipr_log_hex_data - Log additional hex IOA error data.
1852 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
1865 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1877 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1878 error = &hostrcb->hcam.u.error64.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1880 error = &hostrcb->hcam.u.error.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1882 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_enhanced_dual_ioa_error()
1883 strim(error->failure_reason); in ipr_log_enhanced_dual_ioa_error()
1885 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_enhanced_dual_ioa_error()
1886 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_enhanced_dual_ioa_error()
1887 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_enhanced_dual_ioa_error()
1888 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
1889 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_enhanced_dual_ioa_error()
1895 * ipr_log_dual_ioa_error - Log a dual adapter error.
1907 error = &hostrcb->hcam.u.error.u.type_07_error; in ipr_log_dual_ioa_error()
1908 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_dual_ioa_error()
1909 strim(error->failure_reason); in ipr_log_dual_ioa_error()
1911 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_dual_ioa_error()
1912 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_dual_ioa_error()
1913 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_dual_ioa_error()
1914 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
1915 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_dual_ioa_error()
1940 * ipr_log_fabric_path - Log a fabric path error
1951 u8 path_state = fabric->path_state; in ipr_log_fabric_path()
1963 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { in ipr_log_fabric_path()
1966 fabric->ioa_port); in ipr_log_fabric_path()
1967 } else if (fabric->cascaded_expander == 0xff) { in ipr_log_fabric_path()
1970 fabric->ioa_port, fabric->phy); in ipr_log_fabric_path()
1971 } else if (fabric->phy == 0xff) { in ipr_log_fabric_path()
1974 fabric->ioa_port, fabric->cascaded_expander); in ipr_log_fabric_path()
1978 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
1985 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
1989 * ipr_log64_fabric_path - Log a fabric path error
2000 u8 path_state = fabric->path_state; in ipr_log64_fabric_path()
2015 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2016 fabric->res_path, in ipr_log64_fabric_path()
2023 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2069 * ipr_log_path_elem - Log a fabric path element.
2080 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log_path_elem()
2081 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log_path_elem()
2097 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2098 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2100 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { in ipr_log_path_elem()
2103 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2104 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2105 } else if (cfg->cascaded_expander == 0xff) { in ipr_log_path_elem()
2108 path_type_desc[i].desc, cfg->phy, in ipr_log_path_elem()
2109 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2110 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2111 } else if (cfg->phy == 0xff) { in ipr_log_path_elem()
2114 path_type_desc[i].desc, cfg->cascaded_expander, in ipr_log_path_elem()
2115 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2116 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2120 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2121 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2122 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2130 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2131 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2132 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2136 * ipr_log64_path_elem - Log a fabric path element.
2147 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; in ipr_log64_path_elem()
2148 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log64_path_elem()
2149 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log64_path_elem()
2165 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2166 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2167 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2168 be32_to_cpu(cfg->wwid[0]), in ipr_log64_path_elem()
2169 be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2174 "WWN=%08X%08X\n", cfg->type_status, in ipr_log64_path_elem()
2175 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2176 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2177 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2178 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2182 * ipr_log_fabric_error - Log a fabric error.
2197 error = &hostrcb->hcam.u.error.u.type_20_error; in ipr_log_fabric_error()
2198 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_fabric_error()
2199 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_fabric_error()
2201 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_fabric_error()
2205 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_fabric_error()
2210 add_len -= be16_to_cpu(fabric->length); in ipr_log_fabric_error()
2212 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_fabric_error()
2219 * ipr_log_sis64_array_error - Log a sis64 array error.
2233 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_sis64_array_error()
2235 error = &hostrcb->hcam.u.error64.u.type_24_error; in ipr_log_sis64_array_error()
2240 error->protection_level, in ipr_log_sis64_array_error()
2241 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2246 array_entry = error->array_member; in ipr_log_sis64_array_error()
2247 num_entries = min_t(u32, error->num_entries, in ipr_log_sis64_array_error()
2248 ARRAY_SIZE(error->array_member)); in ipr_log_sis64_array_error()
2252 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_sis64_array_error()
2255 if (error->exposed_mode_adn == i) in ipr_log_sis64_array_error()
2261 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_sis64_array_error()
2263 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2267 array_entry->expected_res_path, in ipr_log_sis64_array_error()
2275 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2290 error = &hostrcb->hcam.u.error64.u.type_30_error; in ipr_log_sis64_fabric_error()
2292 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_fabric_error()
2293 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_sis64_fabric_error()
2295 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_fabric_error()
2299 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_sis64_fabric_error()
2304 add_len -= be16_to_cpu(fabric->length); in ipr_log_sis64_fabric_error()
2306 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_sis64_fabric_error()
2313 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2325 error = &hostrcb->hcam.u.error64.u.type_41_error; in ipr_log_sis64_service_required_error()
2327 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_service_required_error()
2328 ipr_err("Primary Failure Reason: %s\n", error->failure_reason); in ipr_log_sis64_service_required_error()
2329 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_sis64_service_required_error()
2330 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_service_required_error()
2335 * ipr_log_generic_error - Log an adapter error.
2345 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2346 be32_to_cpu(hostrcb->hcam.length)); in ipr_log_generic_error()
2350 * ipr_log_sis64_device_error - Log a device error.
2363 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_log_sis64_device_error()
2365 ipr_err("-----Failing Device Information-----\n"); in ipr_log_sis64_device_error()
2367 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), in ipr_log_sis64_device_error()
2368 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); in ipr_log_sis64_device_error()
2370 __ipr_format_res_path(error->res_path, in ipr_log_sis64_device_error()
2372 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2373 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2374 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); in ipr_log_sis64_device_error()
2375 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); in ipr_log_sis64_device_error()
2377 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2379 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2382 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2386 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2408 * ipr_handle_log_data - Log an adapter error.
2424 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) in ipr_handle_log_data()
2427 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) in ipr_handle_log_data()
2428 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2430 if (ioa_cfg->sis64) in ipr_handle_log_data()
2431 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_handle_log_data()
2433 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_handle_log_data()
2435 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2438 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2439 hostrcb->hcam.u.error.fd_res_addr.bus); in ipr_handle_log_data()
2448 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { in ipr_handle_log_data()
2449 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_handle_log_data()
2451 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && in ipr_handle_log_data()
2452 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2459 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2461 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2463 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) in ipr_handle_log_data()
2464 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); in ipr_handle_log_data()
2466 switch (hostrcb->hcam.overlay_id) { in ipr_handle_log_data()
2524 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, in ipr_get_free_hostrcb()
2528 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); in ipr_get_free_hostrcb()
2529 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, in ipr_get_free_hostrcb()
2533 list_del_init(&hostrcb->queue); in ipr_get_free_hostrcb()
2538 * ipr_process_error - Op done function for an adapter error log.
2550 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error()
2551 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_error()
2552 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_error()
2555 if (ioa_cfg->sis64) in ipr_process_error()
2556 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_process_error()
2558 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_process_error()
2560 list_del_init(&hostrcb->queue); in ipr_process_error()
2561 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_error()
2569 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2573 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); in ipr_process_error()
2574 schedule_work(&ioa_cfg->work_q); in ipr_process_error()
2581 * ipr_timeout - An internally generated op has timed out.
2594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout()
2597 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2599 ioa_cfg->errors_logged++; in ipr_timeout()
2600 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2603 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2604 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2606 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2609 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2614 * ipr_oper_timeout - Adapter timed out transitioning to operational
2627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout()
2630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2632 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2633 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2636 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2637 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2639 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2641 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2650 * ipr_find_ses_entry - Find matching SES in SES table
2665 if (ste->compare_product_id_byte[j] == 'X') { in ipr_find_ses_entry()
2666 vpids = &res->std_inq_data.vpids; in ipr_find_ses_entry()
2667 if (vpids->product_id[j] == ste->product_id[j]) in ipr_find_ses_entry()
2683 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2690 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2701 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2702 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) in ipr_get_max_scsi_speed()
2705 if (bus != res->bus) in ipr_get_max_scsi_speed()
2711 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); in ipr_get_max_scsi_speed()
2718 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2720 * @max_delay: max delay in microseconds to wait
2734 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
2747 return -EIO; in ipr_wait_iodbg_ack()
2751 * ipr_get_sis64_dump_data_section - Dump IOA memory
2767 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2768 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
2776 * ipr_get_ldump_data_section - Dump IOA memory
2783 * 0 on success / -EIO on failure
2792 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2798 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2803 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2805 return -EIO; in ipr_get_ldump_data_section()
2808 /* Signal LDUMP interlocked - clear IO debug ack */ in ipr_get_ldump_data_section()
2810 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2813 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2815 /* Signal address valid - clear IOA Reset alert */ in ipr_get_ldump_data_section()
2817 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2823 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2825 return -EIO; in ipr_get_ldump_data_section()
2829 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2833 if (i < (length_in_words - 1)) { in ipr_get_ldump_data_section()
2834 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2836 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2842 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2845 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2847 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2849 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2851 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ in ipr_get_ldump_data_section()
2854 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2868 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2885 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2887 if (ioa_cfg->sis64) in ipr_sdt_copy()
2893 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { in ipr_sdt_copy()
2894 if (ioa_dump->page_offset >= PAGE_SIZE || in ipr_sdt_copy()
2895 ioa_dump->page_offset == 0) { in ipr_sdt_copy()
2903 ioa_dump->page_offset = 0; in ipr_sdt_copy()
2904 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; in ipr_sdt_copy()
2905 ioa_dump->next_page_index++; in ipr_sdt_copy()
2907 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; in ipr_sdt_copy()
2909 rem_len = length - bytes_copied; in ipr_sdt_copy()
2910 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; in ipr_sdt_copy()
2913 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
2914 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
2915 rc = -EIO; in ipr_sdt_copy()
2919 &page[ioa_dump->page_offset / 4], in ipr_sdt_copy()
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
2925 ioa_dump->page_offset += cur_len; in ipr_sdt_copy()
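
ipr_sdt_copy() buffers the dump into page-sized chunks, allocating a fresh page whenever page_offset wraps, so an arbitrarily large dump never needs one contiguous allocation. A compact sketch of that paging loop (PAGE_SZ, the fixed pages[] array, and the memset data source are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

int main(void)
{
        char *pages[8] = { NULL };
        int next_page = 0;
        size_t page_offset = PAGE_SZ;        /* force the first allocation */
        size_t length = 10000, copied = 0;

        while (copied < length) {
                if (page_offset >= PAGE_SZ) {
                        pages[next_page++] = malloc(PAGE_SZ);
                        page_offset = 0;
                }
                size_t rem_page = PAGE_SZ - page_offset;
                size_t rem_len = length - copied;
                size_t cur = rem_page < rem_len ? rem_page : rem_len;

                /* stands in for copying one chunk of dump data */
                memset(pages[next_page - 1] + page_offset, 0xab, cur);
                page_offset += cur;
                copied += cur;
        }
        printf("copied %zu bytes into %d pages\n", copied, next_page);

        for (int i = 0; i < next_page; i++)
                free(pages[i]);
        return 0;
}
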
2938 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2946 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_init_dump_entry_hdr()
2947 hdr->num_elems = 1; in ipr_init_dump_entry_hdr()
2948 hdr->offset = sizeof(*hdr); in ipr_init_dump_entry_hdr()
2949 hdr->status = IPR_DUMP_STATUS_SUCCESS; in ipr_init_dump_entry_hdr()
2953 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2963 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
2965 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); in ipr_dump_ioa_type_data()
2966 driver_dump->ioa_type_entry.hdr.len = in ipr_dump_ioa_type_data()
2967 sizeof(struct ipr_dump_ioa_type_entry) - in ipr_dump_ioa_type_data()
2969 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_ioa_type_data()
2970 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; in ipr_dump_ioa_type_data()
2971 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
2972 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | in ipr_dump_ioa_type_data()
2973 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | in ipr_dump_ioa_type_data()
2974 ucode_vpd->minor_release[1]; in ipr_dump_ioa_type_data()
2975 driver_dump->hdr.num_entries++; in ipr_dump_ioa_type_data()
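
The fw_version assignment above packs four one-byte VPD fields into a single u32 for the dump header. Worked example with made-up field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t major_release = 3, card_type = 2;
        uint8_t minor_release[2] = { 0x41, 0x07 };

        uint32_t fw_version = ((uint32_t)major_release << 24) |
                              ((uint32_t)card_type << 16) |
                              ((uint32_t)minor_release[0] << 8) |
                              minor_release[1];

        printf("fw_version = 0x%08x\n", fw_version);   /* 0x03024107 */
        return 0;
}
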
2979 * ipr_dump_version_data - Fill in the driver version in the dump.
2989 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); in ipr_dump_version_data()
2990 driver_dump->version_entry.hdr.len = in ipr_dump_version_data()
2991 sizeof(struct ipr_dump_version_entry) - in ipr_dump_version_data()
2993 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_version_data()
2994 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; in ipr_dump_version_data()
2995 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); in ipr_dump_version_data()
2996 driver_dump->hdr.num_entries++; in ipr_dump_version_data()
3000 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3010 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); in ipr_dump_trace_data()
3011 driver_dump->trace_entry.hdr.len = in ipr_dump_trace_data()
3012 sizeof(struct ipr_dump_trace_entry) - in ipr_dump_trace_data()
3014 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_trace_data()
3015 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; in ipr_dump_trace_data()
3016 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3017 driver_dump->hdr.num_entries++; in ipr_dump_trace_data()
3021 * ipr_dump_location_data - Fill in the IOA location in the dump.
3031 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); in ipr_dump_location_data()
3032 driver_dump->location_entry.hdr.len = in ipr_dump_location_data()
3033 sizeof(struct ipr_dump_location_entry) - in ipr_dump_location_data()
3035 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_location_data()
3036 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; in ipr_dump_location_data()
3037 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3038 driver_dump->hdr.num_entries++; in ipr_dump_location_data()
3042 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3053 struct ipr_driver_dump *driver_dump = &dump->driver_dump; in ipr_get_ioa_dump()
3054 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; in ipr_get_ioa_dump()
3063 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3065 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3070 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3076 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3078 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3079 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3085 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3087 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_get_ioa_dump()
3090 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); in ipr_get_ioa_dump()
3091 driver_dump->hdr.num_entries = 1; in ipr_get_ioa_dump()
3092 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); in ipr_get_ioa_dump()
3093 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; in ipr_get_ioa_dump()
3094 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; in ipr_get_ioa_dump()
3095 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; in ipr_get_ioa_dump()
3103 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); in ipr_get_ioa_dump()
3106 ipr_init_dump_entry_hdr(&ioa_dump->hdr); in ipr_get_ioa_dump()
3107 ioa_dump->hdr.len = 0; in ipr_get_ioa_dump()
3108 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_get_ioa_dump()
3109 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; in ipr_get_ioa_dump()
3115 sdt = &ioa_dump->sdt; in ipr_get_ioa_dump()
3117 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3130 /* Smart Dump table is ready to use and the first entry is valid */ in ipr_get_ioa_dump()
3131 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && in ipr_get_ioa_dump()
3132 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { in ipr_get_ioa_dump()
3133 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3135 rc, be32_to_cpu(sdt->hdr.state)); in ipr_get_ioa_dump()
3136 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; in ipr_get_ioa_dump()
3137 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3142 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); in ipr_get_ioa_dump()
3148 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); in ipr_get_ioa_dump()
3149 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3150 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3152 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3157 if (ioa_dump->hdr.len > max_dump_size) { in ipr_get_ioa_dump()
3158 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3162 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { in ipr_get_ioa_dump()
3163 sdt_word = be32_to_cpu(sdt->entry[i].start_token); in ipr_get_ioa_dump()
3164 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3165 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3168 end_off = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3171 bytes_to_copy = end_off - start_off; in ipr_get_ioa_dump()
3177 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; in ipr_get_ioa_dump()
3185 ioa_dump->hdr.len += bytes_copied; in ipr_get_ioa_dump()
3188 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3195 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3198 driver_dump->hdr.len += ioa_dump->hdr.len; in ipr_get_ioa_dump()
3200 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3209 * ipr_release_dump - Free adapter dump memory
3218 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump()
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3224 ioa_cfg->dump = NULL; in ipr_release_dump()
3225 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3228 for (i = 0; i < dump->ioa_dump.next_page_index; i++) in ipr_release_dump()
3229 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); in ipr_release_dump()
3231 vfree(dump->ioa_dump.ioa_data); in ipr_release_dump()
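/*
 * ipr_release_dump() is a kref release callback: once the last
 * reference drops, it detaches the dump under host_lock, then frees the
 * per-page buffers and the vmalloc'd pointer array. A minimal sketch of
 * the same release shape, with hypothetical types and names:
 */
struct example_dump {
	struct kref kref;
	void **pages;
	int nr_pages;
};

static void example_dump_release(struct kref *kref)
{
	struct example_dump *d = container_of(kref, struct example_dump, kref);
	int i;

	for (i = 0; i < d->nr_pages; i++)
		free_page((unsigned long)d->pages[i]);
	vfree(d->pages);	/* pointer array was vmalloc'd */
	kfree(d);
}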
3247 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3252 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_add_remove_thread()
3253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3257 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3258 if (res->del_from_ml && res->sdev) { in ipr_add_remove_thread()
3260 sdev = res->sdev; in ipr_add_remove_thread()
3262 if (!res->add_to_ml) in ipr_add_remove_thread()
3263 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_add_remove_thread()
3265 res->del_from_ml = 0; in ipr_add_remove_thread()
3266 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3269 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3276 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3277 if (res->add_to_ml) { in ipr_add_remove_thread()
3278 bus = res->bus; in ipr_add_remove_thread()
3279 target = res->target; in ipr_add_remove_thread()
3280 lun = res->lun; in ipr_add_remove_thread()
3281 res->add_to_ml = 0; in ipr_add_remove_thread()
3282 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3283 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_add_remove_thread()
3284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3289 ioa_cfg->scan_done = 1; in ipr_add_remove_thread()
3290 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3291 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_add_remove_thread()
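/*
 * scsi_add_device() and scsi_remove_device() may sleep, so the thread
 * above drops host_lock around each call and reacquires it afterwards.
 * A minimal sketch of that drop/call/reacquire pattern with hypothetical
 * types; the restart accounts for the list changing while unlocked:
 */
struct example_res {
	struct list_head queue;
	unsigned int bus, target, lun;
	unsigned int add_to_ml:1;
};

static void example_add_devices(struct Scsi_Host *host, spinlock_t *lock,
				struct list_head *res_q)
{
	struct example_res *res;
	unsigned long flags;

restart:
	spin_lock_irqsave(lock, flags);
	list_for_each_entry(res, res_q, queue) {
		if (res->add_to_ml) {
			res->add_to_ml = 0;
			spin_unlock_irqrestore(lock, flags);
			scsi_add_device(host, res->bus, res->target,
					res->lun);	/* may sleep */
			goto restart;
		}
	}
	spin_unlock_irqrestore(lock, flags);
}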
3296 * ipr_worker_thread - Worker thread
3300 * of adding and removing devices from the mid-layer as configuration
3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3316 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3317 dump = ioa_cfg->dump; in ipr_worker_thread()
3319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3322 kref_get(&dump->kref); in ipr_worker_thread()
3323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3325 kref_put(&dump->kref, ipr_release_dump); in ipr_worker_thread()
3327 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3328 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3334 if (ioa_cfg->scsi_unblock) { in ipr_worker_thread()
3335 ioa_cfg->scsi_unblock = 0; in ipr_worker_thread()
3336 ioa_cfg->scsi_blocked = 0; in ipr_worker_thread()
3337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3338 scsi_unblock_requests(ioa_cfg->host); in ipr_worker_thread()
3339 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3340 if (ioa_cfg->scsi_blocked) in ipr_worker_thread()
3341 scsi_block_requests(ioa_cfg->host); in ipr_worker_thread()
3344 if (!ioa_cfg->scan_enabled) { in ipr_worker_thread()
3345 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3349 schedule_work(&ioa_cfg->scsi_add_work_q); in ipr_worker_thread()
3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3357 * ipr_read_trace - Dump the adapter trace
3374 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace()
3378 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3379 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
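/*
 * memory_read_from_buffer() does the offset/length clamping for sysfs
 * binary reads: it copies at most count bytes starting at *ppos from a
 * buffer of known size and advances *ppos; host_lock is held above
 * because the trace buffer is written concurrently. A sketch of a read
 * handler built on it (hypothetical attribute, not the driver's trace):
 */
static ssize_t example_read_blob(char *buf, loff_t off, size_t count,
				 const void *blob, size_t blob_len)
{
	return memory_read_from_buffer(buf, count, &off, blob, blob_len);
}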
3397 * ipr_show_fw_version - Show the firmware version
3409 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version()
3410 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3416 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_show_fw_version()
3417 ucode_vpd->minor_release[0], in ipr_show_fw_version()
3418 ucode_vpd->minor_release[1]); in ipr_show_fw_version()
3419 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3432 * ipr_show_log_level - Show the adapter's error logging level
3444 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level()
3448 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3449 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3455 * ipr_store_log_level - Change the adapter's error logging level
3469 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level()
3472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3473 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
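/*
 * simple_strtoul() above silently ignores parse errors; in new code the
 * checked kstrtouint() idiom is generally preferred. A sketch, not the
 * driver's actual store routine:
 */
static ssize_t example_store_level(const char *buf, size_t count, u32 *level)
{
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 10, &val);	/* tolerates a trailing newline */
	if (rc)
		return rc;
	*level = val;
	return count;
}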
3488 * ipr_store_diagnostics - IOA Diagnostics interface
3505 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics()
3510 return -EACCES; in ipr_store_diagnostics()
3512 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3513 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3515 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3516 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3519 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3522 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3523 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3524 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3529 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3530 return -EIO; in ipr_store_diagnostics()
3533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3534 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3535 rc = -EIO; in ipr_store_diagnostics()
3536 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
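/*
 * The store routine above re-checks in_reset_reload after each wakeup
 * because host_lock must be dropped across wait_event() and another
 * reset may begin in the meantime. The recurring pattern, sketched with
 * hypothetical state:
 */
static void example_wait_for_reset(spinlock_t *lock, wait_queue_head_t *wq,
				   int *in_reset)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (*in_reset) {
		spin_unlock_irqrestore(lock, flags);
		wait_event(*wq, !*in_reset);	/* unlocked, as in the driver */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}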
3550 * ipr_show_adapter_state - Show the adapter's state
3562 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state()
3566 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3567 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3571 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3576 * ipr_store_adapter_state - Change adapter state
3592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state()
3597 return -EACCES; in ipr_store_adapter_state()
3599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3600 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3602 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3603 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3604 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3605 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3608 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3609 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3613 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
3628 * ipr_store_reset_adapter - Reset the adapter
3644 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter()
3649 return -EACCES; in ipr_store_reset_adapter()
3651 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3652 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3654 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3655 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3670 * ipr_show_iopoll_weight - Show ipr polling mode
3682 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight()
3686 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3687 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3688 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3694 * ipr_store_iopoll_weight - Change the adapter's polling mode
3708 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight()
3713 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3714 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3715 return -EINVAL; in ipr_store_iopoll_weight()
3718 return -EINVAL; in ipr_store_iopoll_weight()
3721 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3722 return -EINVAL; in ipr_store_iopoll_weight()
3725 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3726 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight is already set to that value\n"); in ipr_store_iopoll_weight()
3730 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3731 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3732 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3735 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
3736 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3737 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3738 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3739 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3740 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
3743 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
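/*
 * Changing the weight above first disables irq_poll on every secondary
 * HRRQ, then re-initializes each instance with the new weight;
 * irq_poll_init() must not race with a live poller. Minimal sketch over
 * a hypothetical array of queues:
 */
static void example_set_poll_weight(struct irq_poll *polls, int n,
				    int weight, irq_poll_fn *fn)
{
	int i;

	for (i = 0; i < n; i++)
		irq_poll_disable(&polls[i]);	/* waits out pending polls */
	for (i = 0; i < n; i++)
		irq_poll_init(&polls[i], weight, fn);
}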
3758 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3762 * list to use for microcode download
3773 sg_size = buf_len / (IPR_MAX_SGLIST - 1); in ipr_alloc_ucode_buffer()
3784 sglist->order = order; in ipr_alloc_ucode_buffer()
3785 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, in ipr_alloc_ucode_buffer()
3786 &sglist->num_sg); in ipr_alloc_ucode_buffer()
3787 if (!sglist->scatterlist) { in ipr_alloc_ucode_buffer()
3796 * ipr_free_ucode_buffer - Frees a microcode download buffer
3807 sgl_free_order(sglist->scatterlist, sglist->order); in ipr_free_ucode_buffer()
3812 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3830 bsize_elem = PAGE_SIZE * (1 << sglist->order); in ipr_copy_ucode_buffer()
3832 sg = sglist->scatterlist; in ipr_copy_ucode_buffer()
3840 sg->length = bsize_elem; in ipr_copy_ucode_buffer()
3853 sg->length = len % bsize_elem; in ipr_copy_ucode_buffer()
3856 sglist->buffer_len = len; in ipr_copy_ucode_buffer()
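/*
 * The copy above walks the scatterlist in bsize_elem chunks
 * (PAGE_SIZE << order bytes per element) and gives the final element
 * only the remainder, which is why sg->length is patched afterwards.
 * Sketch of the chunk arithmetic alone:
 */
static size_t example_chunk_len(size_t total, unsigned int order,
				unsigned int idx)
{
	size_t bsize_elem = PAGE_SIZE * (1 << order);
	size_t done = (size_t)idx * bsize_elem;

	return done < total ? min(total - done, bsize_elem) : 0;
}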
3861 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3871 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl64()
3872 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ucode_ioadl64()
3873 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl64()
3877 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl64()
3878 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl64()
3879 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl64()
3881 ioarcb->ioadl_len = in ipr_build_ucode_ioadl64()
3882 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl64()
3883 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl64()
3889 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ucode_ioadl64()
3893 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3903 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl()
3904 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ucode_ioadl()
3905 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl()
3909 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl()
3910 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl()
3911 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl()
3913 ioarcb->ioadl_len = in ipr_build_ucode_ioadl()
3914 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl()
3916 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl()
3923 ioadl[i-1].flags_and_data_len |= in ipr_build_ucode_ioadl()
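/*
 * Both IOADL builders end by OR-ing a LAST flag into the final
 * descriptor so the adapter knows where the list stops. The generic
 * shape of that loop, with a hypothetical descriptor type standing in
 * for the 32- and 64-bit IOADL formats:
 */
struct example_desc {
	__be32 flags;
	__be32 len;
	__be64 addr;
};

static void example_fill_descs(struct example_desc *desc,
			       struct scatterlist *sgl, int nents,
			       u32 last_flag)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		desc[i].flags = 0;
		desc[i].len = cpu_to_be32(sg_dma_len(sg));
		desc[i].addr = cpu_to_be64(sg_dma_address(sg));
	}
	desc[nents - 1].flags |= cpu_to_be32(last_flag);
}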
3928 * ipr_update_ioa_ucode - Update IOA's microcode
3935 * 0 on success / -EIO on failure
3942 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3943 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
3944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3945 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
3946 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3949 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
3950 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3951 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3953 return -EIO; in ipr_update_ioa_ucode()
3956 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3957 sglist->scatterlist, sglist->num_sg, in ipr_update_ioa_ucode()
3960 if (!sglist->num_dma_sg) { in ipr_update_ioa_ucode()
3961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3962 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
3964 return -EIO; in ipr_update_ioa_ucode()
3967 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3970 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
3972 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
3973 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
3974 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
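/*
 * dma_map_sg() returns the number of DMA-mapped entries, which may be
 * fewer than num_sg if the IOMMU coalesces segments, and 0 on failure;
 * the code above maps 0 to -EIO. Sketch:
 */
static int example_map_sgl(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	return mapped ? mapped : -EIO;
}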
3979 * ipr_store_update_fw - Update the firmware on the adapter
3995 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw()
4005 return -EACCES; in ipr_store_update_fw()
4013 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4014 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4015 return -EIO; in ipr_store_update_fw()
4018 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; in ipr_store_update_fw()
4020 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4021 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4025 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4027 return -ENOMEM; in ipr_store_update_fw()
4033 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4059 * ipr_show_fw_type - Show the adapter's firmware type.
4071 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type()
4075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4076 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4077 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4095 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_async_err_log()
4100 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4101 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_read_async_err_log()
4104 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4107 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, in ipr_read_async_err_log()
4108 sizeof(hostrcb->hcam)); in ipr_read_async_err_log()
4109 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4119 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_next_async_err_log()
4123 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4124 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_next_async_err_log()
4127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4132 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_next_async_err_log()
4133 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4163 * ipr_read_dump - Dump the adapter
4180 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump()
4188 return -EACCES; in ipr_read_dump()
4190 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4191 dump = ioa_cfg->dump; in ipr_read_dump()
4193 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4194 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4197 kref_get(&dump->kref); in ipr_read_dump()
4198 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4200 if (off > dump->driver_dump.hdr.len) { in ipr_read_dump()
4201 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
4205 if (off + count > dump->driver_dump.hdr.len) { in ipr_read_dump()
4206 count = dump->driver_dump.hdr.len - off; in ipr_read_dump()
4210 if (count && off < sizeof(dump->driver_dump)) { in ipr_read_dump()
4211 if (off + count > sizeof(dump->driver_dump)) in ipr_read_dump()
4212 len = sizeof(dump->driver_dump) - off; in ipr_read_dump()
4215 src = (u8 *)&dump->driver_dump + off; in ipr_read_dump()
4219 count -= len; in ipr_read_dump()
4222 off -= sizeof(dump->driver_dump); in ipr_read_dump()
4224 if (ioa_cfg->sis64) in ipr_read_dump()
4226 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * in ipr_read_dump()
4234 len = sdt_end - off; in ipr_read_dump()
4237 src = (u8 *)&dump->ioa_dump + off; in ipr_read_dump()
4241 count -= len; in ipr_read_dump()
4244 off -= sdt_end; in ipr_read_dump()
4248 len = PAGE_ALIGN(off) - off; in ipr_read_dump()
4251 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; in ipr_read_dump()
4256 count -= len; in ipr_read_dump()
4259 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
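/*
 * The tail of ipr_read_dump() serves the paged IOA data: each copy is
 * capped at the end of the current page so a read never straddles two
 * non-contiguous page allocations. The generic form of that clamp:
 */
static size_t example_in_page_len(loff_t off, size_t count)
{
	size_t space = PAGE_SIZE - offset_in_page(off);

	return min(space, count);
}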
4264 * ipr_alloc_dump - Prepare for adapter dump
4280 return -ENOMEM; in ipr_alloc_dump()
4283 if (ioa_cfg->sis64) in ipr_alloc_dump()
4293 return -ENOMEM; in ipr_alloc_dump()
4296 dump->ioa_dump.ioa_data = ioa_data; in ipr_alloc_dump()
4298 kref_init(&dump->kref); in ipr_alloc_dump()
4299 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4301 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4303 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4305 vfree(dump->ioa_dump.ioa_data); in ipr_alloc_dump()
4310 ioa_cfg->dump = dump; in ipr_alloc_dump()
4311 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4312 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4313 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4314 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4322 * ipr_free_dump - Free adapter dump memory
4335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4336 dump = ioa_cfg->dump; in ipr_free_dump()
4338 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4342 ioa_cfg->dump = NULL; in ipr_free_dump()
4343 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4345 kref_put(&dump->kref, ipr_release_dump); in ipr_free_dump()
4352 * ipr_write_dump - Setup dump state of adapter
4369 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump()
4373 return -EACCES; in ipr_write_dump()
4380 return -EINVAL; in ipr_write_dump()
4402 * ipr_change_queue_depth - Change the device's queue depth
4412 return sdev->queue_depth; in ipr_change_queue_depth()
4416 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4427 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle()
4430 ssize_t len = -ENXIO; in ipr_show_adapter_handle()
4432 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4433 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_adapter_handle()
4435 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); in ipr_show_adapter_handle()
4436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4449 * ipr_show_resource_path - Show the resource path or the resource address for
4461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path()
4464 ssize_t len = -ENXIO; in ipr_show_resource_path()
4467 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4468 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_path()
4469 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4471 __ipr_format_res_path(res->res_path, buffer, in ipr_show_resource_path()
4474 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4475 res->bus, res->target, res->lun); in ipr_show_resource_path()
4477 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4490 * ipr_show_device_id - Show the device_id for this device.
4501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id()
4504 ssize_t len = -ENXIO; in ipr_show_device_id()
4506 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4507 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_device_id()
4508 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4509 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); in ipr_show_device_id()
4511 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); in ipr_show_device_id()
4513 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4526 * ipr_show_resource_type - Show the resource type for this device.
4537 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type()
4540 ssize_t len = -ENXIO; in ipr_show_resource_type()
4542 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4543 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_type()
4546 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); in ipr_show_resource_type()
4548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4561 * ipr_show_raw_mode - Show the adapter's raw mode
4573 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode()
4578 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4579 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_raw_mode()
4581 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); in ipr_show_raw_mode()
4583 len = -ENXIO; in ipr_show_raw_mode()
4584 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4589 * ipr_store_raw_mode - Change the adapter's raw mode
4603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode()
4608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4609 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_store_raw_mode()
4612 res->raw_mode = simple_strtoul(buf, NULL, 10); in ipr_store_raw_mode()
4614 if (res->sdev) in ipr_store_raw_mode()
4615 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", in ipr_store_raw_mode()
4616 res->raw_mode ? "enabled" : "disabled"); in ipr_store_raw_mode()
4618 len = -EINVAL; in ipr_store_raw_mode()
4620 len = -ENXIO; in ipr_store_raw_mode()
4621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4646 * ipr_biosparam - Return the heads/sectors/cylinders (HSC) mapping
4681 * ipr_find_starget - Find target based on bus/target.
4689 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_find_starget()
4690 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget()
4693 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4694 if ((res->bus == starget->channel) && in ipr_find_starget()
4695 (res->target == starget->id)) { in ipr_find_starget()
4704 * ipr_target_destroy - Destroy a SCSI target
4710 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_destroy()
4711 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy()
4713 if (ioa_cfg->sis64) { in ipr_target_destroy()
4715 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) in ipr_target_destroy()
4716 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4717 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) in ipr_target_destroy()
4718 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4719 else if (starget->channel == 0) in ipr_target_destroy()
4720 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4726 * ipr_find_sdev - Find device based on bus/target/lun.
4734 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev()
4737 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4738 if ((res->bus == sdev->channel) && in ipr_find_sdev()
4739 (res->target == sdev->id) && in ipr_find_sdev()
4740 (res->lun == sdev->lun)) in ipr_find_sdev()
4748 * ipr_sdev_destroy - Unconfigure a SCSI device
4760 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_sdev_destroy()
4762 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_destroy()
4763 res = (struct ipr_resource_entry *) sdev->hostdata; in ipr_sdev_destroy()
4765 sdev->hostdata = NULL; in ipr_sdev_destroy()
4766 res->sdev = NULL; in ipr_sdev_destroy()
4768 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_destroy()
4772 * ipr_sdev_configure - Configure a SCSI device
4784 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_sdev_configure()
4789 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_configure()
4790 res = sdev->hostdata; in ipr_sdev_configure()
4793 sdev->type = TYPE_RAID; in ipr_sdev_configure()
4795 sdev->scsi_level = 4; in ipr_sdev_configure()
4796 sdev->no_uld_attach = 1; in ipr_sdev_configure()
4799 sdev->scsi_level = SCSI_SPC_3; in ipr_sdev_configure()
4800 sdev->no_report_opcodes = 1; in ipr_sdev_configure()
4801 blk_queue_rq_timeout(sdev->request_queue, in ipr_sdev_configure()
4803 lim->max_hw_sectors = IPR_VSET_MAX_SECTORS; in ipr_sdev_configure()
4805 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_configure()
4807 if (ioa_cfg->sis64) in ipr_sdev_configure()
4810 res->res_path, buffer, sizeof(buffer))); in ipr_sdev_configure()
4813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_configure()
4818 * ipr_sdev_init - Prepare for commands to a device.
4823 * can then use this pointer in ipr_queuecommand when
4827 * 0 on success / -ENXIO if device does not exist
4831 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_sdev_init()
4834 int rc = -ENXIO; in ipr_sdev_init()
4836 sdev->hostdata = NULL; in ipr_sdev_init()
4838 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_init()
4842 res->sdev = sdev; in ipr_sdev_init()
4843 res->add_to_ml = 0; in ipr_sdev_init()
4844 res->in_erp = 0; in ipr_sdev_init()
4845 sdev->hostdata = res; in ipr_sdev_init()
4847 res->needs_sync_complete = 1; in ipr_sdev_init()
4852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_init()
4853 return -ENXIO; in ipr_sdev_init()
4857 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdev_init()
4863 * ipr_match_lun - Match function for specified LUN
4872 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) in ipr_match_lun()
4878 * ipr_cmnd_is_free - Check if a command is free or not
4888 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { in ipr_cmnd_is_free()
4897 * ipr_wait_for_ops - Wait for matching commands to complete
4900 * @match: match function to use
4920 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
4921 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
4922 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
4925 ipr_cmd->eh_comp = &comp; in ipr_wait_for_ops()
4930 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
4940 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
4941 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
4942 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
4945 ipr_cmd->eh_comp = NULL; in ipr_wait_for_ops()
4950 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
4954 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
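/*
 * ipr_wait_for_ops() hangs a struct completion off every matching
 * outstanding command, then blocks with a timeout and rescans until
 * nothing matches. The wait step, sketched:
 */
static int example_wait_op(struct completion *comp, unsigned long timeout)
{
	if (!wait_for_completion_timeout(comp, timeout))
		return -ETIMEDOUT;	/* nonzero return means time remained */
	return 0;
}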
4972 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
4973 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
4975 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
4977 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
4980 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
4981 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
4984 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
4985 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
4986 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
4990 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
4995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5001 * ipr_device_reset - Reset the device
5011 * 0 on success / non-zero on failure
5023 ioarcb = &ipr_cmd->ioarcb; in ipr_device_reset()
5024 cmd_pkt = &ioarcb->cmd_pkt; in ipr_device_reset()
5026 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5027 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_device_reset()
5029 ioarcb->res_handle = res->res_handle; in ipr_device_reset()
5030 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_device_reset()
5031 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_device_reset()
5034 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_device_reset()
5035 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_device_reset()
5038 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; in ipr_device_reset()
5042 * __ipr_eh_dev_reset - Reset the device
5059 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5060 res = scsi_cmd->device->hostdata; in __ipr_eh_dev_reset()
5064 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the in __ipr_eh_dev_reset()
5067 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5069 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5072 res->resetting_device = 1; in __ipr_eh_dev_reset()
5076 res->resetting_device = 0; in __ipr_eh_dev_reset()
5077 res->reset_occurred = 1; in __ipr_eh_dev_reset()
5089 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5090 res = cmd->device->hostdata; in ipr_eh_dev_reset()
5095 spin_lock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5097 spin_unlock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5100 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5106 * ipr_bus_reset_done - Op done function for bus reset.
5116 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done()
5120 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5121 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5122 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { in ipr_bus_reset_done()
5123 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5132 if (ipr_cmd->sibling->sibling) in ipr_bus_reset_done()
5133 ipr_cmd->sibling->sibling = NULL; in ipr_bus_reset_done()
5135 ipr_cmd->sibling->done(ipr_cmd->sibling); in ipr_bus_reset_done()
5137 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_bus_reset_done()
5142 * ipr_abort_timeout - An abort task has timed out
5156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout()
5161 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5162 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5163 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5167 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); in ipr_abort_timeout()
5169 ipr_cmd->sibling = reset_cmd; in ipr_abort_timeout()
5170 reset_cmd->sibling = ipr_cmd; in ipr_abort_timeout()
5171 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; in ipr_abort_timeout()
5172 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; in ipr_abort_timeout()
5173 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_abort_timeout()
5174 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_abort_timeout()
5175 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; in ipr_abort_timeout()
5178 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5183 * ipr_cancel_op - Cancel specified op
5202 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5203 res = scsi_cmd->device->hostdata; in ipr_cancel_op()
5206 * This will force the mid-layer to call ipr_eh_host_reset, in ipr_cancel_op()
5209 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5210 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5220 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5226 spin_lock(&hrrq->_lock); in ipr_cancel_op()
5227 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_cancel_op()
5228 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { in ipr_cancel_op()
5229 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { in ipr_cancel_op()
5235 spin_unlock(&hrrq->_lock); in ipr_cancel_op()
5242 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_cancel_op()
5243 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_cancel_op()
5244 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_cancel_op()
5245 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_cancel_op()
5246 ipr_cmd->u.sdev = scsi_cmd->device; in ipr_cancel_op()
5249 scsi_cmd->cmnd[0]); in ipr_cancel_op()
5251 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_cancel_op()
5262 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_cancel_op()
5264 res->needs_sync_complete = 1; in ipr_cancel_op()
5271 * ipr_scan_finished - Report whether scan is done
5281 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished()
5284 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_scan_finished()
5285 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5287 if ((elapsed_time / HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5289 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_scan_finished()
5294 * ipr_eh_abort - Abort the specified SCSI command
5308 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5310 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5312 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5315 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5321 * ipr_handle_other_interrupt - Handle "other" interrupts
5334 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5341 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5342 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5343 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5347 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5348 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5349 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5350 timer_delete(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5351 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5361 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5362 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5364 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5365 timer_delete(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5366 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5368 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5370 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5372 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5373 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5378 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5380 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5383 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5386 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5387 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5397 * ipr_isr_eh - Interrupt service routine error handler
5407 ioa_cfg->errors_logged++; in ipr_isr_eh()
5408 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5410 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5411 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5422 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq()
5426 if (!hrr_queue->allow_interrupts) in ipr_process_hrrq()
5429 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_process_hrrq()
5430 hrr_queue->toggle_bit) { in ipr_process_hrrq()
5432 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & in ipr_process_hrrq()
5436 if (unlikely(cmd_index > hrr_queue->max_cmd_id || in ipr_process_hrrq()
5437 cmd_index < hrr_queue->min_cmd_id)) { in ipr_process_hrrq()
5444 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5445 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_hrrq()
5449 list_move_tail(&ipr_cmd->queue, doneq); in ipr_process_hrrq()
5451 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { in ipr_process_hrrq()
5452 hrr_queue->hrrq_curr++; in ipr_process_hrrq()
5454 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; in ipr_process_hrrq()
5455 hrr_queue->toggle_bit ^= 1u; in ipr_process_hrrq()
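/*
 * The host RRQ is a ring the adapter fills with response words; a
 * toggle bit distinguishes fresh entries from stale ones, and the
 * consumer flips its expected value each time it wraps. Sketch of the
 * wrap step over a hypothetical ring:
 */
struct example_ring {
	__be32 *curr, *start, *end;
	u32 toggle;
};

static void example_ring_advance(struct example_ring *r)
{
	if (r->curr < r->end) {
		r->curr++;
	} else {
		r->curr = r->start;
		r->toggle ^= 1u;	/* expect the opposite bit next lap */
	}
}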
5475 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_iopoll()
5480 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_iopoll()
5483 list_del(&ipr_cmd->queue); in ipr_iopoll()
5484 timer_delete(&ipr_cmd->timer); in ipr_iopoll()
5485 ipr_cmd->fast_done(ipr_cmd); in ipr_iopoll()
5492 * ipr_isr - Interrupt service routine
5502 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr()
5511 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr()
5513 if (!hrrq->allow_interrupts) { in ipr_isr()
5514 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5519 if (ipr_process_hrrq(hrrq, -1, &doneq)) { in ipr_isr()
5522 if (!ioa_cfg->clear_isr) in ipr_isr()
5529 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5530 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5535 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5550 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5552 list_del(&ipr_cmd->queue); in ipr_isr()
5553 timer_delete(&ipr_cmd->timer); in ipr_isr()
5554 ipr_cmd->fast_done(ipr_cmd); in ipr_isr()
5560 * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5570 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq()
5576 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5579 if (!hrrq->allow_interrupts) { in ipr_isr_mhrrq()
5580 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5584 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5585 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5586 hrrq->toggle_bit) { in ipr_isr_mhrrq()
5587 irq_poll_sched(&hrrq->iopoll); in ipr_isr_mhrrq()
5588 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5592 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5593 hrrq->toggle_bit) in ipr_isr_mhrrq()
5595 if (ipr_process_hrrq(hrrq, -1, &doneq)) in ipr_isr_mhrrq()
5599 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5602 list_del(&ipr_cmd->queue); in ipr_isr_mhrrq()
5603 timer_delete(&ipr_cmd->timer); in ipr_isr_mhrrq()
5604 ipr_cmd->fast_done(ipr_cmd); in ipr_isr_mhrrq()
5610 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5615 * 0 on success / -1 on failure
5624 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl64()
5625 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl64()
5626 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ioadl64()
5635 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5636 return -1; in ipr_build_ioadl64()
5639 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl64()
5641 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl64()
5642 ioarcb->ioadl_len = in ipr_build_ioadl64()
5643 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl64()
5645 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl64()
5647 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl64()
5648 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) in ipr_build_ioadl64()
5651 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl64()
5657 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl64()
5662 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5667 * 0 on success / -1 on failure
5676 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl()
5677 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl()
5678 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ioadl()
5686 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
5687 return -1; in ipr_build_ioadl()
5690 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl()
5692 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl()
5694 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl()
5695 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
5696 ioarcb->ioadl_len = in ipr_build_ioadl()
5697 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
5698 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { in ipr_build_ioadl()
5700 ioarcb->read_data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
5701 ioarcb->read_ioadl_len = in ipr_build_ioadl()
5702 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
5705 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { in ipr_build_ioadl()
5706 ioadl = ioarcb->u.add_data.u.ioadl; in ipr_build_ioadl()
5707 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + in ipr_build_ioadl()
5709 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_build_ioadl()
5712 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl()
5718 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl()
5723 * __ipr_erp_done - Process completion of ERP for a device
5734 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_erp_done()
5735 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in __ipr_erp_done()
5736 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_done()
5739 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_erp_done()
5743 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, in __ipr_erp_done()
5749 res->needs_sync_complete = 1; in __ipr_erp_done()
5750 res->in_erp = 0; in __ipr_erp_done()
5752 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_erp_done()
5754 if (ipr_cmd->eh_comp) in __ipr_erp_done()
5755 complete(ipr_cmd->eh_comp); in __ipr_erp_done()
5756 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_erp_done()
5760 * ipr_erp_done - Process completion of ERP for a device
5771 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_done()
5774 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
5776 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
5780 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5788 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd_for_erp()
5789 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd_for_erp()
5790 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd_for_erp()
5792 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd_for_erp()
5793 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
5794 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
5795 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
5796 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
5797 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd_for_erp()
5798 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
5800 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
5801 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
5804 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
5806 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd_for_erp()
5811 * __ipr_erp_request_sense - Send request sense to a device
5822 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in __ipr_erp_request_sense()
5823 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_request_sense()
5832 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; in __ipr_erp_request_sense()
5833 cmd_pkt->cdb[0] = REQUEST_SENSE; in __ipr_erp_request_sense()
5834 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; in __ipr_erp_request_sense()
5835 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; in __ipr_erp_request_sense()
5836 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in __ipr_erp_request_sense()
5837 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); in __ipr_erp_request_sense()
5839 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, in __ipr_erp_request_sense()
5847 * ipr_erp_request_sense - Send request sense to a device
5858 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_request_sense()
5861 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
5863 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
5867 * ipr_erp_cancel_all - Send cancel all to a device
5880 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_cancel_all()
5881 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_cancel_all()
5884 res->in_erp = 1; in ipr_erp_cancel_all()
5888 if (!scsi_cmd->device->simple_tags) { in ipr_erp_cancel_all()
5893 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_erp_cancel_all()
5894 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_erp_cancel_all()
5895 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_erp_cancel_all()
5902 * ipr_dump_ioasa - Dump contents of IOASA
5920 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_dump_ioasa()
5924 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
5925 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
5930 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
5938 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
5940 if (ioasa->hdr.ilid != 0) in ipr_dump_ioasa()
5952 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); in ipr_dump_ioasa()
5953 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
5955 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
5970 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5979 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; in ipr_gen_sense()
5980 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; in ipr_gen_sense()
5981 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_gen_sense()
5982 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); in ipr_gen_sense()
5989 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; in ipr_gen_sense()
5993 ioasa->u.vset.failing_lba_hi != 0) { in ipr_gen_sense()
6004 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); in ipr_gen_sense()
6011 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6025 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { in ipr_gen_sense()
6036 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; in ipr_gen_sense()
6039 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; in ipr_gen_sense()
6043 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6045 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); in ipr_gen_sense()
6060 * ipr_get_autosense - Copy autosense data to sense buffer
6071 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_get_autosense()
6072 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_get_autosense()
6074 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) in ipr_get_autosense()
6077 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6078 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, in ipr_get_autosense()
6079 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), in ipr_get_autosense()
6082 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, in ipr_get_autosense()
6083 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), in ipr_get_autosense()
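/*
 * The autosense copy above clamps the adapter-reported length with
 * min_t() so a malformed value can never overrun the midlayer's
 * SCSI_SENSE_BUFFERSIZE sense buffer. The generic shape:
 */
static void example_copy_sense(u8 *dst, const u8 *src, u16 reported_len)
{
	memcpy(dst, src, min_t(u16, reported_len, SCSI_SENSE_BUFFERSIZE));
}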
6089 * ipr_erp_start - Process an error response for a SCSI op
6102 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_start()
6103 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_start()
6104 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_erp_start()
6120 scsi_cmd->result |= (DID_ABORT << 16); in ipr_erp_start()
6122 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6126 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6129 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6131 res->needs_sync_complete = 1; in ipr_erp_start()
6134 if (!res->in_erp) in ipr_erp_start()
6135 res->needs_sync_complete = 1; in ipr_erp_start()
6136 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6142 * so SCSI mid-layer and upper layers handle it accordingly. in ipr_erp_start()
6144 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) in ipr_erp_start()
6145 scsi_cmd->result |= (DID_PASSTHROUGH << 16); in ipr_erp_start()
6153 if (!res->resetting_device) in ipr_erp_start()
6154 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6155 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6157 res->needs_sync_complete = 1; in ipr_erp_start()
6160 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); in ipr_erp_start()
6170 res->needs_sync_complete = 1; in ipr_erp_start()
6175 if (res->raw_mode) { in ipr_erp_start()
6176 res->raw_mode = 0; in ipr_erp_start()
6177 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6179 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6183 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6185 res->needs_sync_complete = 1; in ipr_erp_start()
6189 scsi_dma_unmap(ipr_cmd->scsi_cmd); in ipr_erp_start()
6191 if (ipr_cmd->eh_comp) in ipr_erp_start()
6192 complete(ipr_cmd->eh_comp); in ipr_erp_start()
6193 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_start()
6197 * ipr_scsi_done - mid-layer done function
6201 * ops generated by the SCSI mid-layer
6208 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done()
6209 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_scsi_done()
6210 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_scsi_done()
6213 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); in ipr_scsi_done()
6218 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6220 if (ipr_cmd->eh_comp) in ipr_scsi_done()
6221 complete(ipr_cmd->eh_comp); in ipr_scsi_done()
6222 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_done()
6223 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6225 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6226 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6228 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6234 * ipr_queuecommand - Queue a mid-layer request
6238 * This function queues a request generated by the mid-layer.
6257 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6259 scsi_cmd->result = (DID_OK << 16); in ipr_queuecommand()
6260 res = scsi_cmd->device->hostdata; in ipr_queuecommand()
6263 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6265 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6271 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6272 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6277 * FIXME - Create scsi_set_host_offline interface in ipr_queuecommand()
6280 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6281 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6287 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6290 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6293 ioarcb = &ipr_cmd->ioarcb; in ipr_queuecommand()
6295 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); in ipr_queuecommand()
6296 ipr_cmd->scsi_cmd = scsi_cmd; in ipr_queuecommand()
6297 ipr_cmd->done = ipr_scsi_eh_done; in ipr_queuecommand()
6300 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6301 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6303 if (res->reset_occurred) { in ipr_queuecommand()
6304 res->reset_occurred = 0; in ipr_queuecommand()
6305 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; in ipr_queuecommand()
6310 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_queuecommand()
6312 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; in ipr_queuecommand()
6313 if (scsi_cmd->flags & SCMD_TAGGED) in ipr_queuecommand()
6314 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; in ipr_queuecommand()
6316 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; in ipr_queuecommand()
6319 if (scsi_cmd->cmnd[0] >= 0xC0 && in ipr_queuecommand()
6320 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { in ipr_queuecommand()
6321 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_queuecommand()
6323 if (res->raw_mode && ipr_is_af_dasd_device(res)) { in ipr_queuecommand()
6324 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; in ipr_queuecommand()
6326 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6327 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6330 if (ioa_cfg->sis64) in ipr_queuecommand()
6335 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6336 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6337 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6338 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6344 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6345 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6346 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6351 ioarcb->res_handle = res->res_handle; in ipr_queuecommand()
6352 if (res->needs_sync_complete) { in ipr_queuecommand()
6353 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; in ipr_queuecommand()
6354 res->needs_sync_complete = 0; in ipr_queuecommand()
6356 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6359 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6363 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6364 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in ipr_queuecommand()
6365 scsi_cmd->result = (DID_NO_CONNECT << 16); in ipr_queuecommand()
6367 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
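/*
 * Queueing flow, in brief: pick an HRRQ from the command's hrrq_id, check
 * allow_cmds/ioa_is_dead under hrrq->lock, build the IOARCB (CDB copy,
 * underflow and tagging flags, request_type), then re-take the lock and
 * re-check adapter state before adding the command to hrrq_pending_q.
 * A request that races with device removal or a dead IOA gets
 * DID_NO_CONNECT with a cleared sense buffer, as in the error path above.
 */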
6372 * ipr_ioa_info - Get information about the card/driver
6384 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6386 spin_lock_irqsave(host->host_lock, lock_flags); in ipr_ioa_info()
6387 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6388 spin_unlock_irqrestore(host->host_lock, lock_flags); in ipr_ioa_info()
6409 .this_id = -1,
6419 * ipr_ioa_bringdown_done - IOA bring down completion.
6430 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done()
6434 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
6436 ioa_cfg->scsi_unblock = 1; in ipr_ioa_bringdown_done()
6437 schedule_work(&ioa_cfg->work_q); in ipr_ioa_bringdown_done()
6440 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
6441 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
6442 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
6443 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6444 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
6445 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
6449 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
6450 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
6457 * ipr_ioa_reset_done - IOA reset completion.
6461  * It schedules any necessary mid-layer add/removes and wakes any reset sleepers.
6469 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done()
6474 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
6475 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
6476 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
6477 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
6478 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
6481 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
6482 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
6484 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
6485 if (res->add_to_ml || res->del_from_ml) { in ipr_ioa_reset_done()
6490 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
6493 list_del_init(&ioa_cfg->hostrcb[j]->queue); in ipr_ioa_reset_done()
6497 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
6501 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
6504 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
6505 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
6507 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
6508 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
6509 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
6511 ioa_cfg->scsi_unblock = 1; in ipr_ioa_reset_done()
6512 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
6518 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6529 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); in ipr_set_sup_dev_dflt()
6530 supported_dev->num_records = 1; in ipr_set_sup_dev_dflt()
6531 supported_dev->data_length = in ipr_set_sup_dev_dflt()
6533 supported_dev->reserved = 0; in ipr_set_sup_dev_dflt()
6537 * ipr_set_supported_devs - Send Set Supported Devices for a device
6547 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs()
6548 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
6549 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_set_supported_devs()
6550 struct ipr_resource_entry *res = ipr_cmd->u.res; in ipr_set_supported_devs()
6552 ipr_cmd->job_step = ipr_ioa_reset_done; in ipr_set_supported_devs()
6554 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
6558 ipr_cmd->u.res = res; in ipr_set_supported_devs()
6559 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); in ipr_set_supported_devs()
6561 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_set_supported_devs()
6562 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_set_supported_devs()
6563 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_set_supported_devs()
6565 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; in ipr_set_supported_devs()
6566 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; in ipr_set_supported_devs()
6567 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; in ipr_set_supported_devs()
6568 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; in ipr_set_supported_devs()
6571 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
6579 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
6580 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_set_supported_devs()
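/*
 * The reset/init sequence is a cooperative state machine: each step points
 * ipr_cmd->job_step at its successor before issuing its own command, and
 * ipr_reset_ioa_job() (further below) keeps invoking job_step until a step
 * blocks or the chain ends. Here, on non-SIS-64 adapters the step re-arms
 * itself so Set Supported Devices is sent once for each remaining device
 * on used_res_q.
 */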
6590 * ipr_get_mode_page - Locate specified mode page
6605 if (!mode_pages || (mode_pages->hdr.length == 0)) in ipr_get_mode_page()
6608 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; in ipr_get_mode_page()
6610 (mode_pages->data + mode_pages->hdr.block_desc_len); in ipr_get_mode_page()
6614 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) in ipr_get_mode_page()
6619 mode_hdr->page_length); in ipr_get_mode_page()
6620 length -= page_length; in ipr_get_mode_page()
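/*
 * Length arithmetic above, worked through: hdr.length is the SCSI mode
 * data length, which excludes its own byte, so the full buffer is
 * hdr.length + 1. Subtracting the 4-byte MODE SENSE(6) header and the
 * block descriptors leaves only the mode page data. E.g. for
 * hdr.length = 35 and block_desc_len = 8 (illustrative values):
 *
 *	length = (35 + 1) - 4 - 8 = 24 bytes of mode pages
 */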
6629 * ipr_check_term_power - Check for term power errors
6649 entry_length = mode_page->entry_length; in ipr_check_term_power()
6651 bus = mode_page->bus; in ipr_check_term_power()
6653 for (i = 0; i < mode_page->num_entries; i++) { in ipr_check_term_power()
6654 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { in ipr_check_term_power()
6655 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
6657 bus->res_addr.bus); in ipr_check_term_power()
6665 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6682 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
6684 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
6685 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
6690 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6710 entry_length = mode_page->entry_length; in ipr_modify_ioafp_mode_page_28()
6713 for (i = 0, bus = mode_page->bus; in ipr_modify_ioafp_mode_page_28()
6714 i < mode_page->num_entries; in ipr_modify_ioafp_mode_page_28()
6716 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { in ipr_modify_ioafp_mode_page_28()
6717 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
6719 IPR_GET_PHYS_LOC(bus->res_addr)); in ipr_modify_ioafp_mode_page_28()
6723 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
6724 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; in ipr_modify_ioafp_mode_page_28()
6725 bus->bus_width = bus_attr->bus_width; in ipr_modify_ioafp_mode_page_28()
6726 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); in ipr_modify_ioafp_mode_page_28()
6727 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; in ipr_modify_ioafp_mode_page_28()
6728 if (bus_attr->qas_enabled) in ipr_modify_ioafp_mode_page_28()
6729 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; in ipr_modify_ioafp_mode_page_28()
6731 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; in ipr_modify_ioafp_mode_page_28()
6736 * ipr_build_mode_select - Build a mode select command
6750 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_select()
6752 ioarcb->res_handle = res_handle; in ipr_build_mode_select()
6753 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_select()
6754 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_mode_select()
6755 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; in ipr_build_mode_select()
6756 ioarcb->cmd_pkt.cdb[1] = parm; in ipr_build_mode_select()
6757 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_select()
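/*
 * The MODE SELECT(6) CDB built above follows the standard SCSI layout:
 * byte 1 carries the page-format/save-pages bits supplied via "parm", and
 * byte 4 is the parameter list length ("xfer_len"). The IOADL describing
 * the DMA buffer that holds the mode pages is set up by an
 * ipr_init_ioadl() call not shown in this extract.
 */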
6763 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6774 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28()
6775 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
6782 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page28()
6783 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page28()
6786 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
6789 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_ioafp_mode_select_page28()
6790 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
6799 * ipr_build_mode_sense - Builds a mode sense command
6813 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_sense()
6815 ioarcb->res_handle = res_handle; in ipr_build_mode_sense()
6816 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; in ipr_build_mode_sense()
6817 ioarcb->cmd_pkt.cdb[2] = parm; in ipr_build_mode_sense()
6818 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_sense()
6819 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_sense()
6825 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed()
6836 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_cmd_failed()
6838 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
6840 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); in ipr_reset_cmd_failed()
6843 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
6848 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6859 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed()
6860 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_failed()
6863 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_reset_mode_sense_failed()
6864 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
6873 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6884 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28()
6888 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
6892 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; in ipr_ioafp_mode_sense_page28()
6893 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; in ipr_ioafp_mode_sense_page28()
6902 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6912 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24()
6913 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
6922 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; in ipr_ioafp_mode_select_page24()
6924 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page24()
6925 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page24()
6928 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
6931 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_ioafp_mode_select_page24()
6939 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6950 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_page24_failed()
6953 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_reset_mode_sense_page24_failed()
6961 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6965  * This function sends a Page 24 Mode Sense to the IOA to retrieve the IOA Advanced Function Control mode page.
6972 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24()
6976 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
6980 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; in ipr_ioafp_mode_sense_page24()
6981 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; in ipr_ioafp_mode_sense_page24()
6990 * ipr_init_res_table - Initialize the resource table
6995  * This function reconciles the resource table with the config table, handling old/new devices and scheduling their addition to or removal from the mid-layer as appropriate.
7003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table()
7010 if (ioa_cfg->sis64) in ipr_init_res_table()
7011 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7013 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7016 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7018 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7019 list_move_tail(&res->queue, &old_res); in ipr_init_res_table()
7021 if (ioa_cfg->sis64) in ipr_init_res_table()
7022 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7024 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7027 if (ioa_cfg->sis64) in ipr_init_res_table()
7028 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7030 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7035 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7042 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7043 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7048 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7050 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7052 res->add_to_ml = 1; in ipr_init_res_table()
7053 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) in ipr_init_res_table()
7054 res->sdev->allow_restart = 1; in ipr_init_res_table()
7061 if (res->sdev) { in ipr_init_res_table()
7062 res->del_from_ml = 1; in ipr_init_res_table()
7063 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_init_res_table()
7064 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7070 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7073 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
7074 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; in ipr_init_res_table()
7076 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_init_res_table()
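/*
 * Reconciliation algorithm above: every known resource is first moved to
 * the local old_res list; each config-table entry is then matched back
 * (or, if new, pulled from free_res_q and flagged add_to_ml). Whatever is
 * left on old_res no longer exists -- entries with an attached sdev are
 * flagged del_from_ml and given an invalid handle, the rest go straight
 * back to free_res_q.
 */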
7083 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg()
7095 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_query_ioa_cfg()
7096 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7097 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7100 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) in ipr_ioafp_query_ioa_cfg()
7101 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7102 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7103 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_ioafp_query_ioa_cfg()
7104 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); in ipr_ioafp_query_ioa_cfg()
7105 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_query_ioa_cfg()
7106 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_query_ioa_cfg()
7108 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; in ipr_ioafp_query_ioa_cfg()
7109 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7110 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7111 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7113 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
7116 ipr_cmd->job_step = ipr_init_res_table; in ipr_ioafp_query_ioa_cfg()
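/*
 * cfg_table_size is packed big-endian into CDB bytes 6-8 above. For
 * example, a 0x012345-byte config table (hypothetical size) would yield:
 *
 *	cdb[6] = 0x01;   cdb[7] = 0x23;   cdb[8] = 0x45;
 */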
7126 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_ioa_service_action_failed()
7137 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioa_service_action()
7139 ioarcb->res_handle = res_handle; in ipr_build_ioa_service_action()
7140 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; in ipr_build_ioa_service_action()
7141 ioarcb->cmd_pkt.cdb[1] = sa_code; in ipr_build_ioa_service_action()
7142 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_build_ioa_service_action()
7146  * ipr_ioafp_set_caching_parameters - Issue the Set Cache Parameters service action
7155 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_set_caching_parameters()
7156 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters()
7157 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
7161 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; in ipr_ioafp_set_caching_parameters()
7163 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { in ipr_ioafp_set_caching_parameters()
7168 ioarcb->cmd_pkt.cdb[2] = 0x40; in ipr_ioafp_set_caching_parameters()
7170 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; in ipr_ioafp_set_caching_parameters()
7183 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7198 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_inquiry()
7201 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_ioafp_inquiry()
7202 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_inquiry()
7204 ioarcb->cmd_pkt.cdb[0] = INQUIRY; in ipr_ioafp_inquiry()
7205 ioarcb->cmd_pkt.cdb[1] = flags; in ipr_ioafp_inquiry()
7206 ioarcb->cmd_pkt.cdb[2] = page; in ipr_ioafp_inquiry()
7207 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_ioafp_inquiry()
7216 * ipr_inquiry_page_supported - Is the given inquiry page supported
7229 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) in ipr_inquiry_page_supported()
7230 if (page0->page[i] == page) in ipr_inquiry_page_supported()
7237 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7248 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry()
7249 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
7250 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
7253 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; in ipr_ioafp_pageC4_inquiry()
7258 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
7270 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7281 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry()
7282 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
7283 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
7286 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; in ipr_ioafp_cap_inquiry()
7291 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
7301 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7312 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry()
7316 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; in ipr_ioafp_page3_inquiry()
7319 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
7327 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry()
7344 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
7346 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
7348 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; in ipr_ioafp_page0_inquiry()
7351 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
7359 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7369 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry()
7372 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; in ipr_ioafp_std_inquiry()
7375 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
7383 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7394 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq()
7395 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_identify_hrrq()
7399 ipr_cmd->job_step = ipr_ioafp_std_inquiry; in ipr_ioafp_identify_hrrq()
7400 if (ioa_cfg->identify_hrrq_index == 0) in ipr_ioafp_identify_hrrq()
7401 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
7403 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
7404 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
7406 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; in ipr_ioafp_identify_hrrq()
7407 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_identify_hrrq()
7409 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_identify_hrrq()
7410 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
7411 ioarcb->cmd_pkt.cdb[1] = 0x1; in ipr_ioafp_identify_hrrq()
7413 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
7414 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
7416 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
7418 ioarcb->cmd_pkt.cdb[2] = in ipr_ioafp_identify_hrrq()
7419 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
7420 ioarcb->cmd_pkt.cdb[3] = in ipr_ioafp_identify_hrrq()
7421 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
7422 ioarcb->cmd_pkt.cdb[4] = in ipr_ioafp_identify_hrrq()
7423 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7424 ioarcb->cmd_pkt.cdb[5] = in ipr_ioafp_identify_hrrq()
7425 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
7426 ioarcb->cmd_pkt.cdb[7] = in ipr_ioafp_identify_hrrq()
7427 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
7428 ioarcb->cmd_pkt.cdb[8] = in ipr_ioafp_identify_hrrq()
7429 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
7431 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
7432 ioarcb->cmd_pkt.cdb[9] = in ipr_ioafp_identify_hrrq()
7433 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7435 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
7436 ioarcb->cmd_pkt.cdb[10] = in ipr_ioafp_identify_hrrq()
7437 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
7438 ioarcb->cmd_pkt.cdb[11] = in ipr_ioafp_identify_hrrq()
7439 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
7440 ioarcb->cmd_pkt.cdb[12] = in ipr_ioafp_identify_hrrq()
7441 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
7442 ioarcb->cmd_pkt.cdb[13] = in ipr_ioafp_identify_hrrq()
7443 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
7446 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
7447 ioarcb->cmd_pkt.cdb[14] = in ipr_ioafp_identify_hrrq()
7448 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
7453 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
7454 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_ioafp_identify_hrrq()
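/*
 * CDB layout for Identify Host RRQ, as packed above: the low 32 bits of
 * host_rrq_dma land big-endian in bytes 2-5, the ring size in bytes
 * (sizeof(u32) * hrrq->size) in bytes 7-8, and on SIS-64 adapters the
 * high 32 bits of the address in bytes 10-13. Byte 9 (byte 14 on SIS-64)
 * selects which HRRQ is being identified when multiple queues are
 * enabled. A sketch for a 64-entry queue at DMA address
 * 0x0000000180002000 (hypothetical values):
 *
 *	cdb[2..5]   = 80 00 20 00	(low word)
 *	cdb[7..8]   = 01 00		(64 * 4 = 0x100 bytes)
 *	cdb[10..13] = 00 00 00 01	(high word)
 */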
7465 * ipr_reset_timer_done - Adapter reset timer function
7480 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done()
7483 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
7485 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
7486 list_del(&ipr_cmd->queue); in ipr_reset_timer_done()
7487 ipr_cmd->done(ipr_cmd); in ipr_reset_timer_done()
7490 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
7494 * ipr_reset_start_timer - Start a timer for adapter reset job
7512 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
7513 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_start_timer()
7515 ipr_cmd->timer.expires = jiffies + timeout; in ipr_reset_start_timer()
7516 ipr_cmd->timer.function = ipr_reset_timer_done; in ipr_reset_start_timer()
7517 add_timer(&ipr_cmd->timer); in ipr_reset_start_timer()
7521 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7532 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
7533 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
7536 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
7537 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
7538 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
7539 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
7540 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
7544 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
7545 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
7546 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
7548 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
7551 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
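/*
 * Each host RRQ is re-armed above as a circular buffer of u32 response
 * handles: hrrq_start/hrrq_end bound the ring, hrrq_curr is the next slot
 * the host will read, and toggle_bit flips each time the ring wraps,
 * letting the interrupt handler distinguish fresh entries from stale ones
 * without an explicit producer index.
 */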
7555 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7566 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage()
7569 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
7584 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
7585 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
7586 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
7587 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
7589 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
7591 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
7594 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
7595 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
7600 ipr_cmd->timer.expires = jiffies + stage_time * HZ; in ipr_reset_next_stage()
7601 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_next_stage()
7602 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_next_stage()
7603 add_timer(&ipr_cmd->timer); in ipr_reset_next_stage()
7605 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
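/*
 * SIS-64 IPL progresses through adapter-reported stages: the stage and
 * its allowed time are decoded from the init feedback register (decode
 * elided in this extract). Once the stage indicates the adapter is going
 * operational, job_step hands off to Identify Host RRQ; otherwise a timer
 * armed with ipr_oper_timeout bounds the stage in case the adapter stalls.
 */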
7611 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7622 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa()
7628 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_enable_ioa()
7631 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
7632 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
7633 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
7634 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
7636 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
7638 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
7639 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
7642 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
7646 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
7647 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
7652 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
7654 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
7657 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
7659 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
7661 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
7663 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
7665 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
7666 ipr_cmd->job_step = ipr_reset_next_stage; in ipr_reset_enable_ioa()
7670 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
7671 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_enable_ioa()
7672 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_enable_ioa()
7673 add_timer(&ipr_cmd->timer); in ipr_reset_enable_ioa()
7674 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
7681  * ipr_reset_wait_for_dump - Wait for a dump to time out.
7692 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump()
7694 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
7695 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
7696 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
7697 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
7699 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
7700 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_wait_for_dump()
7706  * ipr_unit_check_no_data - Log a unit check/no data error
7717 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
7718 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
7722 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7739 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
7741 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
7761 length = (be32_to_cpu(sdt.entry[0].end_token) - in ipr_get_unit_check_buffer()
7765 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
7767 list_del_init(&hostrcb->queue); in ipr_get_unit_check_buffer()
7768 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); in ipr_get_unit_check_buffer()
7772 (__be32 *)&hostrcb->hcam, in ipr_get_unit_check_buffer()
7773 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); in ipr_get_unit_check_buffer()
7777 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_get_unit_check_buffer()
7779 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
7780 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
7784 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
7788 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7798 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job()
7801 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
7803 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_get_unit_check_job()
7812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait()
7816 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
7819 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
7820 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
7823 if (!ipr_cmd->u.time_left) in ipr_dump_mailbox_wait()
7824 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
7827 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
7828 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
7829 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
7833 ipr_cmd->job_step = ipr_reset_wait_for_dump; in ipr_dump_mailbox_wait()
7834 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
7837 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_dump_mailbox_wait()
7847 * ipr_reset_restore_cfg_space - Restore PCI config space.
7859 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space()
7862 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
7863 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
7866 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_restore_cfg_space()
7872 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
7874 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
7875 readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
7878 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
7879 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
7880 ipr_cmd->job_step = ipr_reset_get_unit_check_job; in ipr_reset_restore_cfg_space()
7884 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
7886 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_restore_cfg_space()
7892 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
7893 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_restore_cfg_space()
7894 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
7895 ipr_cmd->job_step = ipr_dump_mailbox_wait; in ipr_reset_restore_cfg_space()
7896 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; in ipr_reset_restore_cfg_space()
7898 ipr_cmd->job_step = ipr_reset_enable_ioa; in ipr_reset_restore_cfg_space()
7906 * ipr_reset_bist_done - BIST has completed on the adapter.
7916 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done()
7919 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
7920 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
7921 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
7922 ipr_cmd->job_step = ipr_reset_restore_cfg_space; in ipr_reset_bist_done()
7928 * ipr_reset_start_bist - Run BIST on the adapter.
7938 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist()
7942 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
7944 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
7946 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
7949 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_start_bist()
7953 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
7954 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
7955 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
7956 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_start_bist()
7965 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7976 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_slot_reset_done()
7983 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
7992 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work()
7993 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8002 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8009 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8019 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset()
8022 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); in ipr_reset_slot_reset()
8023 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8024 ipr_cmd->job_step = ipr_reset_slot_reset_done; in ipr_reset_slot_reset()
8030 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8040 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait()
8043 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8044 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8045 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8047 if (ipr_cmd->u.time_left) { in ipr_reset_block_config_access_wait()
8049 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access_wait()
8053 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8054 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8063 * ipr_reset_block_config_access - Block config access to the IOA
8073 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8074 ipr_cmd->job_step = ipr_reset_block_config_access_wait; in ipr_reset_block_config_access()
8075 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access()
8080 * ipr_reset_allowed - Query whether or not IOA can be reset
8084 * 0 if reset not allowed / non-zero if reset is allowed
8090 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8095 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist()
8114 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8115 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_wait_to_start_bist()
8118 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_wait_to_start_bist()
8126 * ipr_reset_alert - Alert the adapter of a pending reset
8139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert()
8144 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8148 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8149 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; in ipr_reset_alert()
8151 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_alert()
8154 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_alert()
8162 * ipr_reset_quiesce_done - Complete IOA disconnect
8172 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done()
8175 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_quiesce_done()
8182 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8193 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done()
8200 ipr_cmd->job_step = ipr_reset_quiesce_done; in ipr_reset_cancel_hcam_done()
8203 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8204 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
8207 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
8211 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
8222 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam()
8236 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
8239 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; in ipr_reset_cancel_hcam()
8241 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
8242 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
8243 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
8244 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) in ipr_reset_cancel_hcam()
8247 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_cancel_hcam()
8248 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
8249 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_reset_cancel_hcam()
8250 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
8251 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; in ipr_reset_cancel_hcam()
8252 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; in ipr_reset_cancel_hcam()
8253 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; in ipr_reset_cancel_hcam()
8254 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; in ipr_reset_cancel_hcam()
8255 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; in ipr_reset_cancel_hcam()
8256 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; in ipr_reset_cancel_hcam()
8257 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; in ipr_reset_cancel_hcam()
8258 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; in ipr_reset_cancel_hcam()
8259 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; in ipr_reset_cancel_hcam()
8260 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; in ipr_reset_cancel_hcam()
8266 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_cancel_hcam()
8271 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_cancel_hcam()
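/*
 * Note the packing order above for Cancel Request against a 64-bit
 * IOARCB: the high 32 bits of the HCAM's IOARCB address go in CDB bytes
 * 10-13 and the low 32 bits in bytes 2-5. The step re-arms itself
 * (job_step = ipr_reset_cancel_hcam) so each pending HCAM on
 * hostrcb_pending_q is cancelled one command at a time before the reset
 * proceeds to ipr_reset_alert.
 */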
8278 * ipr_reset_ucode_download_done - Microcode download completion
8288 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done()
8289 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
8291 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
8292 sglist->num_sg, DMA_TO_DEVICE); in ipr_reset_ucode_download_done()
8294 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download_done()
8299 * ipr_reset_ucode_download - Download microcode to the adapter
8310 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download()
8311 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
8314 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download()
8319 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_ucode_download()
8320 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_reset_ucode_download()
8321 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; in ipr_reset_ucode_download()
8322 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; in ipr_reset_ucode_download()
8323 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; in ipr_reset_ucode_download()
8324 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; in ipr_reset_ucode_download()
8325 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; in ipr_reset_ucode_download()
8327 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
8331 ipr_cmd->job_step = ipr_reset_ucode_download_done; in ipr_reset_ucode_download()
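/*
 * The WRITE BUFFER CDB above carries the microcode image length
 * big-endian in bytes 6-8; e.g. a 1 MiB image (0x100000 bytes,
 * hypothetical size) packs as
 *
 *	cdb[6] = 0x10;   cdb[7] = 0x00;   cdb[8] = 0x00;
 *
 * with mode IPR_WR_BUF_DOWNLOAD_AND_SAVE in byte 1 so the image is
 * committed to non-volatile storage rather than just staged.
 */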
8341  * ipr_reset_shutdown_ioa - Shut down the adapter
8353 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa()
8354 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; in ipr_reset_shutdown_ioa()
8360 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_shutdown_ioa()
8362 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
8363 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_shutdown_ioa()
8364 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_shutdown_ioa()
8365 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_reset_shutdown_ioa()
8366 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; in ipr_reset_shutdown_ioa()
8372 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
8380 ipr_cmd->job_step = ipr_reset_ucode_download; in ipr_reset_shutdown_ioa()
8382 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_shutdown_ioa()
8389 * ipr_reset_ioa_job - Adapter reset job
8400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job()
8403 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_ioa_job()
8405 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
8410 list_add_tail(&ipr_cmd->queue, in ipr_reset_ioa_job()
8411 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
8416 rc = ipr_cmd->job_step_failed(ipr_cmd); in ipr_reset_ioa_job()
8422 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; in ipr_reset_ioa_job()
8423 rc = ipr_cmd->job_step(ipr_cmd); in ipr_reset_ioa_job()
8428 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8448 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
8449 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
8450 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8451 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
8452 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
8455 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in _ipr_initiate_ioa_reset()
8456 ioa_cfg->scsi_unblock = 0; in _ipr_initiate_ioa_reset()
8457 ioa_cfg->scsi_blocked = 1; in _ipr_initiate_ioa_reset()
8458 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
8462 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
8463 ipr_cmd->job_step = job_step; in _ipr_initiate_ioa_reset()
8464 ipr_cmd->u.shutdown_type = shutdown_type; in _ipr_initiate_ioa_reset()
8470 * ipr_initiate_ioa_reset - Initiate an adapter reset
8486 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
8489 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
8490 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
8491 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
8492 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
8493 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
8496 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
8497 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
8498 "IOA taken offline - error recovery failed\n"); in ipr_initiate_ioa_reset()
8500 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
8501 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
8502 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8503 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
8504 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
8508 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
8509 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
8510 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
8512 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
8514 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
8515 ioa_cfg->scsi_unblock = 1; in ipr_initiate_ioa_reset()
8516 schedule_work(&ioa_cfg->work_q); in ipr_initiate_ioa_reset()
8520 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
8530 * ipr_reset_freeze - Hold off all I/O activity
8539 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze()
8543 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
8544 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8545 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
8546 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
8549 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
8550 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_freeze()
8555 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8566 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
8567 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
8569 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
8574 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8586 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
8587 if (ioa_cfg->probe_done) in ipr_pci_frozen()
8589 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
8593 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8605 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
8606 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
8607 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
8613 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
8614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
8619 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8631 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
8632 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
8633 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
8634 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
8635 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
8636 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
8637 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
8638 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
8639 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
8640 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
8645 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
8646 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
8650 * ipr_pci_error_detected - Called when a PCI error is detected.
8676 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8690 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
8691 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
8692 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
8693 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
8694 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
8699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
8705 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8715 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
8717 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
8718 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
8719 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
8720 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
8722 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
8726 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
8728 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
8729 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
8730 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
8731 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
8732 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
8736 * ipr_free_mem - Frees memory allocated for an adapter
8746 kfree(ioa_cfg->res_entries); in ipr_free_mem()
8747 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
8748 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
8751 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
8752 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
8753 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
8754 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
8755 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
8757 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
8758 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
8761 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
8763 ioa_cfg->hostrcb[i], in ipr_free_mem()
8764 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
8768 kfree(ioa_cfg->trace); in ipr_free_mem()
8772 * ipr_free_irqs - Free all allocated IRQs for the adapter.
8783 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
8786 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
8787 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); in ipr_free_irqs()
8792 * ipr_free_all_resources - Free all allocated resources for an adapter.
8803 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
8807 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
8808 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
8809 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
8812 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
8818 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8822 * 0 on success / -ENOMEM on allocation failure
8831 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
8834 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
8835 return -ENOMEM; in ipr_alloc_cmd_blks()
8837 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
8838 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
8840 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
8842 return -ENOMEM; in ipr_alloc_cmd_blks()
8845 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
8846 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
8849 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
8850 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
8851 (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
8855 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
8856 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
8858 (i - 1) * entries_each_hrrq; in ipr_alloc_cmd_blks()
8859 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
8861 i * entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
8865 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
8866 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
8868 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
8871 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
8873 i = IPR_NUM_CMD_BLKS - in ipr_alloc_cmd_blks()
8874 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
8876 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
8877 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
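/*
 * Integer division above can leave IPR_NUM_CMD_BLKS unevenly split across
 * the queues; the remainder (i) is folded into the last HRRQ so every
 * command block gets a home and max_cmd_id stays consistent with the
 * per-queue size.
 */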
8881 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, in ipr_alloc_cmd_blks()
8886 return -ENOMEM; in ipr_alloc_cmd_blks()
8889 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
8890 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
8892 ioarcb = &ipr_cmd->ioarcb; in ipr_alloc_cmd_blks()
8893 ipr_cmd->dma_addr = dma_addr; in ipr_alloc_cmd_blks()
8894 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
8895 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); in ipr_alloc_cmd_blks()
8897 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); in ipr_alloc_cmd_blks()
8899 ioarcb->host_response_handle = cpu_to_be32(i << 2); in ipr_alloc_cmd_blks()
8900 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
8901 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_alloc_cmd_blks()
8903 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
8906 ioarcb->write_ioadl_addr = in ipr_alloc_cmd_blks()
8908 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_alloc_cmd_blks()
8909 ioarcb->ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
8912 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); in ipr_alloc_cmd_blks()
8913 ipr_cmd->cmd_index = i; in ipr_alloc_cmd_blks()
8914 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
8915 ipr_cmd->sense_buffer_dma = dma_addr + in ipr_alloc_cmd_blks()
8918 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; in ipr_alloc_cmd_blks()
8919 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
8920 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
8921 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
8929 * ipr_alloc_mem - Allocate memory for an adapter
8933 * 0 on success / non-zero for error
8937 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
8938 int i, rc = -ENOMEM; in ipr_alloc_mem()
8941 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, in ipr_alloc_mem()
8945 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
8948 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
8949 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
8950 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
8953 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
8955 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
8958 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
8964 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
8965 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
8966 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
8967 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
8970 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
8971 while (--i >= 0) in ipr_alloc_mem()
8972 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
8973 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
8974 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
8975 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
8978 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
8981 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
8982 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
8983 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
8986 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
8990 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
8992 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
8995 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
8998 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
8999 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9000 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9001 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9004 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, in ipr_alloc_mem()
9008 if (!ioa_cfg->trace) in ipr_alloc_mem()
9017 while (i-- > 0) { in ipr_alloc_mem()
9018 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), in ipr_alloc_mem()
9019 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9020 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9022 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9023 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9025 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9026 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9027 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9028 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9029 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9034 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_alloc_mem()
9035 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9037 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
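/*
 * Editor's note: the error path of ipr_alloc_mem() above is the usual
 * kernel "goto unwind" ladder -- each allocation that succeeds adds
 * one more label to the cleanup chain, so a failure at step N frees
 * steps N-1..1 in reverse order.  A standalone sketch of the idiom
 * (names here are illustrative, not from ipr.c):
 */
#include <stdlib.h>

int alloc_three(void **a, void **b, void **c)
{
	*a = malloc(64);
	if (!*a)
		goto out;
	*b = malloc(64);
	if (!*b)
		goto out_free_a;
	*c = malloc(64);
	if (!*c)
		goto out_free_b;
	return 0;		/* success: caller owns a, b and c */

out_free_b:
	free(*b);
out_free_a:
	free(*a);
out:
	return -1;		/* kernel code would return -ENOMEM */
}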
9042 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9053 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9054 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9055 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9057 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9059 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
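/*
 * Editor's note: the max_xfer_rate choice above indexes a bus-speed
 * table with the ipr_max_speed module parameter (0-2 per the
 * MODULE_PARM_DESC) and falls back to the U160 rate when the index
 * is out of range.  Sketch with illustrative MB/s values -- the real
 * table holds the driver's IPR_*_SCSI_RATE constants:
 */
static const unsigned int bus_speed_tbl[] = { 80, 160, 320 };	/* MB/s */

static unsigned int pick_xfer_rate(unsigned int max_speed)
{
	if (max_speed < sizeof(bus_speed_tbl) / sizeof(bus_speed_tbl[0]))
		return bus_speed_tbl[max_speed];
	return bus_speed_tbl[1];	/* out of range: default to U160 */
}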
9064 * ipr_init_regs - Initialize IOA registers
9076 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9077 t = &ioa_cfg->regs; in ipr_init_regs()
9078 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9080 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; in ipr_init_regs()
9081 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; in ipr_init_regs()
9082 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; in ipr_init_regs()
9083 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; in ipr_init_regs()
9084 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; in ipr_init_regs()
9085 t->clr_interrupt_reg = base + p->clr_interrupt_reg; in ipr_init_regs()
9086 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; in ipr_init_regs()
9087 t->sense_interrupt_reg = base + p->sense_interrupt_reg; in ipr_init_regs()
9088 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; in ipr_init_regs()
9089 t->ioarrin_reg = base + p->ioarrin_reg; in ipr_init_regs()
9090 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; in ipr_init_regs()
9091 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; in ipr_init_regs()
9092 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; in ipr_init_regs()
9093 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; in ipr_init_regs()
9094 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; in ipr_init_regs()
9095 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; in ipr_init_regs()
9097 if (ioa_cfg->sis64) { in ipr_init_regs()
9098 t->init_feedback_reg = base + p->init_feedback_reg; in ipr_init_regs()
9099 t->dump_addr_reg = base + p->dump_addr_reg; in ipr_init_regs()
9100 t->dump_data_reg = base + p->dump_data_reg; in ipr_init_regs()
9101 t->endian_swap_reg = base + p->endian_swap_reg; in ipr_init_regs()
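/*
 * Editor's note: ipr_init_regs() converts per-chip register *offsets*
 * (from the chip_cfg selected by PCI id) into absolute MMIO addresses
 * by adding the ioremapped BAR base held in hdw_dma_regs.  Minimal
 * sketch of the offset-to-address pattern (field names illustrative):
 */
struct reg_offsets { unsigned long set_mask, clr_mask, sense; };
struct reg_addrs   { void *set_mask, *clr_mask, *sense; };

static void map_reg_addrs(struct reg_addrs *t, const struct reg_offsets *p,
			  char *base)
{
	t->set_mask = base + p->set_mask;	/* offset -> address */
	t->clr_mask = base + p->clr_mask;
	t->sense    = base + p->sense;
}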
9106 * ipr_init_ioa_cfg - Initialize IOA config struct
9119 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9120 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9121 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9122 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9123 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9124 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9125 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9126 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9127 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9128 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9130 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9131 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9132 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); in ipr_init_ioa_cfg()
9133 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9134 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9135 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9136 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); in ipr_init_ioa_cfg()
9137 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9138 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9139 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9140 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9143 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9145 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9146 host->max_channel = IPR_MAX_SIS64_BUSES; in ipr_init_ioa_cfg()
9147 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9148 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9150 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9151 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9153 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9155 host->max_channel = IPR_VSET_BUS; in ipr_init_ioa_cfg()
9156 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9157 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9159 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9160 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9162 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9165 host->unique_id = host->host_no; in ipr_init_ioa_cfg()
9166 host->max_cmd_len = IPR_MAX_CDB_LEN; in ipr_init_ioa_cfg()
9167 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9170 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9171 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9172 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9173 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
9175 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
9177 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
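/*
 * Editor's note: the cfg_table_size computed above works out to one
 * table header plus one config entry per supported device, with
 * separate SIS64 and legacy struct layouts.  A sketch of the
 * arithmetic with illustrative struct sizes (not the ipr.h
 * definitions):
 */
#include <stddef.h>

struct cfg_hdr   { char pad[16]; };	/* stand-in for the table header */
struct cfg_entry { char pad[64]; };	/* stand-in for a per-device entry */

static size_t cfg_table_size(size_t max_devs)
{
	return sizeof(struct cfg_hdr) + sizeof(struct cfg_entry) * max_devs;
}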
9182 * ipr_get_chip_info - Find adapter chip information
9194 if (ipr_chip[i].vendor == dev_id->vendor && in ipr_get_chip_info()
9195 ipr_chip[i].device == dev_id->device) in ipr_get_chip_info()
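/*
 * Editor's note: ipr_get_chip_info() is a linear scan of the static
 * ipr_chip[] table keyed on PCI vendor/device id; a miss makes
 * ipr_probe_ioa() below fail with "Unknown adapter chipset".  A
 * self-contained sketch of the lookup (types are illustrative):
 */
struct chip_id { unsigned short vendor, device; };

static const struct chip_id *find_chip(const struct chip_id *tbl, int n,
				       unsigned short vendor,
				       unsigned short device)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].vendor == vendor && tbl[i].device == device)
			return &tbl[i];
	return 0;	/* NULL: unrecognized adapter */
}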
9201 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9210 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
9213 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
9222 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
9224 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
9225 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
9226 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
9227 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
9228 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
9237 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
9241 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
9242 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9244 while (--i > 0) in ipr_request_other_msi_irqs()
9246 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
9254 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9262 * 0 on success / non-zero on failure
9269 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq); in ipr_test_intr()
9270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
9272 ioa_cfg->msi_received = 1; in ipr_test_intr()
9273 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
9275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
9280 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9289 * 0 on success / non-zero on failure
9299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9300 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
9301 ioa_cfg->msi_received = 0; in ipr_test_msi()
9303 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
9304 readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
9305 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9309 dev_err(&pdev->dev, "Cannot assign irq %d\n", irq); in ipr_test_msi()
9312 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); in ipr_test_msi()
9314 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
9315 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
9316 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
9317 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
9320 if (!ioa_cfg->msi_received) { in ipr_test_msi()
9322 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); in ipr_test_msi()
9323 rc = -EOPNOTSUPP; in ipr_test_msi()
9325 dev_info(&pdev->dev, "MSI test succeeded.\n"); in ipr_test_msi()
9327 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
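/*
 * Editor's note: the MSI self-test above follows a common probe-time
 * pattern -- request the vector, poke a debug doorbell the adapter
 * answers with an interrupt, then wait (with a timeout) for the
 * handler to set a flag.  If the flag never arrives, the driver
 * falls back to legacy (LSI) interrupts.  Condensed sketch of the
 * sequence, error paths and locking omitted, register names as above:
 *
 *	init_waitqueue_head(&cfg->msi_wait_q);
 *	cfg->msi_received = 0;
 *	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 *	       cfg->regs.clr_interrupt_mask_reg32);	unmask the test irq
 *	request_irq(irq, ipr_test_intr, 0, IPR_NAME, cfg);
 *	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
 *	       cfg->regs.sense_interrupt_reg32);	fire the doorbell
 *	wait_event_timeout(cfg->msi_wait_q, cfg->msi_received, HZ);
 *	if (!cfg->msi_received)
 *		rc = -EOPNOTSUPP;			fall back to LSI
 */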
9336 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9341 * 0 on success / non-zero on failure
9357 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); in ipr_probe_ioa()
9361 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); in ipr_probe_ioa()
9362 rc = -ENOMEM; in ipr_probe_ioa()
9366 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
9369 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
9371 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
9372 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", in ipr_probe_ioa()
9373 dev_id->vendor, dev_id->device); in ipr_probe_ioa()
9378 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
9379 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
9380 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
9381 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
9384 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
9385 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) in ipr_probe_ioa()
9386 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
9388 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
9390 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
9398 dev_err(&pdev->dev, in ipr_probe_ioa()
9412 dev_err(&pdev->dev, "Cannot enable adapter\n"); in ipr_probe_ioa()
9421 dev_err(&pdev->dev, in ipr_probe_ioa()
9423 rc = -ENOMEM; in ipr_probe_ioa()
9427 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
9428 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
9429 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
9433 if (ioa_cfg->sis64) { in ipr_probe_ioa()
9434 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ipr_probe_ioa()
9436 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); in ipr_probe_ioa()
9437 rc = dma_set_mask_and_coherent(&pdev->dev, in ipr_probe_ioa()
9441 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in ipr_probe_ioa()
9444 dev_err(&pdev->dev, "Failed to set DMA mask\n"); in ipr_probe_ioa()
9449 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
9452 dev_err(&pdev->dev, "Write of cache line size failed\n"); in ipr_probe_ioa()
9454 rc = -EIO; in ipr_probe_ioa()
9459 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
9463 dev_err(&pdev->dev, "The maximum number of MSI-X vectors is %d\n", in ipr_probe_ioa()
9469 if (ioa_cfg->ipr_chip->has_msi) in ipr_probe_ioa()
9476 ioa_cfg->nvectors = rc; in ipr_probe_ioa()
9478 if (!pdev->msi_enabled && !pdev->msix_enabled) in ipr_probe_ioa()
9479 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
9487 rc = -EIO; in ipr_probe_ioa()
9492 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
9496 dev_info(&pdev->dev, in ipr_probe_ioa()
9497 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, in ipr_probe_ioa()
9498 pdev->msix_enabled ? "-X" : ""); in ipr_probe_ioa()
9500 case -EOPNOTSUPP: in ipr_probe_ioa()
9504 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
9505 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
9512 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
9524 dev_err(&pdev->dev, in ipr_probe_ioa()
9529 /* Save away PCI config space for use following IOA reset */ in ipr_probe_ioa()
9533 dev_err(&pdev->dev, "Failed to save PCI config space\n"); in ipr_probe_ioa()
9534 rc = -EIO; in ipr_probe_ioa()
9542 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
9543 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
9544 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
9546 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
9548 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
9550 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
9552 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
9554 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
9556 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
9559 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
9560 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
9564 rc = request_irq(pdev->irq, ipr_isr, in ipr_probe_ioa()
9566 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
9569 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", in ipr_probe_ioa()
9570 pdev->irq, rc); in ipr_probe_ioa()
9574 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || in ipr_probe_ioa()
9575 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
9576 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
9577 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
9579 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
9580 WQ_MEM_RECLAIM, host->host_no); in ipr_probe_ioa()
9582 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
9583 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); in ipr_probe_ioa()
9584 rc = -ENOMEM; in ipr_probe_ioa()
9588 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
9591 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
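/*
 * Editor's note: the DMA-mask setup inside ipr_probe_ioa() above is
 * the stock "try 64-bit, fall back to 32-bit" pattern for adapters
 * whose SIS may be 32-bit only.  Condensed sketch using the same
 * kernel API the driver calls:
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int ipr_like_set_dma_mask(struct pci_dev *pdev, bool sis64)
{
	int rc;

	if (sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)		/* 64-bit refused: retry with 32-bit */
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}

	return rc;	/* non-zero means no usable DMA mask was set */
}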
9617 * ipr_initiate_ioa_bringdown - Bring down an adapter
9634 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
9635 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
9636 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
9637 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
9643 * __ipr_remove - Remove a single adapter
9659 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
9660 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
9661 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
9662 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
9663 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
9666 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
9667 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
9668 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
9669 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
9674 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
9675 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
9676 flush_work(&ioa_cfg->work_q); in __ipr_remove()
9677 if (ioa_cfg->reset_work_q) in __ipr_remove()
9678 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
9679 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
9680 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
9683 list_del(&ioa_cfg->queue); in __ipr_remove()
9686 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
9687 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
9688 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
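/*
 * Editor's note: the loop at the top of __ipr_remove() (and again in
 * ipr_shutdown() below) is the canonical way to sleep on a condition
 * that is checked under a spinlock: drop the lock before wait_event()
 * -- sleeping with a spinlock held is a bug -- and re-take it before
 * re-testing.  The idiom in isolation:
 *
 *	spin_lock_irqsave(lock, flags);
 *	while (cfg->in_reset_reload) {
 *		spin_unlock_irqrestore(lock, flags);
 *		wait_event(cfg->reset_wait_q, !cfg->in_reset_reload);
 *		spin_lock_irqsave(lock, flags);
 *	}
 *	...teardown continues under the lock...
 *	spin_unlock_irqrestore(lock, flags);
 */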
9696 * ipr_remove - IOA hot plug remove entry point
9710 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
9712 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
9714 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
9716 scsi_remove_host(ioa_cfg->host); in ipr_remove()
9724 * ipr_probe - Adapter hot plug add entry point
9729 * 0 on success / non-zero on failure
9745 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
9752 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9756 scsi_remove_host(ioa_cfg->host); in ipr_probe()
9761 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9765 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9767 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9769 scsi_remove_host(ioa_cfg->host); in ipr_probe()
9774 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9778 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9780 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
9782 scsi_remove_host(ioa_cfg->host); in ipr_probe()
9786 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_probe()
9787 ioa_cfg->scan_enabled = 1; in ipr_probe()
9788 schedule_work(&ioa_cfg->work_q); in ipr_probe()
9789 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_probe()
9791 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
9793 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
9794 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
9795 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
9796 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
9800 scsi_scan_host(ioa_cfg->host); in ipr_probe()
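/*
 * Editor's note: ipr_probe() creates its sysfs/bin attributes in
 * stages and, on any failure, removes exactly the stages that already
 * succeeded -- in reverse order -- before calling scsi_remove_host().
 * It is the same unwind discipline as ipr_alloc_mem(), written out
 * inline instead of with goto labels.
 */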
9806 * ipr_shutdown - Shutdown handler.
9822 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
9823 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
9824 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
9825 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
9826 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
9829 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
9830 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
9831 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
9832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
9835 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
9839 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
9840 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
9841 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
9843 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
9977 * ipr_halt_done - Shutdown prepare completion
9985 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_halt_done()
9989 * ipr_halt - Issue shutdown prepare to all adapters
10009 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10010 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10011 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10012 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10017 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_halt()
10018 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_halt()
10019 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_halt()
10020 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; in ipr_halt()
10023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
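/*
 * Editor's note: ipr_halt() has the shape of a reboot-notifier
 * callback (it receives a SYS_RESTART event and returns a NOTIFY_*
 * value), letting the driver send SHUTDOWN PREPARE to every adapter
 * before the system goes down.  Minimal sketch of registering one,
 * assuming the standard notifier API; names here are illustrative:
 */
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_halt(struct notifier_block *nb, unsigned long event,
		   void *buf)
{
	/* quiesce hardware here */
	return NOTIFY_OK;
}

static struct notifier_block my_halt_notifier = {
	.notifier_call = my_halt,
};

/* module init:	register_reboot_notifier(&my_halt_notifier);   */
/* module exit:	unregister_reboot_notifier(&my_halt_notifier); */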
10035 * ipr_init - Module entry point
10058 * ipr_exit - Module unload