Lines Matching refs:fnic
31 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
111 static void fnic_release_ioreq_buf(struct fnic *fnic, in fnic_release_ioreq_buf() argument
116 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, in fnic_release_ioreq_buf()
123 fnic->io_sgl_pool[io_req->sgl_type]); in fnic_release_ioreq_buf()
125 dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa, in fnic_release_ioreq_buf()
130 fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, in fnic_count_portid_ioreqs_iter() argument
144 unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid) in fnic_count_ioreqs() argument
148 fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter, in fnic_count_ioreqs()
151 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_count_ioreqs()
156 unsigned int fnic_count_all_ioreqs(struct fnic *fnic) in fnic_count_all_ioreqs() argument
158 return fnic_count_ioreqs(fnic, 0); in fnic_count_all_ioreqs()
162 fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, in fnic_count_lun_ioreqs_iter() argument
176 fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device) in fnic_count_lun_ioreqs() argument
180 fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter, in fnic_count_lun_ioreqs()
183 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_count_lun_ioreqs()
189 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) in free_wq_copy_descs() argument
192 if (!fnic->fw_ack_recd[hwq]) in free_wq_copy_descs()
199 if (wq->to_clean_index <= fnic->fw_ack_index[hwq]) in free_wq_copy_descs()
200 wq->ring.desc_avail += (fnic->fw_ack_index[hwq] in free_wq_copy_descs()
205 + fnic->fw_ack_index[hwq] + 1); in free_wq_copy_descs()
213 (fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count; in free_wq_copy_descs()
216 fnic->fw_ack_recd[hwq] = 0; in free_wq_copy_descs()
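The fragments above carry the whole reclamation logic for the copy work queue: once the firmware ACKs a request index, every descriptor between to_clean_index and fw_ack_index (inclusive) can be returned to the ring. A sketch of the helper reconstructed from those fragments; the return values and comments are filled in and should be read as assumptions rather than the exact driver text:

static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq,
			      unsigned int hwq)
{
	/* Nothing to reclaim until firmware has ACKed a request index. */
	if (!fnic->fw_ack_recd[hwq])
		return 1;

	/*
	 * Grow desc_avail by the number of descriptors between
	 * to_clean_index and fw_ack_index, inclusive, accounting for
	 * wraparound of the ring.
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[hwq])
		wq->ring.desc_avail += (fnic->fw_ack_index[hwq]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[hwq] + 1);

	/* Advance the clean index just past the last ACKed descriptor. */
	wq->to_clean_index =
		(fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;

	/* The pending ACK has now been consumed. */
	fnic->fw_ack_recd[hwq] = 0;
	return 0;
}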
226 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, in __fnic_set_state_flags() argument
231 spin_lock_irqsave(&fnic->fnic_lock, flags); in __fnic_set_state_flags()
234 fnic->state_flags &= ~st_flags; in __fnic_set_state_flags()
236 fnic->state_flags |= st_flags; in __fnic_set_state_flags()
238 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in __fnic_set_state_flags()
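These lines are the body of the state-flag helper: set or clear bits in fnic->state_flags while holding fnic_lock. A minimal sketch, with the third parameter name and the wrapper macros assumed from how fnic_set_state_flags()/fnic_clear_state_flags() are used at the call sites below:

static inline void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
		       unsigned long clearbits)
{
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (clearbits)
		fnic->state_flags &= ~st_flags;	/* clear the requested bits */
	else
		fnic->state_flags |= st_flags;	/* set the requested bits */
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/* Convenience wrappers (assumed to live in fnic.h). */
#define fnic_set_state_flags(fnicp, st_flags)	\
	__fnic_set_state_flags(fnicp, st_flags, 0)
#define fnic_clear_state_flags(fnicp, st_flags)	\
	__fnic_set_state_flags(fnicp, st_flags, 1)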
248 int fnic_fw_reset_handler(struct fnic *fnic) in fnic_fw_reset_handler() argument
250 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; in fnic_fw_reset_handler()
256 fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); in fnic_fw_reset_handler()
257 ioreq_count = fnic_count_all_ioreqs(fnic); in fnic_fw_reset_handler()
260 while (atomic_read(&fnic->in_flight)) in fnic_fw_reset_handler()
263 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_fw_reset_handler()
265 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_fw_reset_handler()
266 free_wq_copy_descs(fnic, wq, 0); in fnic_fw_reset_handler()
271 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fw_reset_handler()
274 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_fw_reset_handler()
275 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_fw_reset_handler()
276 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_fw_reset_handler()
277 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_fw_reset_handler()
279 &fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_fw_reset_handler()
282 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_fw_reset_handler()
285 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); in fnic_fw_reset_handler()
286 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fw_reset_handler()
289 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); in fnic_fw_reset_handler()
290 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fw_reset_handler()
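The active_fw_reqs/max_fw_reqs bookkeeping seen here repeats after every descriptor posted to firmware in this file (fnic_flogi_reg_handler, fnic_queue_wq_copy_desc, fnic_queue_abort_io_req, fnic_queue_dr_io_req). Pulled out as a hypothetical helper for clarity; the helper name is not part of the driver:

static inline void fnic_track_fw_req(struct fnic *fnic)
{
	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);

	/* Keep max_fw_reqs as a high-water mark of outstanding firmware requests. */
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
	    atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
			     atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
}

As in the original, the read-compare-set is not a single atomic step; max_fw_reqs is a best-effort statistic, not a strict invariant.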
302 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) in fnic_flogi_reg_handler() argument
304 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; in fnic_flogi_reg_handler()
309 struct fnic_iport_s *iport = &fnic->iport; in fnic_flogi_reg_handler()
311 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_flogi_reg_handler()
313 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_flogi_reg_handler()
314 free_wq_copy_descs(fnic, wq, 0); in fnic_flogi_reg_handler()
321 memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); in fnic_flogi_reg_handler()
324 if (fnic->config.flags & VFCF_FIP_CAPABLE) { in fnic_flogi_reg_handler()
327 fnic->iport.fpma, in fnic_flogi_reg_handler()
329 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_flogi_reg_handler()
331 fc_id, fnic->iport.fpma, gw_mac); in fnic_flogi_reg_handler()
335 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_flogi_reg_handler()
340 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_flogi_reg_handler()
341 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_flogi_reg_handler()
342 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_flogi_reg_handler()
343 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_flogi_reg_handler()
344 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_flogi_reg_handler()
347 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_flogi_reg_handler()
355 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, in fnic_queue_wq_copy_desc() argument
366 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_wq_copy_desc()
387 io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev, in fnic_queue_wq_copy_desc()
391 if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) { in fnic_queue_wq_copy_desc()
397 io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev, in fnic_queue_wq_copy_desc()
401 if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) { in fnic_queue_wq_copy_desc()
402 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa, in fnic_queue_wq_copy_desc()
412 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_wq_copy_desc()
413 free_wq_copy_descs(fnic, wq, hwq); in fnic_queue_wq_copy_desc()
416 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_queue_wq_copy_desc()
429 if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && in fnic_queue_wq_copy_desc()
448 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_wq_copy_desc()
449 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_wq_copy_desc()
450 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_wq_copy_desc()
451 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_wq_copy_desc()
452 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_wq_copy_desc()
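The fnic_queue_wq_copy_desc fragments show the DMA setup for a data-carrying command (sg_count > 0): map the scatter-gather list and the sense buffer, and unmap the SGL again if the second mapping fails. A sketch of that error-unwind in the same context; the BUSY return value is an assumption, not visible in the fragments:

	/* Map the per-I/O scatter-gather list for the firmware to read. */
	io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
					     io_req->sgl_list,
					     sizeof(io_req->sgl_list[0]) * sg_count,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa))
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Map the sense buffer so the firmware can write sense data back. */
	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		/* Undo the first mapping before giving up on this command. */
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * sg_count,
				 DMA_TO_DEVICE);
		return SCSI_MLQUEUE_HOST_BUSY;
	}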
464 struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host)); in fnic_queuecommand() local
466 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_queuecommand()
481 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
490 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
499 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_queuecommand()
500 iport = &fnic->iport; in fnic_queuecommand()
503 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
504 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
518 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
523 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
524 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
546 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
547 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
555 atomic_inc(&fnic->in_flight); in fnic_queuecommand()
558 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { in fnic_queuecommand()
559 atomic_dec(&fnic->in_flight); in fnic_queuecommand()
561 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
565 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { in fnic_queuecommand()
566 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
567 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_queuecommand()
569 fnic->state_flags); in fnic_queuecommand()
578 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queuecommand()
584 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); in fnic_queuecommand()
597 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand()
610 mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], in fnic_queuecommand()
616 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand()
635 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
646 if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) { in fnic_queuecommand()
648 fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag)); in fnic_queuecommand()
649 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
653 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req; in fnic_queuecommand()
657 wq = &fnic->hw_copy_wq[hwq]; in fnic_queuecommand()
659 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq); in fnic_queuecommand()
670 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; in fnic_queuecommand()
672 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
674 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_queuecommand()
675 mempool_free(io_req, fnic->io_req_pool); in fnic_queuecommand()
677 atomic_dec(&fnic->in_flight); in fnic_queuecommand()
703 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queuecommand()
705 atomic_dec(&fnic->in_flight); in fnic_queuecommand()
709 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_queuecommand()
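The queuecommand fragments all revolve around one structure: a per-hardware-queue io_req_table indexed by the block-layer tag, filled in under wq_copy_lock before the descriptor is posted and cleared again on any failure. A condensed sketch of that flow; the tag helpers are the standard blk-mq ones, while the BUSY returns are assumptions not shown in the fragments:

	uint32_t mqtag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
	unsigned int hwq = blk_mq_unique_tag_to_hwq(mqtag);
	unsigned int tag = blk_mq_unique_tag_to_tag(mqtag);

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	if (fnic->sw_copy_wq[hwq].io_req_table[tag] != NULL) {
		/* Tag slot unexpectedly in use: refuse the command for now. */
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	fnic->sw_copy_wq[hwq].io_req_table[tag] = io_req;
	io_req->tag = mqtag;

	/* Post the copy descriptor while still holding the per-queue lock. */
	wq = &fnic->hw_copy_wq[hwq];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq);
	if (ret) {
		/* Queueing failed: drop the table entry and free the io_req. */
		fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
		atomic_dec(&fnic->in_flight);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);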
722 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, in fnic_fcpio_fw_reset_cmpl_handler() argument
730 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; in fnic_fcpio_fw_reset_cmpl_handler()
737 fnic_cleanup_io(fnic, SCSI_NO_TAG); in fnic_fcpio_fw_reset_cmpl_handler()
739 atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); in fnic_fcpio_fw_reset_cmpl_handler()
740 atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); in fnic_fcpio_fw_reset_cmpl_handler()
741 atomic64_set(&fnic->io_cmpl_skip, 0); in fnic_fcpio_fw_reset_cmpl_handler()
743 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcpio_fw_reset_cmpl_handler()
746 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { in fnic_fcpio_fw_reset_cmpl_handler()
749 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_fw_reset_cmpl_handler()
752 fnic->state = FNIC_IN_ETH_MODE; in fnic_fcpio_fw_reset_cmpl_handler()
754 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_fw_reset_cmpl_handler()
758 fnic->state = FNIC_IN_FC_MODE; in fnic_fcpio_fw_reset_cmpl_handler()
763 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_fw_reset_cmpl_handler()
765 fnic_state_to_str(fnic->state)); in fnic_fcpio_fw_reset_cmpl_handler()
770 if (fnic->fw_reset_done) in fnic_fcpio_fw_reset_cmpl_handler()
771 complete(fnic->fw_reset_done); in fnic_fcpio_fw_reset_cmpl_handler()
778 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_fw_reset_cmpl_handler()
779 fnic_free_txq(&fnic->tx_queue); in fnic_fcpio_fw_reset_cmpl_handler()
783 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_fw_reset_cmpl_handler()
785 queue_work(fnic_event_queue, &fnic->flush_work); in fnic_fcpio_fw_reset_cmpl_handler()
788 fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); in fnic_fcpio_fw_reset_cmpl_handler()
797 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, in fnic_fcpio_flogi_reg_cmpl_handler() argument
809 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
811 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) { in fnic_fcpio_flogi_reg_cmpl_handler()
815 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_flogi_reg_cmpl_handler()
817 fnic->state = FNIC_IN_FC_MODE; in fnic_fcpio_flogi_reg_cmpl_handler()
820 fnic->host, fnic->fnic_num, in fnic_fcpio_flogi_reg_cmpl_handler()
823 fnic->state = FNIC_IN_ETH_MODE; in fnic_fcpio_flogi_reg_cmpl_handler()
827 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_flogi_reg_cmpl_handler()
830 fnic_state_to_str(fnic->state)); in fnic_fcpio_flogi_reg_cmpl_handler()
835 if (fnic->stop_rx_link_events) { in fnic_fcpio_flogi_reg_cmpl_handler()
836 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
839 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
841 queue_work(fnic_event_queue, &fnic->flush_work); in fnic_fcpio_flogi_reg_cmpl_handler()
842 queue_work(fnic_event_queue, &fnic->frame_work); in fnic_fcpio_flogi_reg_cmpl_handler()
844 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_fcpio_flogi_reg_cmpl_handler()
876 static inline void fnic_fcpio_ack_handler(struct fnic *fnic, in fnic_fcpio_ack_handler() argument
887 wq = &fnic->hw_copy_wq[cq_index]; in fnic_fcpio_ack_handler()
888 spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags); in fnic_fcpio_ack_handler()
890 fnic->fnic_stats.misc_stats.last_ack_time = jiffies; in fnic_fcpio_ack_handler()
892 fnic->fw_ack_index[wq_index] = request_out; in fnic_fcpio_ack_handler()
893 fnic->fw_ack_recd[wq_index] = 1; in fnic_fcpio_ack_handler()
896 &fnic->fnic_stats.misc_stats.ack_index_out_of_range); in fnic_fcpio_ack_handler()
898 spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags); in fnic_fcpio_ack_handler()
900 fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], in fnic_fcpio_ack_handler()
908 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_index, in fnic_fcpio_icmnd_cmpl_handler() argument
919 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcpio_icmnd_cmpl_handler()
938 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
941 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
946 if (tag >= fnic->fnic_max_tag_id) { in fnic_fcpio_icmnd_cmpl_handler()
947 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
950 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
955 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
957 sc = scsi_host_find_tag(fnic->host, id); in fnic_fcpio_icmnd_cmpl_handler()
961 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
962 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_icmnd_cmpl_handler()
967 fnic->host->host_no, id, in fnic_fcpio_icmnd_cmpl_handler()
978 if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) { in fnic_fcpio_icmnd_cmpl_handler()
981 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
989 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
990 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_icmnd_cmpl_handler()
1013 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
1017 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
1049 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_icmnd_cmpl_handler()
1106 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; in fnic_fcpio_icmnd_cmpl_handler()
1108 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_icmnd_cmpl_handler()
1112 shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n", in fnic_fcpio_icmnd_cmpl_handler()
1116 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_fcpio_icmnd_cmpl_handler()
1133 fnic->fcp_input_bytes += xfer_len; in fnic_fcpio_icmnd_cmpl_handler()
1136 fnic->fcp_output_bytes += xfer_len; in fnic_fcpio_icmnd_cmpl_handler()
1143 mempool_free(io_req, fnic->io_req_pool); in fnic_fcpio_icmnd_cmpl_handler()
1146 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_fcpio_icmnd_cmpl_handler()
1147 atomic64_dec(&fnic->io_cmpl_skip); in fnic_fcpio_icmnd_cmpl_handler()
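On the completion side the handler walks the same mapping in reverse: recover hwq and tag from the descriptor id, look up the scsi_cmnd, and confirm that io_req_table[tag] still points at the io_req attached to that command before tearing anything down. A trimmed sketch of that validation; the fnic_priv() per-command accessor is an assumption, not visible in the fragments:

	unsigned int hwq = blk_mq_unique_tag_to_hwq(id);
	unsigned int tag = blk_mq_unique_tag_to_tag(id);

	if (tag >= fnic->fnic_max_tag_id)
		return;		/* firmware handed back a bogus tag */

	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
	sc = scsi_host_find_tag(fnic->host, id);
	if (!sc) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return;
	}

	io_req = fnic_priv(sc)->io_req;		/* assumed per-command private data */
	if (!io_req || fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) {
		/* Stale or mismatched completion: drop it. */
		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
		return;
	}

	/* Ownership confirmed: clear the slot, then unmap and free outside the lock. */
	fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);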
1178 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_index, in fnic_fcpio_itmf_cmpl_handler() argument
1187 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_fcpio_itmf_cmpl_handler()
1188 struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; in fnic_fcpio_itmf_cmpl_handler()
1189 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_fcpio_itmf_cmpl_handler()
1190 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_fcpio_itmf_cmpl_handler()
1205 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1208 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1213 if (tag > fnic->fnic_max_tag_id) { in fnic_fcpio_itmf_cmpl_handler()
1214 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1217 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1221 } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { in fnic_fcpio_itmf_cmpl_handler()
1222 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1225 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1231 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1236 if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) { in fnic_fcpio_itmf_cmpl_handler()
1237 io_req = fnic->sw_copy_wq[hwq].io_req_table[tag]; in fnic_fcpio_itmf_cmpl_handler()
1241 sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK); in fnic_fcpio_itmf_cmpl_handler()
1247 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1248 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1258 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1260 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1271 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1280 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1283 shost_printk(KERN_DEBUG, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1298 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1319 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1333 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1345 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1346 shost_printk(KERN_INFO, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1350 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1356 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; in fnic_fcpio_itmf_cmpl_handler()
1357 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1359 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_fcpio_itmf_cmpl_handler()
1360 mempool_free(io_req, fnic->io_req_pool); in fnic_fcpio_itmf_cmpl_handler()
1374 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_fcpio_itmf_cmpl_handler()
1375 atomic64_dec(&fnic->io_cmpl_skip); in fnic_fcpio_itmf_cmpl_handler()
1381 shost_printk(KERN_INFO, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1387 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1393 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1401 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1406 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1415 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_fcpio_itmf_cmpl_handler()
1421 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1424 shost_printk(KERN_ERR, fnic->host, in fnic_fcpio_itmf_cmpl_handler()
1427 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_fcpio_itmf_cmpl_handler()
1440 struct fnic *fnic = vnic_dev_priv(vdev); in fnic_fcpio_cmpl_handler() local
1448 atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_fcpio_cmpl_handler()
1454 cq_index -= fnic->copy_wq_base; in fnic_fcpio_cmpl_handler()
1458 fnic_fcpio_ack_handler(fnic, cq_index, desc); in fnic_fcpio_cmpl_handler()
1462 fnic_fcpio_icmnd_cmpl_handler(fnic, cq_index, desc); in fnic_fcpio_cmpl_handler()
1466 fnic_fcpio_itmf_cmpl_handler(fnic, cq_index, desc); in fnic_fcpio_cmpl_handler()
1471 fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc); in fnic_fcpio_cmpl_handler()
1475 fnic_fcpio_fw_reset_cmpl_handler(fnic, desc); in fnic_fcpio_cmpl_handler()
1479 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_fcpio_cmpl_handler()
1492 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index) in fnic_wq_copy_cmpl_handler() argument
1495 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_wq_copy_cmpl_handler()
1502 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], in fnic_wq_copy_cmpl_handler()
1520 struct fnic *fnic = data; in fnic_cleanup_io_iter() local
1524 struct fnic_stats *fnic_stats = &fnic->fnic_stats; in fnic_cleanup_io_iter()
1533 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1535 fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL; in fnic_cleanup_io_iter()
1539 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1540 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_cleanup_io_iter()
1558 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1561 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1568 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_cleanup_io_iter()
1574 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_cleanup_io_iter()
1575 mempool_free(io_req, fnic->io_req_pool); in fnic_cleanup_io_iter()
1578 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_cleanup_io_iter()
1582 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_cleanup_io_iter()
1583 atomic64_dec(&fnic->io_cmpl_skip); in fnic_cleanup_io_iter()
1602 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) in fnic_cleanup_io() argument
1609 io_count = fnic_count_all_ioreqs(fnic); in fnic_cleanup_io()
1610 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_cleanup_io()
1613 atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); in fnic_cleanup_io()
1615 scsi_host_busy_iter(fnic->host, in fnic_cleanup_io()
1616 fnic_cleanup_io_iter, fnic); in fnic_cleanup_io()
1619 spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); in fnic_cleanup_io()
1620 io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id]; in fnic_cleanup_io()
1632 spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); in fnic_cleanup_io()
1634 while ((io_count = fnic_count_all_ioreqs(fnic))) { in fnic_cleanup_io()
1635 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_cleanup_io()
1638 atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); in fnic_cleanup_io()
1648 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_copy_cleanup_handler() local
1659 if (id >= fnic->fnic_max_tag_id) in fnic_wq_copy_cleanup_handler()
1662 sc = scsi_host_find_tag(fnic->host, id); in fnic_wq_copy_cleanup_handler()
1667 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_wq_copy_cleanup_handler()
1675 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_wq_copy_cleanup_handler()
1681 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL; in fnic_wq_copy_cleanup_handler()
1683 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_wq_copy_cleanup_handler()
1686 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_wq_copy_cleanup_handler()
1687 mempool_free(io_req, fnic->io_req_pool); in fnic_wq_copy_cleanup_handler()
1691 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:" in fnic_wq_copy_cleanup_handler()
1705 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, in fnic_queue_abort_io_req() argument
1710 struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq]; in fnic_queue_abort_io_req()
1711 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_abort_io_req()
1715 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_queue_abort_io_req()
1716 if (unlikely(fnic_chk_state_flags_locked(fnic, in fnic_queue_abort_io_req()
1718 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1720 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_abort_io_req()
1723 atomic_inc(&fnic->in_flight); in fnic_queue_abort_io_req()
1724 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_abort_io_req()
1726 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_abort_io_req()
1728 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_abort_io_req()
1729 free_wq_copy_descs(fnic, wq, hwq); in fnic_queue_abort_io_req()
1732 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_abort_io_req()
1733 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1735 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_queue_abort_io_req()
1742 fnic->config.ra_tov, fnic->config.ed_tov); in fnic_queue_abort_io_req()
1744 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_abort_io_req()
1745 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_abort_io_req()
1746 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_abort_io_req()
1747 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_abort_io_req()
1748 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_abort_io_req()
1750 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_abort_io_req()
1751 atomic_dec(&fnic->in_flight); in fnic_queue_abort_io_req()
1757 struct fnic *fnic; member
1766 struct fnic *fnic = iter_data->fnic; in fnic_rport_abort_io_iter() local
1769 struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; in fnic_rport_abort_io_iter()
1770 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_rport_abort_io_iter()
1780 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1785 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1788 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1794 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1797 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1806 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1811 shost_printk(KERN_ERR, fnic->host, in fnic_rport_abort_io_iter()
1817 shost_printk(KERN_ERR, fnic->host, in fnic_rport_abort_io_iter()
1820 shost_printk(KERN_ERR, fnic->host, in fnic_rport_abort_io_iter()
1831 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1834 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1837 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1840 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1845 if (fnic_queue_abort_io_req(fnic, abt_tag, in fnic_rport_abort_io_iter()
1854 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1855 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_rport_abort_io_iter()
1860 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1862 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1867 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_rport_abort_io_iter()
1875 void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) in fnic_rport_exch_reset() argument
1879 struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; in fnic_rport_exch_reset()
1881 .fnic = fnic, in fnic_rport_exch_reset()
1886 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_exch_reset()
1890 if (fnic->in_remove) in fnic_rport_exch_reset()
1893 io_count = fnic_count_ioreqs(fnic, port_id); in fnic_rport_exch_reset()
1894 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_exch_reset()
1897 atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); in fnic_rport_exch_reset()
1899 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_rport_exch_reset()
1901 atomic_inc(&fnic->in_flight); in fnic_rport_exch_reset()
1902 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { in fnic_rport_exch_reset()
1903 atomic_dec(&fnic->in_flight); in fnic_rport_exch_reset()
1904 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rport_exch_reset()
1907 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_rport_exch_reset()
1909 scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter, in fnic_rport_exch_reset()
1915 atomic_dec(&fnic->in_flight); in fnic_rport_exch_reset()
1917 while ((io_count = fnic_count_ioreqs(fnic, port_id))) in fnic_rport_exch_reset()
1920 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_rport_exch_reset()
1930 struct fnic *fnic = NULL; in fnic_terminate_rport_io() local
1952 fnic = iport->fnic; in fnic_terminate_rport_io()
1953 fnic_rport_exch_reset(fnic, rport->port_id); in fnic_terminate_rport_io()
1962 void fnic_scsi_unload(struct fnic *fnic) in fnic_scsi_unload() argument
1972 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1973 fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; in fnic_scsi_unload()
1974 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1976 if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) in fnic_scsi_unload()
1977 fnic_scsi_fcpio_reset(fnic); in fnic_scsi_unload()
1979 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1980 fnic->in_remove = 1; in fnic_scsi_unload()
1981 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_unload()
1983 fnic_flush_tport_event_list(fnic); in fnic_scsi_unload()
1984 fnic_delete_fcp_tports(fnic); in fnic_scsi_unload()
1987 void fnic_scsi_unload_cleanup(struct fnic *fnic) in fnic_scsi_unload_cleanup() argument
1991 fc_remove_host(fnic->host); in fnic_scsi_unload_cleanup()
1992 scsi_remove_host(fnic->host); in fnic_scsi_unload_cleanup()
1993 for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) in fnic_scsi_unload_cleanup()
1994 kfree(fnic->sw_copy_wq[hwq].io_req_table); in fnic_scsi_unload_cleanup()
2007 struct fnic *fnic; in fnic_abort_cmd() local
2030 fnic = *((struct fnic **) shost_priv(sc->device->host)); in fnic_abort_cmd()
2032 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2033 iport = &fnic->iport; in fnic_abort_cmd()
2035 fnic_stats = &fnic->fnic_stats; in fnic_abort_cmd()
2036 abts_stats = &fnic->fnic_stats.abts_stats; in fnic_abort_cmd()
2037 term_stats = &fnic->fnic_stats.term_stats; in fnic_abort_cmd()
2049 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2052 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2057 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2061 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2065 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2072 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2075 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2081 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2084 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2088 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_abort_cmd()
2101 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2105 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2112 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2132 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2145 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2162 if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun, in fnic_abort_cmd()
2164 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2170 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2190 (2 * fnic->config.ra_tov + in fnic_abort_cmd()
2191 fnic->config.ed_tov)); in fnic_abort_cmd()
2194 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2199 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2208 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2222 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2223 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2244 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2248 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL; in fnic_abort_cmd()
2249 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abort_cmd()
2251 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_abort_cmd()
2252 mempool_free(io_req, fnic->io_req_pool); in fnic_abort_cmd()
2258 if (atomic64_read(&fnic->io_cmpl_skip)) in fnic_abort_cmd()
2259 atomic64_dec(&fnic->io_cmpl_skip); in fnic_abort_cmd()
2271 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_abort_cmd()
2278 static inline int fnic_queue_dr_io_req(struct fnic *fnic, in fnic_queue_dr_io_req() argument
2283 struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; in fnic_queue_dr_io_req()
2293 wq = &fnic->hw_copy_wq[hwq]; in fnic_queue_dr_io_req()
2295 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_queue_dr_io_req()
2296 if (unlikely(fnic_chk_state_flags_locked(fnic, in fnic_queue_dr_io_req()
2298 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_dr_io_req()
2301 atomic_inc(&fnic->in_flight); in fnic_queue_dr_io_req()
2304 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_queue_dr_io_req()
2306 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_dr_io_req()
2308 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq]) in fnic_queue_dr_io_req()
2309 free_wq_copy_descs(fnic, wq, hwq); in fnic_queue_dr_io_req()
2312 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_queue_dr_io_req()
2326 fnic->config.ra_tov, fnic->config.ed_tov); in fnic_queue_dr_io_req()
2328 atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); in fnic_queue_dr_io_req()
2329 if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > in fnic_queue_dr_io_req()
2330 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) in fnic_queue_dr_io_req()
2331 atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, in fnic_queue_dr_io_req()
2332 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); in fnic_queue_dr_io_req()
2335 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_queue_dr_io_req()
2336 atomic_dec(&fnic->in_flight); in fnic_queue_dr_io_req()
2343 struct fnic *fnic; member
2353 struct fnic *fnic = iter_data->fnic; in fnic_pending_aborts_iter() local
2369 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2372 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2380 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2385 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2390 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2392 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2397 shost_printk(KERN_ERR, fnic->host, in fnic_pending_aborts_iter()
2413 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2419 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2424 if (fnic_queue_abort_io_req(fnic, abt_tag, in fnic_pending_aborts_iter()
2427 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2433 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2435 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_pending_aborts_iter()
2440 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2443 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2448 (fnic->config.ed_tov)); in fnic_pending_aborts_iter()
2451 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2454 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2463 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2473 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL; in fnic_pending_aborts_iter()
2475 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_pending_aborts_iter()
2479 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_pending_aborts_iter()
2480 mempool_free(io_req, fnic->io_req_pool); in fnic_pending_aborts_iter()
2500 static int fnic_clean_pending_aborts(struct fnic *fnic, in fnic_clean_pending_aborts() argument
2507 .fnic = fnic, in fnic_clean_pending_aborts()
2514 scsi_host_busy_iter(fnic->host, in fnic_clean_pending_aborts()
2520 schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); in fnic_clean_pending_aborts()
2523 if (fnic_is_abts_pending(fnic, lr_sc)) in fnic_clean_pending_aborts()
2527 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_clean_pending_aborts()
2540 struct fnic *fnic; in fnic_device_reset() local
2566 fnic = *((struct fnic **) shost_priv(sc->device->host)); in fnic_device_reset()
2567 iport = &fnic->iport; in fnic_device_reset()
2569 fnic_stats = &fnic->fnic_stats; in fnic_device_reset()
2576 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_device_reset()
2577 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2585 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2588 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2594 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_device_reset()
2596 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2602 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2604 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2607 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2625 mutex_lock(&fnic->sgreset_mutex); in fnic_device_reset()
2626 mqtag = fnic->fnic_max_tag_id; in fnic_device_reset()
2633 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2641 io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); in fnic_device_reset()
2643 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2653 if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) in fnic_device_reset()
2655 fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag)); in fnic_device_reset()
2657 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = in fnic_device_reset()
2663 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2665 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag); in fnic_device_reset()
2671 if (fnic_queue_dr_io_req(fnic, sc, io_req)) { in fnic_device_reset()
2672 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2678 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2680 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2682 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_device_reset()
2683 old_link_down_cnt = iport->fnic->link_down_cnt; in fnic_device_reset()
2684 old_soft_reset_count = fnic->soft_reset_count; in fnic_device_reset()
2685 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2703 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_device_reset()
2704 if ((old_link_down_cnt != fnic->link_down_cnt) || in fnic_device_reset()
2705 (fnic->reset_in_progress) || in fnic_device_reset()
2706 (fnic->soft_reset_count != old_soft_reset_count) || in fnic_device_reset()
2710 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_device_reset()
2712 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2715 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2716 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2722 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2737 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2743 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2748 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2750 fnic->host, fnic->fnic_num, in fnic_device_reset()
2763 if (fnic_clean_pending_aborts(fnic, sc, new_sc)) { in fnic_device_reset()
2764 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2766 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2772 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2782 fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL; in fnic_device_reset()
2785 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_device_reset()
2789 fnic_release_ioreq_buf(fnic, io_req, sc); in fnic_device_reset()
2790 mempool_free(io_req, fnic->io_req_pool); in fnic_device_reset()
2811 fnic->sgreset_sc = NULL; in fnic_device_reset()
2812 mutex_unlock(&fnic->sgreset_mutex); in fnic_device_reset()
2815 while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) { in fnic_device_reset()
2820 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_device_reset()
2826 FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, in fnic_device_reset()
2837 static void fnic_post_flogo_linkflap(struct fnic *fnic) in fnic_post_flogo_linkflap() argument
2841 fnic_fdls_link_status_change(fnic, 0); in fnic_post_flogo_linkflap()
2842 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_post_flogo_linkflap()
2844 if (fnic->link_status) { in fnic_post_flogo_linkflap()
2845 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_post_flogo_linkflap()
2846 fnic_fdls_link_status_change(fnic, 1); in fnic_post_flogo_linkflap()
2849 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_post_flogo_linkflap()
2855 struct fnic *fnic; in fnic_reset() local
2858 fnic = *((struct fnic **) shost_priv(shost)); in fnic_reset()
2859 reset_stats = &fnic->fnic_stats.reset_stats; in fnic_reset()
2861 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_reset()
2865 fnic_post_flogo_linkflap(fnic); in fnic_reset()
2867 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_reset()
2876 struct fnic *fnic = *((struct fnic **) shost_priv(shost)); in fnic_issue_fc_host_lip() local
2878 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_issue_fc_host_lip()
2889 struct fnic *fnic = *((struct fnic **) shost_priv(shost)); in fnic_host_reset() local
2891 struct fnic_iport_s *iport = &fnic->iport; in fnic_host_reset()
2893 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2894 if (fnic->reset_in_progress == NOT_IN_PROGRESS) { in fnic_host_reset()
2895 fnic->reset_in_progress = IN_PROGRESS; in fnic_host_reset()
2897 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2898 wait_for_completion_timeout(&fnic->reset_completion_wait, in fnic_host_reset()
2901 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2902 if (fnic->reset_in_progress == IN_PROGRESS) { in fnic_host_reset()
2903 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2904 FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, in fnic_host_reset()
2908 fnic->reset_in_progress = IN_PROGRESS; in fnic_host_reset()
2910 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2919 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2920 fnic->reset_in_progress = NOT_IN_PROGRESS; in fnic_host_reset()
2921 complete(&fnic->reset_completion_wait); in fnic_host_reset()
2922 fnic->soft_reset_count++; in fnic_host_reset()
2925 if (fnic->link_status) { in fnic_host_reset()
2930 && fnic->link_status) { in fnic_host_reset()
2931 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2933 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_host_reset()
2940 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_host_reset()
2942 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_host_reset()
2951 struct fnic *fnic = iter_data->fnic; in fnic_abts_pending_iter() local
2970 spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); in fnic_abts_pending_iter()
2974 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abts_pending_iter()
2982 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_abts_pending_iter()
2987 spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); in fnic_abts_pending_iter()
3001 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) in fnic_is_abts_pending() argument
3004 .fnic = fnic, in fnic_is_abts_pending()
3015 scsi_host_busy_iter(fnic->host, in fnic_is_abts_pending()
3034 struct fnic *fnic = *((struct fnic **) shost_priv(shost)); in fnic_eh_host_reset_handler() local
3036 FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, in fnic_eh_host_reset_handler()
3044 void fnic_scsi_fcpio_reset(struct fnic *fnic) in fnic_scsi_fcpio_reset() argument
3048 struct fnic_iport_s *iport = &fnic->iport; in fnic_scsi_fcpio_reset()
3053 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3054 if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { in fnic_scsi_fcpio_reset()
3056 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3057 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3059 fnic->state); in fnic_scsi_fcpio_reset()
3063 old_state = fnic->state; in fnic_scsi_fcpio_reset()
3064 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; in fnic_scsi_fcpio_reset()
3066 fnic_update_mac_locked(fnic, iport->hwmac); in fnic_scsi_fcpio_reset()
3067 fnic->fw_reset_done = &fw_reset_done; in fnic_scsi_fcpio_reset()
3068 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3070 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3072 if (fnic_fw_reset_handler(fnic)) { in fnic_scsi_fcpio_reset()
3073 spin_lock_irqsave(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3074 if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) in fnic_scsi_fcpio_reset()
3075 fnic->state = old_state; in fnic_scsi_fcpio_reset()
3076 spin_unlock_irqrestore(&fnic->fnic_lock, flags); in fnic_scsi_fcpio_reset()
3078 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3082 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3085 FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, in fnic_scsi_fcpio_reset()
3089 atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts); in fnic_scsi_fcpio_reset()
3091 fnic->fw_reset_done = NULL; in fnic_scsi_fcpio_reset()