Lines matching full:hw
34 efct_hw_link_event_init(struct efct_hw *hw) in efct_hw_link_event_init() argument
36 hw->link.status = SLI4_LINK_STATUS_MAX; in efct_hw_link_event_init()
37 hw->link.topology = SLI4_LINK_TOPO_NONE; in efct_hw_link_event_init()
38 hw->link.medium = SLI4_LINK_MEDIUM_MAX; in efct_hw_link_event_init()
39 hw->link.speed = 0; in efct_hw_link_event_init()
40 hw->link.loop_map = NULL; in efct_hw_link_event_init()
41 hw->link.fc_id = U32_MAX; in efct_hw_link_event_init()
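Read together, the fragments above (source lines 34-41) are the entire initializer: it parks the cached link state on sentinel values until the next link event or READ_TOPOLOGY completion fills it in. A reconstruction from the matched lines alone (the static qualifier is assumed):

static void
efct_hw_link_event_init(struct efct_hw *hw)
{
	/* Sentinels meaning "unknown": the MAX enum values, a zero speed,
	 * no loop map, and an out-of-range FC_ID.
	 */
	hw->link.status = SLI4_LINK_STATUS_MAX;
	hw->link.topology = SLI4_LINK_TOPO_NONE;
	hw->link.medium = SLI4_LINK_MEDIUM_MAX;
	hw->link.speed = 0;
	hw->link.loop_map = NULL;
	hw->link.fc_id = U32_MAX;
}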
47 efct_hw_read_max_dump_size(struct efct_hw *hw) in efct_hw_read_max_dump_size() argument
50 struct efct *efct = hw->os; in efct_hw_read_max_dump_size()
58 if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0)) in efct_hw_read_max_dump_size()
64 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); in efct_hw_read_max_dump_size()
66 efc_log_debug(hw->os, "set dump location cmd failed\n"); in efct_hw_read_max_dump_size()
70 hw->dump_size = in efct_hw_read_max_dump_size()
73 efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size); in efct_hw_read_max_dump_size()
79 __efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg) in __efct_read_topology_cb() argument
85 struct efct *efct = hw->os; in __efct_read_topology_cb()
88 efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, in __efct_read_topology_cb()
96 hw->link.status = SLI4_LINK_STATUS_UP; in __efct_read_topology_cb()
99 hw->link.status = SLI4_LINK_STATUS_DOWN; in __efct_read_topology_cb()
102 hw->link.status = SLI4_LINK_STATUS_NO_ALPA; in __efct_read_topology_cb()
105 hw->link.status = SLI4_LINK_STATUS_MAX; in __efct_read_topology_cb()
111 hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL; in __efct_read_topology_cb()
114 hw->link.topology = SLI4_LINK_TOPO_FC_AL; in __efct_read_topology_cb()
115 if (hw->link.status == SLI4_LINK_STATUS_UP) in __efct_read_topology_cb()
116 hw->link.loop_map = hw->loop_map.virt; in __efct_read_topology_cb()
117 hw->link.fc_id = read_topo->acquired_al_pa; in __efct_read_topology_cb()
120 hw->link.topology = SLI4_LINK_TOPO_MAX; in __efct_read_topology_cb()
124 hw->link.medium = SLI4_LINK_MEDIUM_FC; in __efct_read_topology_cb()
130 hw->link.speed = 1 * 1000; in __efct_read_topology_cb()
133 hw->link.speed = 2 * 1000; in __efct_read_topology_cb()
136 hw->link.speed = 4 * 1000; in __efct_read_topology_cb()
139 hw->link.speed = 8 * 1000; in __efct_read_topology_cb()
142 hw->link.speed = 16 * 1000; in __efct_read_topology_cb()
145 hw->link.speed = 32 * 1000; in __efct_read_topology_cb()
148 hw->link.speed = 64 * 1000; in __efct_read_topology_cb()
151 hw->link.speed = 128 * 1000; in __efct_read_topology_cb()
155 drec.speed = hw->link.speed; in __efct_read_topology_cb()
156 drec.fc_id = hw->link.fc_id; in __efct_read_topology_cb()
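Source lines 130-151 decode the READ_TOPOLOGY current-link-speed code into Mb/s, doubling from 1G up to 128G. A sketch of the switch; the variable name and case labels here are hypothetical, since only the assignments matched:

	switch (currlink_speed) {	/* name assumed */
	case SPEED_1G:	/* hypothetical labels throughout */
		hw->link.speed = 1 * 1000;	/* 1 Gb/s expressed in Mb/s */
		break;
	case SPEED_2G:
		hw->link.speed = 2 * 1000;
		break;
	case SPEED_4G:
		hw->link.speed = 4 * 1000;
		break;
	case SPEED_8G:
		hw->link.speed = 8 * 1000;
		break;
	case SPEED_16G:
		hw->link.speed = 16 * 1000;
		break;
	case SPEED_32G:
		hw->link.speed = 32 * 1000;
		break;
	case SPEED_64G:
		hw->link.speed = 64 * 1000;
		break;
	case SPEED_128G:
		hw->link.speed = 128 * 1000;
		break;
	}

The drec.speed/drec.fc_id assignments at lines 155-156 then copy the decoded values into the record passed up the stack.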
166 struct efct_hw *hw = ctx; in efct_hw_cb_link() local
170 struct efct *efct = hw->os; in efct_hw_cb_link()
172 efct_hw_link_event_init(hw); in efct_hw_cb_link()
177 hw->link = *event; in efct_hw_cb_link()
183 efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n", in efct_hw_cb_link()
193 efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n", in efct_hw_cb_link()
196 if (!sli_cmd_read_topology(&hw->sli, buf, in efct_hw_cb_link()
197 &hw->loop_map)) { in efct_hw_cb_link()
198 rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT, in efct_hw_cb_link()
203 efc_log_debug(hw->os, "READ_TOPOLOGY failed\n"); in efct_hw_cb_link()
205 efc_log_info(hw->os, "%s(%#x), speed is %d\n", in efct_hw_cb_link()
211 efc_log_info(hw->os, "Link down\n"); in efct_hw_cb_link()
213 hw->link.status = event->status; in efct_hw_cb_link()
221 efc_log_debug(hw->os, "unhandled link status %#x\n", in efct_hw_cb_link()
230 efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev) in efct_hw_setup() argument
234 if (hw->hw_setup_called) in efct_hw_setup()
242 memset(hw, 0, sizeof(struct efct_hw)); in efct_hw_setup()
244 hw->hw_setup_called = true; in efct_hw_setup()
246 hw->os = os; in efct_hw_setup()
248 mutex_init(&hw->bmbx_lock); in efct_hw_setup()
249 spin_lock_init(&hw->cmd_lock); in efct_hw_setup()
250 INIT_LIST_HEAD(&hw->cmd_head); in efct_hw_setup()
251 INIT_LIST_HEAD(&hw->cmd_pending); in efct_hw_setup()
252 hw->cmd_head_count = 0; in efct_hw_setup()
255 hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ, in efct_hw_setup()
257 if (!hw->cmd_ctx_pool) { in efct_hw_setup()
258 efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n"); in efct_hw_setup()
263 hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ, in efct_hw_setup()
265 if (!hw->mbox_rqst_pool) { in efct_hw_setup()
266 efc_log_err(hw->os, "failed to allocate mbox request pool\n"); in efct_hw_setup()
270 spin_lock_init(&hw->io_lock); in efct_hw_setup()
271 INIT_LIST_HEAD(&hw->io_inuse); in efct_hw_setup()
272 INIT_LIST_HEAD(&hw->io_free); in efct_hw_setup()
273 INIT_LIST_HEAD(&hw->io_wait_free); in efct_hw_setup()
275 atomic_set(&hw->io_alloc_failed_count, 0); in efct_hw_setup()
277 hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4; in efct_hw_setup()
278 if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) { in efct_hw_setup()
279 efc_log_err(hw->os, "SLI setup failed\n"); in efct_hw_setup()
283 efct_hw_link_event_init(hw); in efct_hw_setup()
285 sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw); in efct_hw_setup()
290 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) in efct_hw_setup()
291 hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i]; in efct_hw_setup()
297 hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2; in efct_hw_setup()
303 hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD; in efct_hw_setup()
304 hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size; in efct_hw_setup()
307 hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus; in efct_hw_setup()
309 max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED; in efct_hw_setup()
311 hw->config.n_sgl = max_sgl; in efct_hw_setup()
313 (void)efct_hw_read_max_dump_size(hw); in efct_hw_setup()
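efct_hw_setup() front-loads two mempools (source lines 255-266) so that mailbox commands issued later from atomic context never depend on a bare kmalloc succeeding. A condensed sketch of that fragment; the element types and the -EIO returns are assumptions:

	hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
					sizeof(struct efct_command_ctx));
	if (!hw->cmd_ctx_pool) {
		efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
		return -EIO;
	}

	hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
					sizeof(struct efct_mbox_rqst_ctx));
	if (!hw->mbox_rqst_pool) {
		efc_log_err(hw->os, "failed to allocate mbox request pool\n");
		return -EIO;
	}

mempool_create_kmalloc_pool(min_nr, size) keeps min_nr elements preallocated, which is what makes the GFP_ATOMIC mempool_alloc calls at source lines 1422 and 1560 safe under memory pressure.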
319 efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id) in efct_logfcfi() argument
321 efc_log_info(hw->os, in efct_logfcfi()
323 j, hw->config.filter_def[j], i, id); in efct_logfcfi()
353 efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_restore_sgl() argument
364 struct efct_hw *hw = io->hw; in efct_hw_wq_process_io() local
379 sli_fc_els_did(&hw->sli, cqe, &ext); in efct_hw_wq_process_io()
380 len = sli_fc_response_length(&hw->sli, cqe); in efct_hw_wq_process_io()
386 len = sli_fc_response_length(&hw->sli, cqe); in efct_hw_wq_process_io()
389 len = sli_fc_io_length(&hw->sli, cqe); in efct_hw_wq_process_io()
392 len = sli_fc_io_length(&hw->sli, cqe); in efct_hw_wq_process_io()
398 /* efct_hw_io_free(hw, io); */ in efct_hw_wq_process_io()
401 efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n", in efct_hw_wq_process_io()
406 ext = sli_fc_ext_status(&hw->sli, cqe); in efct_hw_wq_process_io()
409 * abort for the IO from within the HW in efct_hw_wq_process_io()
415 efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", in efct_hw_wq_process_io()
423 rc = efct_hw_io_abort(hw, io, false, NULL, NULL); in efct_hw_wq_process_io()
440 efc_log_debug(hw->os, "%s%#x tag=%#x\n", in efct_hw_wq_process_io()
448 efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n", in efct_hw_wq_process_io()
469 efct_hw_io_restore_sgl(hw, io); in efct_hw_wq_process_io()
478 efct_hw_setup_io(struct efct_hw *hw) in efct_hw_setup_io() argument
487 struct efct *efct = hw->os; in efct_hw_setup_io()
489 if (!hw->io) { in efct_hw_setup_io()
490 hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL); in efct_hw_setup_io()
491 if (!hw->io) in efct_hw_setup_io()
494 memset(hw->io, 0, hw->config.n_io * sizeof(io)); in efct_hw_setup_io()
496 for (i = 0; i < hw->config.n_io; i++) { in efct_hw_setup_io()
497 hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL); in efct_hw_setup_io()
498 if (!hw->io[i]) in efct_hw_setup_io()
503 hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size), in efct_hw_setup_io()
505 if (!hw->wqe_buffs) { in efct_hw_setup_io()
506 kfree(hw->io); in efct_hw_setup_io()
516 dma = &hw->xfer_rdy; in efct_hw_setup_io()
517 dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io; in efct_hw_setup_io()
523 xfer_virt = (uintptr_t)hw->xfer_rdy.virt; in efct_hw_setup_io()
524 xfer_phys = hw->xfer_rdy.phys; in efct_hw_setup_io()
526 /* Initialize the pool of HW IO objects */ in efct_hw_setup_io()
527 for (i = 0; i < hw->config.n_io; i++) { in efct_hw_setup_io()
530 io = hw->io[i]; in efct_hw_setup_io()
533 io->hw = hw; in efct_hw_setup_io()
536 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size]; in efct_hw_setup_io()
539 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io); in efct_hw_setup_io()
541 efc_log_err(hw->os, "can't allocate request tag\n"); in efct_hw_setup_io()
552 if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI, in efct_hw_setup_io()
554 efc_log_err(hw->os, in efct_hw_setup_io()
561 dma->size = hw->config.n_sgl * in efct_hw_setup_io()
567 efc_log_err(hw->os, "dma_alloc fail %d\n", i); in efct_hw_setup_io()
573 io->def_sgl_count = hw->config.n_sgl; in efct_hw_setup_io()
577 if (hw->xfer_rdy.size) { in efct_hw_setup_io()
589 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { in efct_hw_setup_io()
590 kfree(hw->io[i]); in efct_hw_setup_io()
591 hw->io[i] = NULL; in efct_hw_setup_io()
594 kfree(hw->io); in efct_hw_setup_io()
595 hw->io = NULL; in efct_hw_setup_io()
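efct_hw_setup_io() builds the IO table in two levels: an array of n_io pointers, then one kzalloc per slot, and the unwind at source lines 589-595 frees only the slots that were actually filled. The pattern in isolation, as a sketch (kcalloc stands in for the kmalloc_array + memset pair at lines 490-494, to which it is equivalent):

	u32 i;

	hw->io = kcalloc(hw->config.n_io, sizeof(*hw->io), GFP_KERNEL);
	if (!hw->io)
		return -ENOMEM;

	for (i = 0; i < hw->config.n_io; i++) {
		hw->io[i] = kzalloc(sizeof(*hw->io[i]), GFP_KERNEL);
		if (!hw->io[i])
			goto error;
	}
	return 0;

error:
	/* The pointer array is zero-filled, so stop at the first NULL slot. */
	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
		kfree(hw->io[i]);
		hw->io[i] = NULL;
	}
	kfree(hw->io);
	hw->io = NULL;
	return -ENOMEM;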
601 efct_hw_init_prereg_io(struct efct_hw *hw) in efct_hw_init_prereg_io() argument
612 struct efct *efct = hw->os; in efct_hw_init_prereg_io()
627 for (n_rem = hw->config.n_io; n_rem; n_rem -= n) { in efct_hw_init_prereg_io()
636 if (hw->io[idx + n]->indicator != in efct_hw_init_prereg_io()
637 hw->io[idx + n - 1]->indicator + 1) in efct_hw_init_prereg_io()
641 sgls[n] = hw->io[idx + n]->sgl; in efct_hw_init_prereg_io()
644 if (sli_cmd_post_sgl_pages(&hw->sli, cmd, in efct_hw_init_prereg_io()
645 hw->io[idx]->indicator, n, sgls, NULL, &req)) { in efct_hw_init_prereg_io()
650 rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL); in efct_hw_init_prereg_io()
652 efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc); in efct_hw_init_prereg_io()
658 io = hw->io[idx]; in efct_hw_init_prereg_io()
661 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_init_prereg_io()
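Pre-registration posts SGLs in batches, and a batch may only cover XRIs whose indicators run consecutively (the check at source lines 636-637), since POST_SGL_PAGES registers one contiguous range per mailbox command. A standalone helper sketching just that batching rule; the function, its name, and the max parameter are illustrative, not the driver's:

/* Count how many IOs, starting at idx, have contiguous XRI indicators. */
static u32 prereg_batch_len(struct efct_hw_io **io, u32 idx, u32 n_rem, u32 max)
{
	u32 n;

	for (n = 1; n < max && n < n_rem; n++)
		if (io[idx + n]->indicator != io[idx + n - 1]->indicator + 1)
			break;
	return n;
}

The caller then issues one sli_cmd_post_sgl_pages() per batch and advances by the returned count, which is the n_rem -= n loop at source line 627.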
673 efct_hw_init_io(struct efct_hw *hw) in efct_hw_init_io() argument
680 prereg = hw->sli.params.sgl_pre_registered; in efct_hw_init_io()
683 return efct_hw_init_prereg_io(hw); in efct_hw_init_io()
685 for (i = 0; i < hw->config.n_io; i++, idx++) { in efct_hw_init_io()
686 io = hw->io[idx]; in efct_hw_init_io()
689 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_init_io()
696 efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint) in efct_hw_config_set_fdt_xfer_hint() argument
705 sli_cmd_common_set_features(&hw->sli, buf, in efct_hw_config_set_fdt_xfer_hint()
708 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); in efct_hw_config_set_fdt_xfer_hint()
710 efc_log_warn(hw->os, "set FDT hint %d failed: %d\n", in efct_hw_config_set_fdt_xfer_hint()
713 efc_log_info(hw->os, "Set FDT transfer hint to %d\n", in efct_hw_config_set_fdt_xfer_hint()
720 efct_hw_config_rq(struct efct_hw *hw) in efct_hw_config_rq() argument
726 efc_log_info(hw->os, "using REG_FCFI standard\n"); in efct_hw_config_rq()
729 * Set the filter match/mask values from hw's filter_def values in efct_hw_config_rq()
734 rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i]; in efct_hw_config_rq()
735 rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8); in efct_hw_config_rq()
736 rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16); in efct_hw_config_rq()
737 rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24); in efct_hw_config_rq()
745 min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ? in efct_hw_config_rq()
746 hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG; in efct_hw_config_rq()
748 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_config_rq()
759 efct_logfcfi(hw, j, i, rq->hdr->id); in efct_hw_config_rq()
764 if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg)) in efct_hw_config_rq()
765 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); in efct_hw_config_rq()
768 efc_log_err(hw->os, "FCFI registration failed\n"); in efct_hw_config_rq()
771 hw->fcf_indicator = in efct_hw_config_rq()
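Each 32-bit filter_def packs four 8-bit filter fields, peeled off one byte at a time (source lines 734-737). A worked decode with an arbitrary example value:

	u32 filter_def = 0xAABBCCDD;	/* example value only */

	rq_cfg[i].r_ctl_mask  = (u8)filter_def;		/* 0xDD */
	rq_cfg[i].r_ctl_match = (u8)(filter_def >> 8);	/* 0xCC */
	rq_cfg[i].type_mask   = (u8)(filter_def >> 16);	/* 0xBB */
	rq_cfg[i].type_match  = (u8)(filter_def >> 24);	/* 0xAA */

Note that the MRQ variant below (source lines 792-795) consumes the same four bytes in the opposite field order: type_mask/type_match from the low half and r_ctl_mask/r_ctl_match from the high half.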
778 efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index) in efct_hw_config_mrq() argument
789 /* Set the filter match/mask values from hw's filter_def values */ in efct_hw_config_mrq()
792 rq_filter[i].type_mask = (u8)hw->config.filter_def[i]; in efct_hw_config_mrq()
793 rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8); in efct_hw_config_mrq()
794 rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16); in efct_hw_config_mrq()
795 rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24); in efct_hw_config_mrq()
798 rq = hw->hw_rq[0]; in efct_hw_config_mrq()
804 efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n", in efct_hw_config_mrq()
805 hw->hw_rq_count, hw->config.rq_selection_policy, mode); in efct_hw_config_mrq()
807 rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index, in efct_hw_config_mrq()
808 hw->config.rq_selection_policy, mrq_bitmask, in efct_hw_config_mrq()
809 hw->hw_mrq_count, rq_filter); in efct_hw_config_mrq()
811 efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n"); in efct_hw_config_mrq()
815 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); in efct_hw_config_mrq()
820 efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n", in efct_hw_config_mrq()
826 hw->fcf_indicator = le16_to_cpu(rsp->fcfi); in efct_hw_config_mrq()
851 efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable) in efct_hw_config_sli_port_health_check() argument
869 sli_cmd_common_set_features(&hw->sli, buf, in efct_hw_config_sli_port_health_check()
872 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL); in efct_hw_config_sli_port_health_check()
874 efc_log_err(hw->os, "efct_hw_command returns %d\n", rc); in efct_hw_config_sli_port_health_check()
876 efc_log_debug(hw->os, "SLI Port Health Check is enabled\n"); in efct_hw_config_sli_port_health_check()
882 efct_hw_init(struct efct_hw *hw) in efct_hw_init() argument
898 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_init()
899 if (!list_empty(&hw->cmd_head)) { in efct_hw_init()
900 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_init()
901 efc_log_err(hw->os, "command found on cmd list\n"); in efct_hw_init()
904 if (!list_empty(&hw->cmd_pending)) { in efct_hw_init()
905 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_init()
906 efc_log_err(hw->os, "command found on pending list\n"); in efct_hw_init()
909 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_init()
912 efct_hw_rx_free(hw); in efct_hw_init()
925 while ((!list_empty(&hw->io_wait_free))) { in efct_hw_init()
927 temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io, in efct_hw_init()
932 efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n", in efct_hw_init()
936 while ((!list_empty(&hw->io_inuse))) { in efct_hw_init()
938 temp = list_first_entry(&hw->io_inuse, struct efct_hw_io, in efct_hw_init()
943 efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n", in efct_hw_init()
947 while ((!list_empty(&hw->io_free))) { in efct_hw_init()
949 temp = list_first_entry(&hw->io_free, struct efct_hw_io, in efct_hw_init()
954 efc_log_debug(hw->os, "rmvd %d items from io_free list\n", in efct_hw_init()
958 if (hw->config.n_rq == 1) in efct_hw_init()
959 hw->sli.features &= (~SLI4_REQFEAT_MRQP); in efct_hw_init()
961 if (sli_init(&hw->sli)) { in efct_hw_init()
962 efc_log_err(hw->os, "SLI failed to initialize\n"); in efct_hw_init()
966 if (hw->sliport_healthcheck) { in efct_hw_init()
967 rc = efct_hw_config_sli_port_health_check(hw, 0, 1); in efct_hw_init()
969 efc_log_err(hw->os, "Enabling port health check failed\n"); in efct_hw_init()
977 if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) { in efct_hw_init()
983 efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT); in efct_hw_init()
987 memset(hw->cq_hash, 0, sizeof(hw->cq_hash)); in efct_hw_init()
988 efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n", in efct_hw_init()
991 memset(hw->rq_hash, 0, sizeof(hw->rq_hash)); in efct_hw_init()
992 efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n", in efct_hw_init()
995 memset(hw->wq_hash, 0, sizeof(hw->wq_hash)); in efct_hw_init()
996 efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n", in efct_hw_init()
999 rc = efct_hw_init_queues(hw); in efct_hw_init()
1003 rc = efct_hw_map_wq_cpu(hw); in efct_hw_init()
1008 rc = efct_hw_rx_allocate(hw); in efct_hw_init()
1010 efc_log_err(hw->os, "rx_allocate failed\n"); in efct_hw_init()
1014 rc = efct_hw_rx_post(hw); in efct_hw_init()
1016 efc_log_err(hw->os, "WARNING - error posting RQ buffers\n"); in efct_hw_init()
1020 if (hw->config.n_eq == 1) { in efct_hw_init()
1021 rc = efct_hw_config_rq(hw); in efct_hw_init()
1023 efc_log_err(hw->os, "config rq failed %d\n", rc); in efct_hw_init()
1027 rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0); in efct_hw_init()
1029 efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n"); in efct_hw_init()
1033 rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0); in efct_hw_init()
1035 efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n"); in efct_hw_init()
1045 hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw); in efct_hw_init()
1046 if (!hw->wq_reqtag_pool) { in efct_hw_init()
1047 efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n"); in efct_hw_init()
1051 rc = efct_hw_setup_io(hw); in efct_hw_init()
1053 efc_log_err(hw->os, "IO allocation failure\n"); in efct_hw_init()
1057 rc = efct_hw_init_io(hw); in efct_hw_init()
1059 efc_log_err(hw->os, "IO initialization failure\n"); in efct_hw_init()
1063 dma = &hw->loop_map; in efct_hw_init()
1065 dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys, in efct_hw_init()
1074 for (i = 0; i < hw->eq_count; i++) in efct_hw_init()
1075 sli_queue_arm(&hw->sli, &hw->eq[i], true); in efct_hw_init()
1080 for (i = 0; i < hw->rq_count; i++) in efct_hw_init()
1081 efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i); in efct_hw_init()
1086 for (i = 0; i < hw->wq_count; i++) in efct_hw_init()
1087 efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i); in efct_hw_init()
1092 for (i = 0; i < hw->cq_count; i++) { in efct_hw_init()
1093 efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i); in efct_hw_init()
1094 sli_queue_arm(&hw->sli, &hw->cq[i], true); in efct_hw_init()
1098 for (i = 0; i < hw->hw_rq_count; i++) { in efct_hw_init()
1099 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_init()
1101 hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2; in efct_hw_init()
1105 hw->state = EFCT_HW_STATE_ACTIVE; in efct_hw_init()
1107 * Allocate a HW IO for send frame. in efct_hw_init()
1109 hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw); in efct_hw_init()
1110 if (!hw->hw_wq[0]->send_frame_io) in efct_hw_init()
1111 efc_log_err(hw->os, "alloc for send_frame_io failed\n"); in efct_hw_init()
1114 atomic_set(&hw->send_frame_seq_id, 0); in efct_hw_init()
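Once the queues exist, efct_hw_init() registers every RQ, WQ, and CQ id in a small id-to-index hash (source lines 1080-1094) so completion paths can resolve a hardware queue id without a linear scan. The driver's efct_hw_queue_hash_add/find live outside this listing; the linear-probe sketch below only illustrates the contract, and every name in it is hypothetical:

#define QHASH_SIZE 256	/* assumed: power of two, larger than any queue count */

struct qhash_entry {
	u16 id;
	u16 index;
	bool in_use;
};

static void qhash_add(struct qhash_entry *h, u16 id, u16 index)
{
	u32 slot = id & (QHASH_SIZE - 1);

	/* Linear probe; assumes the table is never allowed to fill. */
	while (h[slot].in_use)
		slot = (slot + 1) & (QHASH_SIZE - 1);
	h[slot].id = id;
	h[slot].index = index;
	h[slot].in_use = true;
}

static int qhash_find(struct qhash_entry *h, u16 id)
{
	u32 slot = id & (QHASH_SIZE - 1);
	u32 start = slot;

	do {
		if (h[slot].in_use && h[slot].id == id)
			return h[slot].index;
		slot = (slot + 1) & (QHASH_SIZE - 1);
	} while (slot != start);

	return -1;	/* misses feed the "bad CQ_ID"/"bad WQ_ID" error paths */
}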
1120 efct_hw_parse_filter(struct efct_hw *hw, void *value) in efct_hw_parse_filter() argument
1127 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) in efct_hw_parse_filter()
1128 hw->config.filter_def[idx] = 0; in efct_hw_parse_filter()
1132 efc_log_err(hw->os, "p is NULL\n"); in efct_hw_parse_filter()
1139 if (kstrtou32(token, 0, &hw->config.filter_def[idx++])) in efct_hw_parse_filter()
1140 efc_log_err(hw->os, "kstrtou32 failed\n"); in efct_hw_parse_filter()
1145 if (idx == ARRAY_SIZE(hw->config.filter_def)) in efct_hw_parse_filter()
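The filter string is a comma-separated list of unsigned values, one per filter_def slot (source lines 1139-1145). A standalone sketch of the tokenizing loop; only the kstrtou32 call and the ARRAY_SIZE bound are visible in the match, the strsep walk is an assumption:

static void parse_filter_defs(struct efct_hw *hw, char *p)
{
	char *token;
	u32 idx = 0;

	/* p must point into a writable, NUL-terminated copy of the string. */
	while ((token = strsep(&p, ",")) != NULL) {
		if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
			efc_log_err(hw->os, "kstrtou32 failed\n");
		if (idx == ARRAY_SIZE(hw->config.filter_def))
			break;
	}
}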
1154 efct_get_wwnn(struct efct_hw *hw) in efct_get_wwnn() argument
1156 struct sli4 *sli = &hw->sli; in efct_get_wwnn()
1164 efct_get_wwpn(struct efct_hw *hw) in efct_get_wwpn() argument
1166 struct sli4 *sli = &hw->sli; in efct_get_wwpn()
1174 efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count, in efct_hw_rx_buffer_alloc() argument
1177 struct efct *efct = hw->os; in efct_hw_rx_buffer_alloc()
1198 efc_log_err(hw->os, "DMA allocation failed\n"); in efct_hw_rx_buffer_alloc()
1207 efct_hw_rx_buffer_free(struct efct_hw *hw, in efct_hw_rx_buffer_free() argument
1211 struct efct *efct = hw->os; in efct_hw_rx_buffer_free()
1228 efct_hw_rx_allocate(struct efct_hw *hw) in efct_hw_rx_allocate() argument
1230 struct efct *efct = hw->os; in efct_hw_rx_allocate()
1235 u32 payload_size = hw->config.rq_default_buffer_size; in efct_hw_rx_allocate()
1239 for (i = 0; i < hw->hw_rq_count; i++) { in efct_hw_rx_allocate()
1240 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_rx_allocate()
1243 rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex, in efct_hw_rx_allocate()
1252 efc_log_debug(hw->os, in efct_hw_rx_allocate()
1259 rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex, in efct_hw_rx_allocate()
1267 efc_log_debug(hw->os, in efct_hw_rx_allocate()
1277 efct_hw_rx_post(struct efct_hw *hw) in efct_hw_rx_post() argument
1284 if (!hw->seq_pool) { in efct_hw_rx_post()
1287 for (i = 0; i < hw->hw_rq_count; i++) in efct_hw_rx_post()
1288 count += hw->hw_rq[i]->entry_count; in efct_hw_rx_post()
1290 hw->seq_pool = kmalloc_array(count, in efct_hw_rx_post()
1292 if (!hw->seq_pool) in efct_hw_rx_post()
1300 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { in efct_hw_rx_post()
1301 struct hw_rq *rq = hw->hw_rq[rq_idx]; in efct_hw_rx_post()
1306 seq = hw->seq_pool + idx; in efct_hw_rx_post()
1310 rc = efct_hw_sequence_free(hw, seq); in efct_hw_rx_post()
1318 if (rc && hw->seq_pool) in efct_hw_rx_post()
1319 kfree(hw->seq_pool); in efct_hw_rx_post()
1325 efct_hw_rx_free(struct efct_hw *hw) in efct_hw_rx_free() argument
1330 for (i = 0; i < hw->hw_rq_count; i++) { in efct_hw_rx_free()
1331 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_rx_free()
1334 efct_hw_rx_buffer_free(hw, rq->hdr_buf, in efct_hw_rx_free()
1337 efct_hw_rx_buffer_free(hw, rq->payload_buf, in efct_hw_rx_free()
1345 efct_hw_cmd_submit_pending(struct efct_hw *hw) in efct_hw_cmd_submit_pending() argument
1352 while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) && in efct_hw_cmd_submit_pending()
1353 !list_empty(&hw->cmd_pending)) { in efct_hw_cmd_submit_pending()
1356 ctx = list_first_entry(&hw->cmd_pending, in efct_hw_cmd_submit_pending()
1363 list_add_tail(&ctx->list_entry, &hw->cmd_head); in efct_hw_cmd_submit_pending()
1364 hw->cmd_head_count++; in efct_hw_cmd_submit_pending()
1365 if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) { in efct_hw_cmd_submit_pending()
1366 efc_log_debug(hw->os, in efct_hw_cmd_submit_pending()
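Mailbox traffic is flow-controlled: commands wait on cmd_pending and migrate to cmd_head only while fewer than EFCT_HW_MQ_DEPTH - 1 are outstanding (source lines 1352-1366). A sketch of the drain loop, assuming the caller holds cmd_lock and that the context type is named struct efct_command_ctx:

	while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
	       !list_empty(&hw->cmd_pending)) {
		struct efct_command_ctx *ctx;

		ctx = list_first_entry(&hw->cmd_pending,
				       struct efct_command_ctx, list_entry);
		list_del_init(&ctx->list_entry);
		list_add_tail(&ctx->list_entry, &hw->cmd_head);
		hw->cmd_head_count++;

		if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
			/* Log text truncated in the match; wording assumed. */
			efc_log_debug(hw->os, "sli_mq_write failed\n");
			return -EIO;
		}
	}
	return 0;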
1376 efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg) in efct_hw_command() argument
1386 if (sli_fw_error_status(&hw->sli) > 0) { in efct_hw_command()
1387 efc_log_crit(hw->os, "Chip in an error state - reset needed\n"); in efct_hw_command()
1388 efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", in efct_hw_command()
1389 sli_reg_read_status(&hw->sli), in efct_hw_command()
1390 sli_reg_read_err1(&hw->sli), in efct_hw_command()
1391 sli_reg_read_err2(&hw->sli)); in efct_hw_command()
1403 mutex_lock(&hw->bmbx_lock); in efct_hw_command()
1404 bmbx = hw->sli.bmbx.virt; in efct_hw_command()
1408 if (sli_bmbx_command(&hw->sli) == 0) { in efct_hw_command()
1412 mutex_unlock(&hw->bmbx_lock); in efct_hw_command()
1416 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_command()
1417 efc_log_err(hw->os, "Can't send command, HW state=%d\n", in efct_hw_command()
1418 hw->state); in efct_hw_command()
1422 ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC); in efct_hw_command()
1434 ctx->ctx = hw; in efct_hw_command()
1436 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command()
1440 list_add_tail(&ctx->list_entry, &hw->cmd_pending); in efct_hw_command()
1443 rc = efct_hw_cmd_submit_pending(hw); in efct_hw_command()
1445 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command()
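efct_hw_command() forks on opts: EFCT_CMD_POLL serializes on the single-slot bootstrap mailbox behind the bmbx_lock mutex, while EFCT_CMD_NOWAIT pulls a context from the GFP_ATOMIC-safe mempool and queues it under cmd_lock (source lines 1403-1445). A sketch of both paths; the memcpy sizes and error codes are assumptions:

	if (opts == EFCT_CMD_POLL) {
		mutex_lock(&hw->bmbx_lock);
		bmbx = hw->sli.bmbx.virt;
		memset(bmbx, 0, SLI4_BMBX_SIZE);
		memcpy(bmbx, cmd, SLI4_BMBX_SIZE);

		if (sli_bmbx_command(&hw->sli) == 0) {
			rc = 0;
			memcpy(cmd, bmbx, SLI4_BMBX_SIZE); /* response back to caller */
		}
		mutex_unlock(&hw->bmbx_lock);
	} else if (opts == EFCT_CMD_NOWAIT) {
		if (hw->state != EFCT_HW_STATE_ACTIVE) {
			efc_log_err(hw->os, "Can't send command, HW state=%d\n",
				    hw->state);
			return -EIO;
		}

		ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
		if (!ctx)
			return -ENOMEM;

		ctx->cb = cb;	/* field names as used at source lines 1434/1479 */
		ctx->arg = arg;
		ctx->ctx = hw;
		memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);

		spin_lock_irqsave(&hw->cmd_lock, flags);
		list_add_tail(&ctx->list_entry, &hw->cmd_pending);
		rc = efct_hw_cmd_submit_pending(hw);
		spin_unlock_irqrestore(&hw->cmd_lock, flags);
	}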
1452 efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe, in efct_hw_command_process() argument
1458 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command_process()
1459 if (!list_empty(&hw->cmd_head)) { in efct_hw_command_process()
1460 ctx = list_first_entry(&hw->cmd_head, in efct_hw_command_process()
1465 efc_log_err(hw->os, "no command context\n"); in efct_hw_command_process()
1466 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_process()
1470 hw->cmd_head_count--; in efct_hw_command_process()
1473 efct_hw_cmd_submit_pending(hw); in efct_hw_command_process()
1475 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_process()
1479 ctx->cb(hw, status, ctx->buf, ctx->arg); in efct_hw_command_process()
1482 mempool_free(ctx, hw->cmd_ctx_pool); in efct_hw_command_process()
1488 efct_hw_mq_process(struct efct_hw *hw, in efct_hw_mq_process() argument
1494 rc = sli_mq_read(&hw->sli, mq, mqe); in efct_hw_mq_process()
1496 rc = efct_hw_command_process(hw, status, mqe, mq->size); in efct_hw_mq_process()
1502 efct_hw_command_cancel(struct efct_hw *hw) in efct_hw_command_cancel() argument
1507 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1514 while (!list_empty(&hw->cmd_head)) { in efct_hw_command_cancel()
1518 ctx = list_first_entry(&hw->cmd_head, in efct_hw_command_cancel()
1521 efc_log_debug(hw->os, "hung command %08x\n", in efct_hw_command_cancel()
1523 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1524 rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE); in efct_hw_command_cancel()
1525 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1528 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1534 efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg) in efct_mbox_rsp_cb() argument
1540 (*ctx->callback)(hw->os->efcport, status, mqe, in efct_mbox_rsp_cb()
1543 mempool_free(ctx, hw->mbox_rqst_pool); in efct_mbox_rsp_cb()
1552 struct efct_hw *hw = &efct->hw; in efct_issue_mbox_rqst() local
1560 ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC); in efct_issue_mbox_rqst()
1567 rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx); in efct_issue_mbox_rqst()
1570 mempool_free(ctx, hw->mbox_rqst_pool); in efct_issue_mbox_rqst()
1578 _efct_hw_io_alloc(struct efct_hw *hw) in _efct_hw_io_alloc() argument
1582 if (!list_empty(&hw->io_free)) { in _efct_hw_io_alloc()
1583 io = list_first_entry(&hw->io_free, struct efct_hw_io, in _efct_hw_io_alloc()
1589 list_add_tail(&io->list_entry, &hw->io_inuse); in _efct_hw_io_alloc()
1592 io->wq = hw->wq_cpu_array[raw_smp_processor_id()]; in _efct_hw_io_alloc()
1594 efc_log_err(hw->os, "WQ not assigned for cpu:%d\n", in _efct_hw_io_alloc()
1596 io->wq = hw->hw_wq[0]; in _efct_hw_io_alloc()
1601 atomic_add(1, &hw->io_alloc_failed_count); in _efct_hw_io_alloc()
1608 efct_hw_io_alloc(struct efct_hw *hw) in efct_hw_io_alloc() argument
1613 spin_lock_irqsave(&hw->io_lock, flags); in efct_hw_io_alloc()
1614 io = _efct_hw_io_alloc(hw); in efct_hw_io_alloc()
1615 spin_unlock_irqrestore(&hw->io_lock, flags); in efct_hw_io_alloc()
1621 efct_hw_io_free_move_correct_list(struct efct_hw *hw, in efct_hw_io_free_move_correct_list() argument
1634 list_add_tail(&io->list_entry, &hw->io_wait_free); in efct_hw_io_free_move_correct_list()
1639 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_io_free_move_correct_list()
1645 efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_free_common() argument
1651 efct_hw_io_restore_sgl(hw, io); in efct_hw_io_free_common()
1659 struct efct_hw *hw = io->hw; in efct_hw_io_free_internal() local
1662 efct_hw_io_free_common(hw, io); in efct_hw_io_free_internal()
1664 spin_lock_irqsave(&hw->io_lock, flags); in efct_hw_io_free_internal()
1666 if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) { in efct_hw_io_free_internal()
1668 efct_hw_io_free_move_correct_list(hw, io); in efct_hw_io_free_internal()
1670 spin_unlock_irqrestore(&hw->io_lock, flags); in efct_hw_io_free_internal()
1674 efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_free() argument
1680 efct_hw_io_lookup(struct efct_hw *hw, u32 xri) in efct_hw_io_lookup() argument
1684 ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0]; in efct_hw_io_lookup()
1685 return hw->io[ioindex]; in efct_hw_io_lookup()
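The XRI-to-IO lookup is a plain base-offset subtraction into the hw->io array (source lines 1684-1685): with an XRI extent base of, say, 0x480, XRI 0x483 resolves to hw->io[3]. Reconstructed from the matched lines (no range check is visible in the match):

struct efct_hw_io *
efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
{
	u32 ioindex;

	ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
	return hw->io[ioindex];
}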
1689 efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io, in efct_hw_io_init_sges() argument
1698 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io); in efct_hw_io_init_sges()
1750 efc_log_err(hw->os, "unsupported IO type %#x\n", type); in efct_hw_io_init_sges()
1778 efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io, in efct_hw_io_add_sge() argument
1785 efc_log_err(hw->os, in efct_hw_io_add_sge()
1786 "bad parameter hw=%p io=%p addr=%lx length=%u\n", in efct_hw_io_add_sge()
1787 hw, io, addr, length); in efct_hw_io_add_sge()
1791 if (length > hw->sli.sge_supported_length) { in efct_hw_io_add_sge()
1792 efc_log_err(hw->os, in efct_hw_io_add_sge()
1794 length, hw->sli.sge_supported_length); in efct_hw_io_add_sge()
1836 efct_hw_io_abort_all(struct efct_hw *hw) in efct_hw_io_abort_all() argument
1842 &hw->io_inuse, list_entry) { in efct_hw_io_abort_all()
1843 efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL); in efct_hw_io_abort_all()
1851 struct efct_hw *hw = io->hw; in efct_hw_wq_process_abort() local
1862 ext = sli_fc_ext_status(&hw->sli, cqe); in efct_hw_wq_process_abort()
1894 efc_log_err(hw->os, "HW IO already freed\n"); in efct_hw_wq_process_abort()
1898 wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag); in efct_hw_wq_process_abort()
1899 efct_hw_reqtag_free(hw, wqcb); in efct_hw_wq_process_abort()
1905 (void)efct_hw_io_free(hw, io); in efct_hw_wq_process_abort()
1909 efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe) in efct_hw_fill_abort_wqe() argument
1913 memset(abort, 0, hw->sli.wqe_size); in efct_hw_fill_abort_wqe()
1931 efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort, in efct_hw_io_abort() argument
1938 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", in efct_hw_io_abort()
1939 hw, io_to_abort); in efct_hw_io_abort()
1943 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_io_abort()
1944 efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n", in efct_hw_io_abort()
1945 hw->state); in efct_hw_io_abort()
1952 efc_log_debug(hw->os, in efct_hw_io_abort()
1960 efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n", in efct_hw_io_abort()
1974 efc_log_debug(hw->os, in efct_hw_io_abort()
1998 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort); in efct_hw_io_abort()
2000 efc_log_err(hw->os, "can't allocate request tag\n"); in efct_hw_io_abort()
2024 efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe); in efct_hw_io_abort()
2041 efct_hw_reqtag_pool_free(struct efct_hw *hw) in efct_hw_reqtag_pool_free() argument
2044 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; in efct_hw_reqtag_pool_free()
2056 hw->wq_reqtag_pool = NULL; in efct_hw_reqtag_pool_free()
2061 efct_hw_reqtag_pool_alloc(struct efct_hw *hw) in efct_hw_reqtag_pool_alloc() argument
2091 efct_hw_reqtag_alloc(struct efct_hw *hw, in efct_hw_reqtag_alloc() argument
2096 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; in efct_hw_reqtag_alloc()
2122 efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb) in efct_hw_reqtag_free() argument
2125 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; in efct_hw_reqtag_free()
2128 efc_log_err(hw->os, "WQCB is already freed\n"); in efct_hw_reqtag_free()
2134 list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist); in efct_hw_reqtag_free()
2139 efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index) in efct_hw_reqtag_get_instance() argument
2143 wqcb = hw->wq_reqtag_pool->tags[instance_index]; in efct_hw_reqtag_get_instance()
2145 efc_log_err(hw->os, "wqcb for instance %d is null\n", in efct_hw_reqtag_get_instance()
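Request tags tie a WQE completion back to its handler: alloc pops a preallocated hw_wq_callback from the pool's freelist and binds callback/arg, free pushes it back onto the freelist (source line 2134), and get_instance is a direct index into tags[] (source line 2143). A simplified sketch of the alloc side; the lock field and callback signature are assumptions:

static struct hw_wq_callback *
reqtag_alloc_sketch(struct reqtag_pool *pool,
		    void (*callback)(void *arg, u8 *cqe, int status), void *arg)
{
	struct hw_wq_callback *wqcb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->freelist)) {
		wqcb = list_first_entry(&pool->freelist,
					struct hw_wq_callback, list_entry);
		list_del_init(&wqcb->list_entry);
		wqcb->callback = callback;
		wqcb->arg = arg;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return wqcb;	/* NULL means the pool is exhausted */
}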
2173 efct_hw_process(struct efct_hw *hw, u32 vector, in efct_hw_process() argument
2189 if (hw->state == EFCT_HW_STATE_UNINITIALIZED) in efct_hw_process()
2193 eq = hw->hw_eq[vector]; in efct_hw_process()
2199 return efct_hw_eq_process(hw, eq, max_isr_time_msec); in efct_hw_process()
2203 efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq, in efct_hw_eq_process() argument
2215 while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) { in efct_hw_eq_process()
2219 rc = sli_eq_parse(&hw->sli, eqe, &cq_id); in efct_hw_eq_process()
2228 for (i = 0; i < hw->cq_count; i++) in efct_hw_eq_process()
2229 efct_hw_cq_process(hw, hw->hw_cq[i]); in efct_hw_eq_process()
2237 index = efct_hw_queue_hash_find(hw->cq_hash, cq_id); in efct_hw_eq_process()
2240 efct_hw_cq_process(hw, hw->hw_cq[index]); in efct_hw_eq_process()
2242 efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id); in efct_hw_eq_process()
2246 sli_queue_arm(&hw->sli, eq->queue, false); in efct_hw_eq_process()
2255 sli_queue_eq_arm(&hw->sli, eq->queue, true); in efct_hw_eq_process()
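EQ processing fans out to the completion queues: each EQE is parsed for a CQ id, resolved through cq_hash, and that CQ processed; a parse failure sweeps every CQ rather than risk missing a completion, and the EQ is rearmed on exit (source lines 2215-2255). A sketch of the dispatch with the done/time-budget bookkeeping omitted:

	u8 eqe[SLI4_EQE_SIZE];	/* size macro assumed */
	u16 cq_id;
	int index;
	u32 i;

	while (!sli_eq_read(&hw->sli, eq->queue, eqe)) {
		if (sli_eq_parse(&hw->sli, eqe, &cq_id)) {
			/* Can't tell which CQ fired: sweep them all. */
			for (i = 0; i < hw->cq_count; i++)
				efct_hw_cq_process(hw, hw->hw_cq[i]);
			continue;
		}

		index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
		if (index >= 0)
			efct_hw_cq_process(hw, hw->hw_cq[index]);
		else
			efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
	}

	sli_queue_eq_arm(&hw->sli, eq->queue, true);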
2279 queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf); in _efct_hw_wq_write()
2303 efct_hw_fill_abort_wqe(wq->hw, wqe); in hw_wq_submit_pending()
2314 efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq) in efct_hw_cq_process() argument
2325 while (!sli_cq_read(&hw->sli, cq->queue, cqe)) { in efct_hw_cq_process()
2328 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); in efct_hw_cq_process()
2351 sli_cqe_async(&hw->sli, cqe); in efct_hw_cq_process()
2358 efct_hw_mq_process(hw, status, hw->mq); in efct_hw_cq_process()
2361 efct_hw_wq_process(hw, cq, cqe, status, rid); in efct_hw_cq_process()
2368 index = efct_hw_queue_hash_find(hw->wq_hash, wq_id); in efct_hw_cq_process()
2371 wq = hw->hw_wq[index]; in efct_hw_cq_process()
2373 efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id); in efct_hw_cq_process()
2376 /* Submit any HW IOs that are on the WQ pending list */ in efct_hw_cq_process()
2383 efct_hw_rqpair_process_rq(hw, cq, cqe); in efct_hw_cq_process()
2386 efct_hw_xabt_process(hw, cq, cqe, rid); in efct_hw_cq_process()
2390 efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n", in efct_hw_cq_process()
2400 sli_queue_arm(&hw->sli, cq->queue, false); in efct_hw_cq_process()
2403 sli_queue_arm(&hw->sli, cq->queue, true); in efct_hw_cq_process()
2413 efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq, in efct_hw_wq_process() argument
2420 efc_log_err(hw->os, "requeue xri failed, status = %d\n", in efct_hw_wq_process()
2425 wqcb = efct_hw_reqtag_get_instance(hw, rid); in efct_hw_wq_process()
2427 efc_log_err(hw->os, "invalid request tag: x%x\n", rid); in efct_hw_wq_process()
2432 efc_log_err(hw->os, "wqcb callback is NULL\n"); in efct_hw_wq_process()
2440 efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq, in efct_hw_xabt_process() argument
2447 io = efct_hw_io_lookup(hw, rid); in efct_hw_xabt_process()
2450 efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid); in efct_hw_xabt_process()
2455 efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); in efct_hw_xabt_process()
2482 spin_lock_irqsave(&hw->io_lock, flags); in efct_hw_xabt_process()
2493 efct_hw_io_free_move_correct_list(hw, io); in efct_hw_xabt_process()
2496 spin_unlock_irqrestore(&hw->io_lock, flags); in efct_hw_xabt_process()
2500 efct_hw_flush(struct efct_hw *hw) in efct_hw_flush() argument
2505 for (i = 0; i < hw->eq_count; i++) in efct_hw_flush()
2506 efct_hw_process(hw, i, ~0); in efct_hw_flush()
2547 efct_hw_fill_abort_wqe(wq->hw, wqe); in efct_hw_wq_write()
2572 struct efct_hw *hw = &efct->hw; in efct_hw_bls_send() local
2577 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_bls_send()
2578 efc_log_err(hw->os, in efct_hw_bls_send()
2579 "cannot send BLS, HW state=%d\n", hw->state); in efct_hw_bls_send()
2583 hio = efct_hw_io_alloc(hw); in efct_hw_bls_send()
2585 efc_log_err(hw->os, "HIO allocation failed\n"); in efct_hw_bls_send()
2608 if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, in efct_hw_bls_send()
2610 efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n"); in efct_hw_bls_send()
2627 efc_log_err(hw->os, in efct_hw_bls_send()
2699 struct efct_hw *hw = &efct->hw; in efct_els_hw_srrs_send() local
2708 hio = efct_hw_io_alloc(hw); in efct_els_hw_srrs_send()
2714 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_els_hw_srrs_send()
2715 efc_log_debug(hw->os, in efct_els_hw_srrs_send()
2716 "cannot send SRRS, HW state=%d\n", hw->state); in efct_els_hw_srrs_send()
2768 if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, in efct_els_hw_srrs_send()
2770 efc_log_err(hw->os, "REQ WQE error\n"); in efct_els_hw_srrs_send()
2782 if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send, in efct_els_hw_srrs_send()
2784 efc_log_err(hw->os, "RSP WQE error\n"); in efct_els_hw_srrs_send()
2796 if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, in efct_els_hw_srrs_send()
2798 efc_log_err(hw->os, "GEN WQE error\n"); in efct_els_hw_srrs_send()
2810 if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, in efct_els_hw_srrs_send()
2812 efc_log_err(hw->os, "XMIT SEQ WQE error\n"); in efct_els_hw_srrs_send()
2818 efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type); in efct_els_hw_srrs_send()
2836 efc_log_err(hw->os, in efct_els_hw_srrs_send()
2846 efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type, in efct_hw_io_send() argument
2854 pr_err("bad parameter hw=%p io=%p\n", hw, io); in efct_hw_io_send()
2858 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_io_send()
2859 efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state); in efct_hw_io_send()
2891 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2895 efc_log_err(hw->os, "TRECEIVE WQE error\n"); in efct_hw_io_send()
2911 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2915 efc_log_err(hw->os, "TSEND WQE error\n"); in efct_hw_io_send()
2931 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2934 efc_log_err(hw->os, "TRSP WQE error\n"); in efct_hw_io_send()
2941 efc_log_err(hw->os, "unsupported IO type %#x\n", type); in efct_hw_io_send()
2952 hw->tcmd_wq_submit[io->wq->instance]++; in efct_hw_io_send()
2960 efc_log_err(hw->os, in efct_hw_io_send()
2970 efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr, in efct_hw_send_frame() argument
2984 ctx->hw = hw; in efct_hw_send_frame()
2987 ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg); in efct_hw_send_frame()
2989 efc_log_err(hw->os, "can't allocate request tag\n"); in efct_hw_send_frame()
2993 wq = hw->hw_wq[0]; in efct_hw_send_frame()
3001 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, in efct_hw_send_frame()
3006 efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); in efct_hw_send_frame()
3013 efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc); in efct_hw_send_frame()
3023 efct_hw_cb_link_stat(struct efct_hw *hw, int status, in efct_hw_cb_link_stat() argument
3097 efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters, in efct_hw_get_link_stats() argument
3115 /* Send the HW command */ in efct_hw_get_link_stats()
3116 if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters, in efct_hw_get_link_stats()
3118 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT, in efct_hw_get_link_stats()
3128 efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg) in efct_hw_cb_host_stat() argument
3182 efct_hw_get_host_stats(struct efct_hw *hw, u8 cc, in efct_hw_get_host_stats() argument
3199 /* Send the HW command to get the host stats */ in efct_hw_get_host_stats()
3200 if (!sli_cmd_read_status(&hw->sli, mbxdata, cc)) in efct_hw_get_host_stats()
3201 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT, in efct_hw_get_host_stats()
3205 efc_log_debug(hw->os, "READ_HOST_STATS failed\n"); in efct_hw_get_host_stats()
3219 efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg) in efct_hw_async_cb() argument
3225 (*ctx->callback)(hw, status, mqe, ctx->arg); in efct_hw_async_cb()
3232 efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg) in efct_hw_async_call() argument
3250 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) { in efct_hw_async_call()
3251 efc_log_err(hw->os, "COMMON_NOP format failure\n"); in efct_hw_async_call()
3256 rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb, in efct_hw_async_call()
3259 efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc); in efct_hw_async_call()
3267 efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg) in efct_hw_cb_fw_write() argument
3299 efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size, in efct_hw_firmware_write() argument
3318 if (!sli_cmd_common_write_object(&hw->sli, mbxdata, in efct_hw_firmware_write()
3321 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT, in efct_hw_firmware_write()
3325 efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n"); in efct_hw_firmware_write()
3333 efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe, in efct_hw_cb_port_control() argument
3340 efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl, in efct_hw_port_control() argument
3352 if (!sli_cmd_config_link(&hw->sli, link)) in efct_hw_port_control()
3353 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT, in efct_hw_port_control()
3357 efc_log_err(hw->os, "CONFIG_LINK failed\n"); in efct_hw_port_control()
3360 speed = hw->config.speed; in efct_hw_port_control()
3364 if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa)) in efct_hw_port_control()
3365 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT, in efct_hw_port_control()
3369 efc_log_err(hw->os, "INIT_LINK failed\n"); in efct_hw_port_control()
3373 if (!sli_cmd_down_link(&hw->sli, link)) in efct_hw_port_control()
3374 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT, in efct_hw_port_control()
3378 efc_log_err(hw->os, "DOWN_LINK failed\n"); in efct_hw_port_control()
3382 efc_log_debug(hw->os, "unhandled control %#x\n", ctrl); in efct_hw_port_control()
3390 efct_hw_teardown(struct efct_hw *hw) in efct_hw_teardown() argument
3396 struct efct *efct = hw->os; in efct_hw_teardown()
3398 destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE); in efct_hw_teardown()
3399 free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED); in efct_hw_teardown()
3402 if (hw->sliport_healthcheck) { in efct_hw_teardown()
3403 hw->sliport_healthcheck = 0; in efct_hw_teardown()
3404 efct_hw_config_sli_port_health_check(hw, 0, 0); in efct_hw_teardown()
3407 if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) { in efct_hw_teardown()
3408 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS; in efct_hw_teardown()
3410 efct_hw_flush(hw); in efct_hw_teardown()
3412 if (list_empty(&hw->cmd_head)) in efct_hw_teardown()
3413 efc_log_debug(hw->os, in efct_hw_teardown()
3416 efc_log_debug(hw->os, in efct_hw_teardown()
3420 efct_hw_command_cancel(hw); in efct_hw_teardown()
3422 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS; in efct_hw_teardown()
3426 hw->rnode_mem.size, hw->rnode_mem.virt, in efct_hw_teardown()
3427 hw->rnode_mem.phys); in efct_hw_teardown()
3428 memset(&hw->rnode_mem, 0, sizeof(struct efc_dma)); in efct_hw_teardown()
3430 if (hw->io) { in efct_hw_teardown()
3431 for (i = 0; i < hw->config.n_io; i++) { in efct_hw_teardown()
3432 if (hw->io[i] && hw->io[i]->sgl && in efct_hw_teardown()
3433 hw->io[i]->sgl->virt) { in efct_hw_teardown()
3435 hw->io[i]->sgl->size, in efct_hw_teardown()
3436 hw->io[i]->sgl->virt, in efct_hw_teardown()
3437 hw->io[i]->sgl->phys); in efct_hw_teardown()
3439 kfree(hw->io[i]); in efct_hw_teardown()
3440 hw->io[i] = NULL; in efct_hw_teardown()
3442 kfree(hw->io); in efct_hw_teardown()
3443 hw->io = NULL; in efct_hw_teardown()
3444 kfree(hw->wqe_buffs); in efct_hw_teardown()
3445 hw->wqe_buffs = NULL; in efct_hw_teardown()
3448 dma = &hw->xfer_rdy; in efct_hw_teardown()
3453 dma = &hw->loop_map; in efct_hw_teardown()
3458 for (i = 0; i < hw->wq_count; i++) in efct_hw_teardown()
3459 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, in efct_hw_teardown()
3462 for (i = 0; i < hw->rq_count; i++) in efct_hw_teardown()
3463 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, in efct_hw_teardown()
3466 for (i = 0; i < hw->mq_count; i++) in efct_hw_teardown()
3467 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, in efct_hw_teardown()
3470 for (i = 0; i < hw->cq_count; i++) in efct_hw_teardown()
3471 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, in efct_hw_teardown()
3474 for (i = 0; i < hw->eq_count; i++) in efct_hw_teardown()
3475 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, in efct_hw_teardown()
3479 efct_hw_rx_free(hw); in efct_hw_teardown()
3481 efct_hw_queue_teardown(hw); in efct_hw_teardown()
3483 kfree(hw->wq_cpu_array); in efct_hw_teardown()
3485 sli_teardown(&hw->sli); in efct_hw_teardown()
3488 hw->state = EFCT_HW_STATE_UNINITIALIZED; in efct_hw_teardown()
3491 kfree(hw->seq_pool); in efct_hw_teardown()
3492 hw->seq_pool = NULL; in efct_hw_teardown()
3495 efct_hw_reqtag_pool_free(hw); in efct_hw_teardown()
3497 mempool_destroy(hw->cmd_ctx_pool); in efct_hw_teardown()
3498 mempool_destroy(hw->mbox_rqst_pool); in efct_hw_teardown()
3500 /* Mark HW setup as not having been called */ in efct_hw_teardown()
3501 hw->hw_setup_called = false; in efct_hw_teardown()
3505 efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset, in efct_hw_sli_reset() argument
3512 efc_log_debug(hw->os, "issuing function level reset\n"); in efct_hw_sli_reset()
3513 if (sli_reset(&hw->sli)) { in efct_hw_sli_reset()
3514 efc_log_err(hw->os, "sli_reset failed\n"); in efct_hw_sli_reset()
3519 efc_log_debug(hw->os, "issuing firmware reset\n"); in efct_hw_sli_reset()
3520 if (sli_fw_reset(&hw->sli)) { in efct_hw_sli_reset()
3521 efc_log_err(hw->os, "sli_fw_reset failed\n"); in efct_hw_sli_reset()
3528 efc_log_debug(hw->os, "issuing function level reset\n"); in efct_hw_sli_reset()
3529 if (sli_reset(&hw->sli)) { in efct_hw_sli_reset()
3530 efc_log_err(hw->os, "sli_reset failed\n"); in efct_hw_sli_reset()
3535 efc_log_err(hw->os, "unknown type - no reset performed\n"); in efct_hw_sli_reset()
3536 hw->state = prev_state; in efct_hw_sli_reset()
3545 efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset) in efct_hw_reset() argument
3548 enum efct_hw_state prev_state = hw->state; in efct_hw_reset()
3550 if (hw->state != EFCT_HW_STATE_ACTIVE) in efct_hw_reset()
3551 efc_log_debug(hw->os, in efct_hw_reset()
3552 "HW state %d is not active\n", hw->state); in efct_hw_reset()
3554 hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS; in efct_hw_reset()
3562 return efct_hw_sli_reset(hw, reset, prev_state); in efct_hw_reset()
3565 efct_hw_flush(hw); in efct_hw_reset()
3567 if (list_empty(&hw->cmd_head)) in efct_hw_reset()
3568 efc_log_debug(hw->os, in efct_hw_reset()
3571 efc_log_err(hw->os, in efct_hw_reset()
3576 rc = efct_hw_sli_reset(hw, reset, prev_state); in efct_hw_reset()