Lines Matching +full:io +full:- +full:reset
1 // SPDX-License-Identifier: GPL-2.0
36 hw->link.status = SLI4_LINK_STATUS_MAX; in efct_hw_link_event_init()
37 hw->link.topology = SLI4_LINK_TOPO_NONE; in efct_hw_link_event_init()
38 hw->link.medium = SLI4_LINK_MEDIUM_MAX; in efct_hw_link_event_init()
39 hw->link.speed = 0; in efct_hw_link_event_init()
40 hw->link.loop_map = NULL; in efct_hw_link_event_init()
41 hw->link.fc_id = U32_MAX; in efct_hw_link_event_init()
50 struct efct *efct = hw->os; in efct_hw_read_max_dump_size()
55 if (PCI_FUNC(efct->pci->devfn) != 0) in efct_hw_read_max_dump_size()
58 if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0)) in efct_hw_read_max_dump_size()
59 return -EIO; in efct_hw_read_max_dump_size()
66 efc_log_debug(hw->os, "set dump location cmd failed\n"); in efct_hw_read_max_dump_size()
70 hw->dump_size = in efct_hw_read_max_dump_size()
71 le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN; in efct_hw_read_max_dump_size()
73 efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size); in efct_hw_read_max_dump_size()
85 struct efct *efct = hw->os; in __efct_read_topology_cb()
87 if (status || le16_to_cpu(read_topo->hdr.status)) { in __efct_read_topology_cb()
88 efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status, in __efct_read_topology_cb()
89 le16_to_cpu(read_topo->hdr.status)); in __efct_read_topology_cb()
90 return -EIO; in __efct_read_topology_cb()
93 switch (le32_to_cpu(read_topo->dw2_attentype) & in __efct_read_topology_cb()
96 hw->link.status = SLI4_LINK_STATUS_UP; in __efct_read_topology_cb()
99 hw->link.status = SLI4_LINK_STATUS_DOWN; in __efct_read_topology_cb()
102 hw->link.status = SLI4_LINK_STATUS_NO_ALPA; in __efct_read_topology_cb()
105 hw->link.status = SLI4_LINK_STATUS_MAX; in __efct_read_topology_cb()
109 switch (read_topo->topology) { in __efct_read_topology_cb()
111 hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL; in __efct_read_topology_cb()
114 hw->link.topology = SLI4_LINK_TOPO_FC_AL; in __efct_read_topology_cb()
115 if (hw->link.status == SLI4_LINK_STATUS_UP) in __efct_read_topology_cb()
116 hw->link.loop_map = hw->loop_map.virt; in __efct_read_topology_cb()
117 hw->link.fc_id = read_topo->acquired_al_pa; in __efct_read_topology_cb()
120 hw->link.topology = SLI4_LINK_TOPO_MAX; in __efct_read_topology_cb()
124 hw->link.medium = SLI4_LINK_MEDIUM_FC; in __efct_read_topology_cb()
126 speed = (le32_to_cpu(read_topo->currlink_state) & in __efct_read_topology_cb()
130 hw->link.speed = 1 * 1000; in __efct_read_topology_cb()
133 hw->link.speed = 2 * 1000; in __efct_read_topology_cb()
136 hw->link.speed = 4 * 1000; in __efct_read_topology_cb()
139 hw->link.speed = 8 * 1000; in __efct_read_topology_cb()
142 hw->link.speed = 16 * 1000; in __efct_read_topology_cb()
145 hw->link.speed = 32 * 1000; in __efct_read_topology_cb()
148 hw->link.speed = 64 * 1000; in __efct_read_topology_cb()
151 hw->link.speed = 128 * 1000; in __efct_read_topology_cb()
155 drec.speed = hw->link.speed; in __efct_read_topology_cb()
156 drec.fc_id = hw->link.fc_id; in __efct_read_topology_cb()
158 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec); in __efct_read_topology_cb()
170 struct efct *efct = hw->os; in efct_hw_cb_link()
174 switch (event->status) { in efct_hw_cb_link()
177 hw->link = *event; in efct_hw_cb_link()
178 efct->efcport->link_status = EFC_LINK_STATUS_UP; in efct_hw_cb_link()
180 if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) { in efct_hw_cb_link()
183 efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n", in efct_hw_cb_link()
184 event->speed); in efct_hw_cb_link()
185 drec.speed = event->speed; in efct_hw_cb_link()
186 drec.fc_id = event->fc_id; in efct_hw_cb_link()
188 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, in efct_hw_cb_link()
190 } else if (event->topology == SLI4_LINK_TOPO_FC_AL) { in efct_hw_cb_link()
193 efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n", in efct_hw_cb_link()
194 event->speed); in efct_hw_cb_link()
196 if (!sli_cmd_read_topology(&hw->sli, buf, in efct_hw_cb_link()
197 &hw->loop_map)) { in efct_hw_cb_link()
203 efc_log_debug(hw->os, "READ_TOPOLOGY failed\n"); in efct_hw_cb_link()
205 efc_log_info(hw->os, "%s(%#x), speed is %d\n", in efct_hw_cb_link()
207 event->topology, event->speed); in efct_hw_cb_link()
211 efc_log_info(hw->os, "Link down\n"); in efct_hw_cb_link()
213 hw->link.status = event->status; in efct_hw_cb_link()
214 efct->efcport->link_status = EFC_LINK_STATUS_DOWN; in efct_hw_cb_link()
216 d = efct->efcport->domain; in efct_hw_cb_link()
218 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d); in efct_hw_cb_link()
221 efc_log_debug(hw->os, "unhandled link status %#x\n", in efct_hw_cb_link()
222 event->status); in efct_hw_cb_link()
234 if (hw->hw_setup_called) in efct_hw_setup()
239 * needs allocation. If a structure is non-NULL, efct_hw_init() won't in efct_hw_setup()
244 hw->hw_setup_called = true; in efct_hw_setup()
246 hw->os = os; in efct_hw_setup()
248 mutex_init(&hw->bmbx_lock); in efct_hw_setup()
249 spin_lock_init(&hw->cmd_lock); in efct_hw_setup()
250 INIT_LIST_HEAD(&hw->cmd_head); in efct_hw_setup()
251 INIT_LIST_HEAD(&hw->cmd_pending); in efct_hw_setup()
252 hw->cmd_head_count = 0; in efct_hw_setup()
255 hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ, in efct_hw_setup()
257 if (!hw->cmd_ctx_pool) { in efct_hw_setup()
258 efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n"); in efct_hw_setup()
259 return -EIO; in efct_hw_setup()
263 hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ, in efct_hw_setup()
265 if (!hw->mbox_rqst_pool) { in efct_hw_setup()
266 efc_log_err(hw->os, "failed to allocate mbox request pool\n"); in efct_hw_setup()
267 return -EIO; in efct_hw_setup()
270 spin_lock_init(&hw->io_lock); in efct_hw_setup()
271 INIT_LIST_HEAD(&hw->io_inuse); in efct_hw_setup()
272 INIT_LIST_HEAD(&hw->io_free); in efct_hw_setup()
273 INIT_LIST_HEAD(&hw->io_wait_free); in efct_hw_setup()
275 atomic_set(&hw->io_alloc_failed_count, 0); in efct_hw_setup()
277 hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4; in efct_hw_setup()
278 if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) { in efct_hw_setup()
279 efc_log_err(hw->os, "SLI setup failed\n"); in efct_hw_setup()
280 return -EIO; in efct_hw_setup()
285 sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw); in efct_hw_setup()
290 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) in efct_hw_setup()
291 hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i]; in efct_hw_setup()
294 * the WQ to allow for 2 completions per IO. This allows us to in efct_hw_setup()
295 * handle multi-phase as well as aborts. in efct_hw_setup()
297 hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2; in efct_hw_setup()
303 hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD; in efct_hw_setup()
304 hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size; in efct_hw_setup()
307 hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus; in efct_hw_setup()
309 max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED; in efct_hw_setup()
311 hw->config.n_sgl = max_sgl; in efct_hw_setup()
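
The sizing statement a few lines above (num_qentries[SLI4_QTYPE_WQ] = num_qentries[SLI4_QTYPE_CQ] / 2) keeps each CQ twice as deep as its WQ so that every outstanding WQE can post two completions, for example a normal work completion plus an abort/XRI cleanup completion, without overflowing the CQ. A minimal standalone sketch of that arithmetic, using a hypothetical CQ limit rather than the real hw->sli.qinfo value:

#include <assert.h>
#include <stdio.h>

/* Hypothetical illustration only: derive a WQ depth from a CQ depth so
 * that each outstanding WQE may post up to two CQEs before the CQ fills.
 */
static unsigned int wq_entries_from_cq(unsigned int cq_entries)
{
	return cq_entries / 2;
}

int main(void)
{
	unsigned int cq_entries = 4096;	/* assumed SLI4 CQ limit, illustration only */
	unsigned int wq_entries = wq_entries_from_cq(cq_entries);

	assert(2 * wq_entries <= cq_entries);
	printf("CQ %u entries -> WQ %u entries (2 CQEs per WQE fit)\n",
	       cq_entries, wq_entries);
	return 0;
}
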
321 efc_log_info(hw->os, in efct_logfcfi()
322 "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n", in efct_logfcfi()
323 j, hw->config.filter_def[j], i, id); in efct_logfcfi()
327 efct_hw_init_free_io(struct efct_hw_io *io) in efct_hw_init_free_io() argument
330 * Set io->done to NULL, to avoid any callbacks, should in efct_hw_init_free_io()
333 io->done = NULL; in efct_hw_init_free_io()
334 io->abort_done = NULL; in efct_hw_init_free_io()
335 io->status_saved = false; in efct_hw_init_free_io()
336 io->abort_in_progress = false; in efct_hw_init_free_io()
337 io->type = 0xFFFF; in efct_hw_init_free_io()
338 io->wq = NULL; in efct_hw_init_free_io()
353 efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_restore_sgl() argument
356 io->sgl = &io->def_sgl; in efct_hw_io_restore_sgl()
357 io->sgl_count = io->def_sgl_count; in efct_hw_io_restore_sgl()
363 struct efct_hw_io *io = arg; in efct_hw_wq_process_io() local
364 struct efct_hw *hw = io->hw; in efct_hw_wq_process_io()
370 if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0) in efct_hw_wq_process_io()
371 io->xbusy = false; in efct_hw_wq_process_io()
374 switch (io->type) { in efct_hw_wq_process_io()
379 sli_fc_els_did(&hw->sli, cqe, &ext); in efct_hw_wq_process_io()
380 len = sli_fc_response_length(&hw->sli, cqe); in efct_hw_wq_process_io()
386 len = sli_fc_response_length(&hw->sli, cqe); in efct_hw_wq_process_io()
389 len = sli_fc_io_length(&hw->sli, cqe); in efct_hw_wq_process_io()
392 len = sli_fc_io_length(&hw->sli, cqe); in efct_hw_wq_process_io()
397 /* release the count for re-posting the buffer */ in efct_hw_wq_process_io()
398 /* efct_hw_io_free(hw, io); */ in efct_hw_wq_process_io()
401 efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n", in efct_hw_wq_process_io()
402 io->type, io->indicator); in efct_hw_wq_process_io()
406 ext = sli_fc_ext_status(&hw->sli, cqe); in efct_hw_wq_process_io()
408 * If this is an originator IO and XB is set, then issue an in efct_hw_wq_process_io()
409 * abort for the IO from within the HW in efct_hw_wq_process_io()
411 if (efct_hw_iotype_is_originator(io->type) && in efct_hw_wq_process_io()
412 wcqe->flags & SLI4_WCQE_XB) { in efct_hw_wq_process_io()
415 efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", in efct_hw_wq_process_io()
416 io->indicator, io->reqtag); in efct_hw_wq_process_io()
419 * Because targets may send a response when the IO in efct_hw_wq_process_io()
421 * XRI_ABORTED CQE to issue the IO callback in efct_hw_wq_process_io()
423 rc = efct_hw_io_abort(hw, io, false, NULL, NULL); in efct_hw_wq_process_io()
429 io->status_saved = true; in efct_hw_wq_process_io()
430 io->saved_status = status; in efct_hw_wq_process_io()
431 io->saved_ext = ext; in efct_hw_wq_process_io()
432 io->saved_len = len; in efct_hw_wq_process_io()
434 } else if (rc == -EINPROGRESS) { in efct_hw_wq_process_io()
440 efc_log_debug(hw->os, "%s%#x tag=%#x\n", in efct_hw_wq_process_io()
442 io->indicator, io->reqtag); in efct_hw_wq_process_io()
448 efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n", in efct_hw_wq_process_io()
450 io->indicator, io->reqtag, rc); in efct_hw_wq_process_io()
455 if (io->done) { in efct_hw_wq_process_io()
456 efct_hw_done_t done = io->done; in efct_hw_wq_process_io()
458 io->done = NULL; in efct_hw_wq_process_io()
460 if (io->status_saved) { in efct_hw_wq_process_io()
462 status = io->saved_status; in efct_hw_wq_process_io()
463 len = io->saved_len; in efct_hw_wq_process_io()
464 ext = io->saved_ext; in efct_hw_wq_process_io()
465 io->status_saved = false; in efct_hw_wq_process_io()
469 efct_hw_io_restore_sgl(hw, io); in efct_hw_wq_process_io()
470 done(io, len, status, ext, io->arg); in efct_hw_wq_process_io()
481 struct efct_hw_io *io = NULL; in efct_hw_setup_io() local
487 struct efct *efct = hw->os; in efct_hw_setup_io()
489 if (!hw->io) { in efct_hw_setup_io()
490 hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL); in efct_hw_setup_io()
491 if (!hw->io) in efct_hw_setup_io()
492 return -ENOMEM; in efct_hw_setup_io()
494 memset(hw->io, 0, hw->config.n_io * sizeof(io)); in efct_hw_setup_io()
496 for (i = 0; i < hw->config.n_io; i++) { in efct_hw_setup_io()
497 hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL); in efct_hw_setup_io()
498 if (!hw->io[i]) in efct_hw_setup_io()
502 /* Create WQE buffs for IO */ in efct_hw_setup_io()
503 hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size), in efct_hw_setup_io()
505 if (!hw->wqe_buffs) { in efct_hw_setup_io()
506 kfree(hw->io); in efct_hw_setup_io()
507 return -ENOMEM; in efct_hw_setup_io()
511 /* re-use existing IOs, including SGLs */ in efct_hw_setup_io()
516 dma = &hw->xfer_rdy; in efct_hw_setup_io()
517 dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io; in efct_hw_setup_io()
518 dma->virt = dma_alloc_coherent(&efct->pci->dev, in efct_hw_setup_io()
519 dma->size, &dma->phys, GFP_KERNEL); in efct_hw_setup_io()
520 if (!dma->virt) in efct_hw_setup_io()
521 return -ENOMEM; in efct_hw_setup_io()
523 xfer_virt = (uintptr_t)hw->xfer_rdy.virt; in efct_hw_setup_io()
524 xfer_phys = hw->xfer_rdy.phys; in efct_hw_setup_io()
526 /* Initialize the pool of HW IO objects */ in efct_hw_setup_io()
527 for (i = 0; i < hw->config.n_io; i++) { in efct_hw_setup_io()
530 io = hw->io[i]; in efct_hw_setup_io()
532 /* initialize IO fields */ in efct_hw_setup_io()
533 io->hw = hw; in efct_hw_setup_io()
536 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size]; in efct_hw_setup_io()
538 /* Allocate the request tag for this IO */ in efct_hw_setup_io()
539 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io); in efct_hw_setup_io()
541 efc_log_err(hw->os, "can't allocate request tag\n"); in efct_hw_setup_io()
542 return -ENOSPC; in efct_hw_setup_io()
544 io->reqtag = wqcb->instance_index; in efct_hw_setup_io()
547 efct_hw_init_free_io(io); in efct_hw_setup_io()
549 /* The XB flag isn't cleared on IO free, so init to zero */ in efct_hw_setup_io()
550 io->xbusy = 0; in efct_hw_setup_io()
552 if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI, in efct_hw_setup_io()
553 &io->indicator, &index)) { in efct_hw_setup_io()
554 efc_log_err(hw->os, in efct_hw_setup_io()
556 return -ENOMEM; in efct_hw_setup_io()
560 dma = &io->def_sgl; in efct_hw_setup_io()
561 dma->size = hw->config.n_sgl * in efct_hw_setup_io()
563 dma->virt = dma_alloc_coherent(&efct->pci->dev, in efct_hw_setup_io()
564 dma->size, &dma->phys, in efct_hw_setup_io()
566 if (!dma->virt) { in efct_hw_setup_io()
567 efc_log_err(hw->os, "dma_alloc fail %d\n", i); in efct_hw_setup_io()
568 memset(&io->def_sgl, 0, in efct_hw_setup_io()
570 return -ENOMEM; in efct_hw_setup_io()
573 io->def_sgl_count = hw->config.n_sgl; in efct_hw_setup_io()
574 io->sgl = &io->def_sgl; in efct_hw_setup_io()
575 io->sgl_count = io->def_sgl_count; in efct_hw_setup_io()
577 if (hw->xfer_rdy.size) { in efct_hw_setup_io()
578 io->xfer_rdy.virt = (void *)xfer_virt; in efct_hw_setup_io()
579 io->xfer_rdy.phys = xfer_phys; in efct_hw_setup_io()
580 io->xfer_rdy.size = sizeof(struct fcp_txrdy); in efct_hw_setup_io()
589 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) { in efct_hw_setup_io()
590 kfree(hw->io[i]); in efct_hw_setup_io()
591 hw->io[i] = NULL; in efct_hw_setup_io()
594 kfree(hw->io); in efct_hw_setup_io()
595 hw->io = NULL; in efct_hw_setup_io()
597 return -ENOMEM; in efct_hw_setup_io()
604 struct efct_hw_io *io = NULL; in efct_hw_init_prereg_io() local
612 struct efct *efct = hw->os; in efct_hw_init_prereg_io()
616 return -ENOMEM; in efct_hw_init_prereg_io()
620 req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys, in efct_hw_init_prereg_io()
624 return -ENOMEM; in efct_hw_init_prereg_io()
627 for (n_rem = hw->config.n_io; n_rem; n_rem -= n) { in efct_hw_init_prereg_io()
636 if (hw->io[idx + n]->indicator != in efct_hw_init_prereg_io()
637 hw->io[idx + n - 1]->indicator + 1) in efct_hw_init_prereg_io()
641 sgls[n] = hw->io[idx + n]->sgl; in efct_hw_init_prereg_io()
644 if (sli_cmd_post_sgl_pages(&hw->sli, cmd, in efct_hw_init_prereg_io()
645 hw->io[idx]->indicator, n, sgls, NULL, &req)) { in efct_hw_init_prereg_io()
646 rc = -EIO; in efct_hw_init_prereg_io()
652 efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc); in efct_hw_init_prereg_io()
658 io = hw->io[idx]; in efct_hw_init_prereg_io()
659 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_init_prereg_io()
660 INIT_LIST_HEAD(&io->list_entry); in efct_hw_init_prereg_io()
661 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_init_prereg_io()
665 dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys); in efct_hw_init_prereg_io()
677 struct efct_hw_io *io = NULL; in efct_hw_init_io() local
680 prereg = hw->sli.params.sgl_pre_registered; in efct_hw_init_io()
685 for (i = 0; i < hw->config.n_io; i++, idx++) { in efct_hw_init_io()
686 io = hw->io[idx]; in efct_hw_init_io()
687 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_init_io()
688 INIT_LIST_HEAD(&io->list_entry); in efct_hw_init_io()
689 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_init_io()
705 sli_cmd_common_set_features(&hw->sli, buf, in efct_hw_config_set_fdt_xfer_hint()
710 efc_log_warn(hw->os, "set FDT hint %d failed: %d\n", in efct_hw_config_set_fdt_xfer_hint()
713 efc_log_info(hw->os, "Set FDT transfer hint to %d\n", in efct_hw_config_set_fdt_xfer_hint()
726 efc_log_info(hw->os, "using REG_FCFI standard\n"); in efct_hw_config_rq()
734 rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i]; in efct_hw_config_rq()
735 rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8); in efct_hw_config_rq()
736 rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16); in efct_hw_config_rq()
737 rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24); in efct_hw_config_rq()
745 min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ? in efct_hw_config_rq()
746 hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG; in efct_hw_config_rq()
748 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_config_rq()
752 u32 mask = (rq->filter_mask != 0) ? in efct_hw_config_rq()
753 rq->filter_mask : 1; in efct_hw_config_rq()
758 rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id); in efct_hw_config_rq()
759 efct_logfcfi(hw, j, i, rq->hdr->id); in efct_hw_config_rq()
763 rc = -EIO; in efct_hw_config_rq()
764 if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg)) in efct_hw_config_rq()
768 efc_log_err(hw->os, "FCFI registration failed\n"); in efct_hw_config_rq()
771 hw->fcf_indicator = in efct_hw_config_rq()
772 le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi); in efct_hw_config_rq()
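
In the loop above, each 32-bit hw->config.filter_def word is unpacked into four 8-bit mask/match fields by shifting out one byte at a time (byte 0 r_ctl mask, byte 1 r_ctl match, byte 2 type mask, byte 3 type match for the REG_FCFI case; the MRQ variant below maps the same bytes to the fields in a different order). A standalone sketch of that unpacking, with a made-up filter value chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

struct rq_filter_fields {
	uint8_t r_ctl_mask;
	uint8_t r_ctl_match;
	uint8_t type_mask;
	uint8_t type_match;
};

/* Unpack one 32-bit filter_def word, least significant byte first,
 * mirroring the shifts used when building the REG_FCFI RQ config.
 */
static struct rq_filter_fields unpack_filter_def(uint32_t filter_def)
{
	struct rq_filter_fields f = {
		.r_ctl_mask  = (uint8_t)filter_def,
		.r_ctl_match = (uint8_t)(filter_def >> 8),
		.type_mask   = (uint8_t)(filter_def >> 16),
		.type_match  = (uint8_t)(filter_def >> 24),
	};
	return f;
}

int main(void)
{
	uint32_t filter_def = 0x00ff0600;	/* hypothetical example value */
	struct rq_filter_fields f = unpack_filter_def(filter_def);

	printf("r_ctl mask/match %02x/%02x, type mask/match %02x/%02x\n",
	       f.r_ctl_mask, f.r_ctl_match, f.type_mask, f.type_match);
	return 0;
}
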
792 rq_filter[i].type_mask = (u8)hw->config.filter_def[i]; in efct_hw_config_mrq()
793 rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8); in efct_hw_config_mrq()
794 rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16); in efct_hw_config_mrq()
795 rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24); in efct_hw_config_mrq()
798 rq = hw->hw_rq[0]; in efct_hw_config_mrq()
799 rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id); in efct_hw_config_mrq()
800 rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id); in efct_hw_config_mrq()
804 efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n", in efct_hw_config_mrq()
805 hw->hw_rq_count, hw->config.rq_selection_policy, mode); in efct_hw_config_mrq()
807 rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index, in efct_hw_config_mrq()
808 hw->config.rq_selection_policy, mrq_bitmask, in efct_hw_config_mrq()
809 hw->hw_mrq_count, rq_filter); in efct_hw_config_mrq()
811 efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n"); in efct_hw_config_mrq()
812 return -EIO; in efct_hw_config_mrq()
819 if ((rc) || (le16_to_cpu(rsp->hdr.status))) { in efct_hw_config_mrq()
820 efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n", in efct_hw_config_mrq()
821 rsp->hdr.command, le16_to_cpu(rsp->hdr.status)); in efct_hw_config_mrq()
822 return -EIO; in efct_hw_config_mrq()
826 hw->fcf_indicator = le16_to_cpu(rsp->fcfi); in efct_hw_config_mrq()
835 u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1); in efct_hw_queue_hash_add()
842 hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1); in efct_hw_queue_hash_add()
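
efct_hw_queue_hash_add() above, together with efct_hw_queue_hash_find() further down, maps a hardware queue id to a driver array index with an open-addressed hash: the id is masked by the power-of-two table size and collisions are resolved by linear probing. A standalone model of the same scheme, using a simplified entry type and a tiny table size instead of the driver's real structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_HASH_SIZE 8	/* power of two, like EFCT_HW_Q_HASH_SIZE (assumed) */

struct q_hash_entry {
	bool in_use;
	uint32_t id;	/* hardware queue id */
	uint32_t index;	/* driver-side array index */
};

static void q_hash_add(struct q_hash_entry *hash, uint32_t id, uint32_t index)
{
	uint32_t i = id & (Q_HASH_SIZE - 1);

	/* Linear probe until a free slot is found; the table must never
	 * be completely full.
	 */
	while (hash[i].in_use)
		i = (i + 1) & (Q_HASH_SIZE - 1);

	hash[i].in_use = true;
	hash[i].id = id;
	hash[i].index = index;
}

static int q_hash_find(const struct q_hash_entry *hash, uint32_t id)
{
	uint32_t i = id & (Q_HASH_SIZE - 1);

	/* Walk from the home slot; give up at the first empty slot. */
	do {
		if (hash[i].in_use && hash[i].id == id)
			return (int)hash[i].index;
		i = (i + 1) & (Q_HASH_SIZE - 1);
	} while (hash[i].in_use);

	return -1;
}

int main(void)
{
	struct q_hash_entry hash[Q_HASH_SIZE] = { 0 };

	q_hash_add(hash, 40, 0);	/* queue id 40 -> index 0 */
	q_hash_add(hash, 48, 1);	/* collides with 40 (both map to slot 0) */

	printf("id 48 -> index %d, id 49 -> index %d\n",
	       q_hash_find(hash, 48), q_hash_find(hash, 49));
	return 0;
}
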
869 sli_cmd_common_set_features(&hw->sli, buf, in efct_hw_config_sli_port_health_check()
874 efc_log_err(hw->os, "efct_hw_command returns %d\n", rc); in efct_hw_config_sli_port_health_check()
876 efc_log_debug(hw->os, "SLI Port Health Check is enabled\n"); in efct_hw_config_sli_port_health_check()
892 * Make sure the command lists are empty. If this is start-of-day, in efct_hw_init()
894 * If we've just gone through a reset, the command and command pending in efct_hw_init()
895 * lists should have been cleaned up as part of the reset in efct_hw_init()
898 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_init()
899 if (!list_empty(&hw->cmd_head)) { in efct_hw_init()
900 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_init()
901 efc_log_err(hw->os, "command found on cmd list\n"); in efct_hw_init()
902 return -EIO; in efct_hw_init()
904 if (!list_empty(&hw->cmd_pending)) { in efct_hw_init()
905 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_init()
906 efc_log_err(hw->os, "command found on pending list\n"); in efct_hw_init()
907 return -EIO; in efct_hw_init()
909 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_init()
915 * The IO queues must be initialized here for the reset case. The in efct_hw_init()
916 * efct_hw_init_io() function will re-add the IOs to the free list. in efct_hw_init()
921 /* If we are in this function due to a reset, there may be stale items in efct_hw_init()
925 while ((!list_empty(&hw->io_wait_free))) { in efct_hw_init()
927 temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io, in efct_hw_init()
929 list_del_init(&temp->list_entry); in efct_hw_init()
932 efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n", in efct_hw_init()
936 while ((!list_empty(&hw->io_inuse))) { in efct_hw_init()
938 temp = list_first_entry(&hw->io_inuse, struct efct_hw_io, in efct_hw_init()
940 list_del_init(&temp->list_entry); in efct_hw_init()
943 efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n", in efct_hw_init()
947 while ((!list_empty(&hw->io_free))) { in efct_hw_init()
949 temp = list_first_entry(&hw->io_free, struct efct_hw_io, in efct_hw_init()
951 list_del_init(&temp->list_entry); in efct_hw_init()
954 efc_log_debug(hw->os, "rmvd %d items from io_free list\n", in efct_hw_init()
958 if (hw->config.n_rq == 1) in efct_hw_init()
959 hw->sli.features &= (~SLI4_REQFEAT_MRQP); in efct_hw_init()
961 if (sli_init(&hw->sli)) { in efct_hw_init()
962 efc_log_err(hw->os, "SLI failed to initialize\n"); in efct_hw_init()
963 return -EIO; in efct_hw_init()
966 if (hw->sliport_healthcheck) { in efct_hw_init()
969 efc_log_err(hw->os, "Enable port Health check fail\n"); in efct_hw_init()
977 if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) { in efct_hw_init()
979 * Non-fatal error. In particular, we can disregard failure to in efct_hw_init()
987 memset(hw->cq_hash, 0, sizeof(hw->cq_hash)); in efct_hw_init()
988 efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n", in efct_hw_init()
991 memset(hw->rq_hash, 0, sizeof(hw->rq_hash)); in efct_hw_init()
992 efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n", in efct_hw_init()
995 memset(hw->wq_hash, 0, sizeof(hw->wq_hash)); in efct_hw_init()
996 efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n", in efct_hw_init()
1010 efc_log_err(hw->os, "rx_allocate failed\n"); in efct_hw_init()
1016 efc_log_err(hw->os, "WARNING - error posting RQ buffers\n"); in efct_hw_init()
1020 if (hw->config.n_eq == 1) { in efct_hw_init()
1023 efc_log_err(hw->os, "config rq failed %d\n", rc); in efct_hw_init()
1029 efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n"); in efct_hw_init()
1035 efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n"); in efct_hw_init()
1045 hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw); in efct_hw_init()
1046 if (!hw->wq_reqtag_pool) { in efct_hw_init()
1047 efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n"); in efct_hw_init()
1048 return -ENOMEM; in efct_hw_init()
1053 efc_log_err(hw->os, "IO allocation failure\n"); in efct_hw_init()
1059 efc_log_err(hw->os, "IO initialization failure\n"); in efct_hw_init()
1063 dma = &hw->loop_map; in efct_hw_init()
1064 dma->size = SLI4_MIN_LOOP_MAP_BYTES; in efct_hw_init()
1065 dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys, in efct_hw_init()
1067 if (!dma->virt) in efct_hw_init()
1068 return -EIO; in efct_hw_init()
1074 for (i = 0; i < hw->eq_count; i++) in efct_hw_init()
1075 sli_queue_arm(&hw->sli, &hw->eq[i], true); in efct_hw_init()
1080 for (i = 0; i < hw->rq_count; i++) in efct_hw_init()
1081 efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i); in efct_hw_init()
1086 for (i = 0; i < hw->wq_count; i++) in efct_hw_init()
1087 efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i); in efct_hw_init()
1092 for (i = 0; i < hw->cq_count; i++) { in efct_hw_init()
1093 efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i); in efct_hw_init()
1094 sli_queue_arm(&hw->sli, &hw->cq[i], true); in efct_hw_init()
1098 for (i = 0; i < hw->hw_rq_count; i++) { in efct_hw_init()
1099 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_init()
1101 hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2; in efct_hw_init()
1105 hw->state = EFCT_HW_STATE_ACTIVE; in efct_hw_init()
1109 hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw); in efct_hw_init()
1110 if (!hw->hw_wq[0]->send_frame_io) in efct_hw_init()
1111 efc_log_err(hw->os, "alloc for send_frame_io failed\n"); in efct_hw_init()
1114 atomic_set(&hw->send_frame_seq_id, 0); in efct_hw_init()
1127 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) in efct_hw_parse_filter()
1128 hw->config.filter_def[idx] = 0; in efct_hw_parse_filter()
1132 efc_log_err(hw->os, "p is NULL\n"); in efct_hw_parse_filter()
1133 return -ENOMEM; in efct_hw_parse_filter()
1138 if (kstrtou32(token, 0, &hw->config.filter_def[idx++])) in efct_hw_parse_filter()
1139 efc_log_err(hw->os, "kstrtou32 failed\n"); in efct_hw_parse_filter()
1144 if (idx == ARRAY_SIZE(hw->config.filter_def)) in efct_hw_parse_filter()
1155 struct sli4 *sli = &hw->sli; in efct_get_wwnn()
1158 memcpy(p, sli->wwnn, sizeof(p)); in efct_get_wwnn()
1165 struct sli4 *sli = &hw->sli; in efct_get_wwpn()
1168 memcpy(p, sli->wwpn, sizeof(p)); in efct_get_wwpn()
1176 struct efct *efct = hw->os; in efct_hw_rx_buffer_alloc()
1190 prq->rqindex = rqindex; in efct_hw_rx_buffer_alloc()
1191 prq->dma.size = size; in efct_hw_rx_buffer_alloc()
1192 prq->dma.virt = dma_alloc_coherent(&efct->pci->dev, in efct_hw_rx_buffer_alloc()
1193 prq->dma.size, in efct_hw_rx_buffer_alloc()
1194 &prq->dma.phys, in efct_hw_rx_buffer_alloc()
1196 if (!prq->dma.virt) { in efct_hw_rx_buffer_alloc()
1197 efc_log_err(hw->os, "DMA allocation failed\n"); in efct_hw_rx_buffer_alloc()
1210 struct efct *efct = hw->os; in efct_hw_rx_buffer_free()
1216 dma_free_coherent(&efct->pci->dev, in efct_hw_rx_buffer_free()
1217 prq->dma.size, prq->dma.virt, in efct_hw_rx_buffer_free()
1218 prq->dma.phys); in efct_hw_rx_buffer_free()
1219 memset(&prq->dma, 0, sizeof(struct efc_dma)); in efct_hw_rx_buffer_free()
1229 struct efct *efct = hw->os; in efct_hw_rx_allocate()
1234 u32 payload_size = hw->config.rq_default_buffer_size; in efct_hw_rx_allocate()
1238 for (i = 0; i < hw->hw_rq_count; i++) { in efct_hw_rx_allocate()
1239 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_rx_allocate()
1242 rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex, in efct_hw_rx_allocate()
1243 rq->entry_count, in efct_hw_rx_allocate()
1245 if (!rq->hdr_buf) { in efct_hw_rx_allocate()
1247 rc = -EIO; in efct_hw_rx_allocate()
1251 efc_log_debug(hw->os, in efct_hw_rx_allocate()
1253 i, rq->hdr->id, rq->entry_count, hdr_size); in efct_hw_rx_allocate()
1258 rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex, in efct_hw_rx_allocate()
1259 rq->entry_count, in efct_hw_rx_allocate()
1261 if (!rq->payload_buf) { in efct_hw_rx_allocate()
1263 rc = -EIO; in efct_hw_rx_allocate()
1266 efc_log_debug(hw->os, in efct_hw_rx_allocate()
1268 i, rq->data->id, rq->entry_count, payload_size); in efct_hw_rx_allocate()
1272 return rc ? -EIO : 0; in efct_hw_rx_allocate()
1283 if (!hw->seq_pool) { in efct_hw_rx_post()
1286 for (i = 0; i < hw->hw_rq_count; i++) in efct_hw_rx_post()
1287 count += hw->hw_rq[i]->entry_count; in efct_hw_rx_post()
1289 hw->seq_pool = kmalloc_array(count, in efct_hw_rx_post()
1291 if (!hw->seq_pool) in efct_hw_rx_post()
1292 return -ENOMEM; in efct_hw_rx_post()
1299 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) { in efct_hw_rx_post()
1300 struct hw_rq *rq = hw->hw_rq[rq_idx]; in efct_hw_rx_post()
1302 for (i = 0; i < rq->entry_count - 1; i++) { in efct_hw_rx_post()
1305 seq = hw->seq_pool + idx; in efct_hw_rx_post()
1307 seq->header = &rq->hdr_buf[i]; in efct_hw_rx_post()
1308 seq->payload = &rq->payload_buf[i]; in efct_hw_rx_post()
1317 if (rc && hw->seq_pool) in efct_hw_rx_post()
1318 kfree(hw->seq_pool); in efct_hw_rx_post()
1329 for (i = 0; i < hw->hw_rq_count; i++) { in efct_hw_rx_free()
1330 struct hw_rq *rq = hw->hw_rq[i]; in efct_hw_rx_free()
1333 efct_hw_rx_buffer_free(hw, rq->hdr_buf, in efct_hw_rx_free()
1334 rq->entry_count); in efct_hw_rx_free()
1335 rq->hdr_buf = NULL; in efct_hw_rx_free()
1336 efct_hw_rx_buffer_free(hw, rq->payload_buf, in efct_hw_rx_free()
1337 rq->entry_count); in efct_hw_rx_free()
1338 rq->payload_buf = NULL; in efct_hw_rx_free()
1351 while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) && in efct_hw_cmd_submit_pending()
1352 !list_empty(&hw->cmd_pending)) { in efct_hw_cmd_submit_pending()
1355 ctx = list_first_entry(&hw->cmd_pending, in efct_hw_cmd_submit_pending()
1360 list_del_init(&ctx->list_entry); in efct_hw_cmd_submit_pending()
1362 list_add_tail(&ctx->list_entry, &hw->cmd_head); in efct_hw_cmd_submit_pending()
1363 hw->cmd_head_count++; in efct_hw_cmd_submit_pending()
1364 if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) { in efct_hw_cmd_submit_pending()
1365 efc_log_debug(hw->os, in efct_hw_cmd_submit_pending()
1367 rc = -EIO; in efct_hw_cmd_submit_pending()
1377 int rc = -EIO; in efct_hw_command()
1385 if (sli_fw_error_status(&hw->sli) > 0) { in efct_hw_command()
1386 efc_log_crit(hw->os, "Chip in an error state - reset needed\n"); in efct_hw_command()
1387 efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n", in efct_hw_command()
1388 sli_reg_read_status(&hw->sli), in efct_hw_command()
1389 sli_reg_read_err1(&hw->sli), in efct_hw_command()
1390 sli_reg_read_err2(&hw->sli)); in efct_hw_command()
1392 return -EIO; in efct_hw_command()
1402 mutex_lock(&hw->bmbx_lock); in efct_hw_command()
1403 bmbx = hw->sli.bmbx.virt; in efct_hw_command()
1407 if (sli_bmbx_command(&hw->sli) == 0) { in efct_hw_command()
1411 mutex_unlock(&hw->bmbx_lock); in efct_hw_command()
1415 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_command()
1416 efc_log_err(hw->os, "Can't send command, HW state=%d\n", in efct_hw_command()
1417 hw->state); in efct_hw_command()
1418 return -EIO; in efct_hw_command()
1421 ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC); in efct_hw_command()
1423 return -ENOSPC; in efct_hw_command()
1428 ctx->cb = cb; in efct_hw_command()
1429 ctx->arg = arg; in efct_hw_command()
1432 memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE); in efct_hw_command()
1433 ctx->ctx = hw; in efct_hw_command()
1435 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command()
1438 INIT_LIST_HEAD(&ctx->list_entry); in efct_hw_command()
1439 list_add_tail(&ctx->list_entry, &hw->cmd_pending); in efct_hw_command()
1444 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command()
1457 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command_process()
1458 if (!list_empty(&hw->cmd_head)) { in efct_hw_command_process()
1459 ctx = list_first_entry(&hw->cmd_head, in efct_hw_command_process()
1461 list_del_init(&ctx->list_entry); in efct_hw_command_process()
1464 efc_log_err(hw->os, "no command context\n"); in efct_hw_command_process()
1465 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_process()
1466 return -EIO; in efct_hw_command_process()
1469 hw->cmd_head_count--; in efct_hw_command_process()
1474 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_process()
1476 if (ctx->cb) { in efct_hw_command_process()
1477 memcpy(ctx->buf, mqe, size); in efct_hw_command_process()
1478 ctx->cb(hw, status, ctx->buf, ctx->arg); in efct_hw_command_process()
1481 mempool_free(ctx, hw->cmd_ctx_pool); in efct_hw_command_process()
1493 rc = sli_mq_read(&hw->sli, mq, mqe); in efct_hw_mq_process()
1495 rc = efct_hw_command_process(hw, status, mqe, mq->size); in efct_hw_mq_process()
1506 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1513 while (!list_empty(&hw->cmd_head)) { in efct_hw_command_cancel()
1517 ctx = list_first_entry(&hw->cmd_head, in efct_hw_command_cancel()
1520 efc_log_debug(hw->os, "hung command %08x\n", in efct_hw_command_cancel()
1521 !ctx ? U32_MAX : *((u32 *)ctx->buf)); in efct_hw_command_cancel()
1522 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1523 rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE); in efct_hw_command_cancel()
1524 spin_lock_irqsave(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1527 spin_unlock_irqrestore(&hw->cmd_lock, flags); in efct_hw_command_cancel()
1538 if (ctx->callback) in efct_mbox_rsp_cb()
1539 (*ctx->callback)(hw->os->efcport, status, mqe, in efct_mbox_rsp_cb()
1540 ctx->arg); in efct_mbox_rsp_cb()
1542 mempool_free(ctx, hw->mbox_rqst_pool); in efct_mbox_rsp_cb()
1551 struct efct_hw *hw = &efct->hw; in efct_issue_mbox_rqst()
1559 ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC); in efct_issue_mbox_rqst()
1561 return -EIO; in efct_issue_mbox_rqst()
1563 ctx->callback = cb; in efct_issue_mbox_rqst()
1564 ctx->arg = arg; in efct_issue_mbox_rqst()
1569 mempool_free(ctx, hw->mbox_rqst_pool); in efct_issue_mbox_rqst()
1570 return -EIO; in efct_issue_mbox_rqst()
1579 struct efct_hw_io *io = NULL; in _efct_hw_io_alloc() local
1581 if (!list_empty(&hw->io_free)) { in _efct_hw_io_alloc()
1582 io = list_first_entry(&hw->io_free, struct efct_hw_io, in _efct_hw_io_alloc()
1584 list_del(&io->list_entry); in _efct_hw_io_alloc()
1586 if (io) { in _efct_hw_io_alloc()
1587 INIT_LIST_HEAD(&io->list_entry); in _efct_hw_io_alloc()
1588 list_add_tail(&io->list_entry, &hw->io_inuse); in _efct_hw_io_alloc()
1589 io->state = EFCT_HW_IO_STATE_INUSE; in _efct_hw_io_alloc()
1590 io->abort_reqtag = U32_MAX; in _efct_hw_io_alloc()
1591 io->wq = hw->wq_cpu_array[raw_smp_processor_id()]; in _efct_hw_io_alloc()
1592 if (!io->wq) { in _efct_hw_io_alloc()
1593 efc_log_err(hw->os, "WQ not assigned for cpu:%d\n", in _efct_hw_io_alloc()
1595 io->wq = hw->hw_wq[0]; in _efct_hw_io_alloc()
1597 kref_init(&io->ref); in _efct_hw_io_alloc()
1598 io->release = efct_hw_io_free_internal; in _efct_hw_io_alloc()
1600 atomic_add(1, &hw->io_alloc_failed_count); in _efct_hw_io_alloc()
1603 return io; in _efct_hw_io_alloc()
1609 struct efct_hw_io *io = NULL; in efct_hw_io_alloc() local
1612 spin_lock_irqsave(&hw->io_lock, flags); in efct_hw_io_alloc()
1613 io = _efct_hw_io_alloc(hw); in efct_hw_io_alloc()
1614 spin_unlock_irqrestore(&hw->io_lock, flags); in efct_hw_io_alloc()
1616 return io; in efct_hw_io_alloc()
1621 struct efct_hw_io *io) in efct_hw_io_free_move_correct_list() argument
1624 * When an IO is freed, depending on the exchange busy flag, in efct_hw_io_free_move_correct_list()
1627 if (io->xbusy) { in efct_hw_io_free_move_correct_list()
1632 INIT_LIST_HEAD(&io->list_entry); in efct_hw_io_free_move_correct_list()
1633 list_add_tail(&io->list_entry, &hw->io_wait_free); in efct_hw_io_free_move_correct_list()
1634 io->state = EFCT_HW_IO_STATE_WAIT_FREE; in efct_hw_io_free_move_correct_list()
1636 /* IO not busy, add to free list */ in efct_hw_io_free_move_correct_list()
1637 INIT_LIST_HEAD(&io->list_entry); in efct_hw_io_free_move_correct_list()
1638 list_add_tail(&io->list_entry, &hw->io_free); in efct_hw_io_free_move_correct_list()
1639 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_io_free_move_correct_list()
1644 efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_free_common() argument
1646 /* initialize IO fields */ in efct_hw_io_free_common()
1647 efct_hw_init_free_io(io); in efct_hw_io_free_common()
1650 efct_hw_io_restore_sgl(hw, io); in efct_hw_io_free_common()
1657 struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref); in efct_hw_io_free_internal() local
1658 struct efct_hw *hw = io->hw; in efct_hw_io_free_internal()
1661 efct_hw_io_free_common(hw, io); in efct_hw_io_free_internal()
1663 spin_lock_irqsave(&hw->io_lock, flags); in efct_hw_io_free_internal()
1664 /* remove from in-use list */ in efct_hw_io_free_internal()
1665 if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) { in efct_hw_io_free_internal()
1666 list_del_init(&io->list_entry); in efct_hw_io_free_internal()
1667 efct_hw_io_free_move_correct_list(hw, io); in efct_hw_io_free_internal()
1669 spin_unlock_irqrestore(&hw->io_lock, flags); in efct_hw_io_free_internal()
1673 efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io) in efct_hw_io_free() argument
1675 return kref_put(&io->ref, io->release); in efct_hw_io_free()
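
efct_hw_io_free() is simply a kref_put() of the reference taken by efct_hw_io_alloc() (which kref_init()s the IO), and the release callback moves the IO back onto io_free or io_wait_free depending on xbusy. A brief illustrative caller, assuming only the alloc/free signatures shown above; this is a sketch, not a real code path in this driver:

/* Illustrative fragment (kernel context and driver headers assumed). */
static int example_use_hw_io(struct efct_hw *hw)
{
	struct efct_hw_io *io;

	io = efct_hw_io_alloc(hw);	/* pops io_free and takes the initial kref */
	if (!io)
		return -ENOMEM;		/* pool exhausted (io_free list empty) */

	/* ... init SGEs and submit a WQE for this IO here ... */

	/* Drop the caller's reference; the return value is non-zero once
	 * this put actually released the IO back onto a free list.
	 */
	efct_hw_io_free(hw, io);
	return 0;
}
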
1683 ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0]; in efct_hw_io_lookup()
1684 return hw->io[ioindex]; in efct_hw_io_lookup()
1688 efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io, in efct_hw_io_init_sges() argument
1696 if (!io) { in efct_hw_io_init_sges()
1697 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io); in efct_hw_io_init_sges()
1698 return -EIO; in efct_hw_io_init_sges()
1701 /* Clear / reset the scatter-gather list */ in efct_hw_io_init_sges()
1702 io->sgl = &io->def_sgl; in efct_hw_io_init_sges()
1703 io->sgl_count = io->def_sgl_count; in efct_hw_io_init_sges()
1704 io->first_data_sge = 0; in efct_hw_io_init_sges()
1706 memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge)); in efct_hw_io_init_sges()
1707 io->n_sge = 0; in efct_hw_io_init_sges()
1708 io->sge_offset = 0; in efct_hw_io_init_sges()
1710 io->type = type; in efct_hw_io_init_sges()
1712 data = io->sgl->virt; in efct_hw_io_init_sges()
1715 * Some IO types have underlying hardware requirements on the order in efct_hw_io_init_sges()
1722 sge_flags = le32_to_cpu(data->dw2_flags); in efct_hw_io_init_sges()
1725 data->buffer_address_high = in efct_hw_io_init_sges()
1726 cpu_to_le32(upper_32_bits(io->xfer_rdy.phys)); in efct_hw_io_init_sges()
1727 data->buffer_address_low = in efct_hw_io_init_sges()
1728 cpu_to_le32(lower_32_bits(io->xfer_rdy.phys)); in efct_hw_io_init_sges()
1729 data->buffer_length = cpu_to_le32(io->xfer_rdy.size); in efct_hw_io_init_sges()
1730 data->dw2_flags = cpu_to_le32(sge_flags); in efct_hw_io_init_sges()
1735 io->n_sge = 1; in efct_hw_io_init_sges()
1749 efc_log_err(hw->os, "unsupported IO type %#x\n", type); in efct_hw_io_init_sges()
1750 return -EIO; in efct_hw_io_init_sges()
1757 sge_flags = le32_to_cpu(data->dw2_flags); in efct_hw_io_init_sges()
1760 data->dw2_flags = cpu_to_le32(sge_flags); in efct_hw_io_init_sges()
1764 io->n_sge += skips; in efct_hw_io_init_sges()
1769 sge_flags = le32_to_cpu(data->dw2_flags); in efct_hw_io_init_sges()
1771 data->dw2_flags = cpu_to_le32(sge_flags); in efct_hw_io_init_sges()
1777 efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io, in efct_hw_io_add_sge() argument
1783 if (!io || !addr || !length) { in efct_hw_io_add_sge()
1784 efc_log_err(hw->os, in efct_hw_io_add_sge()
1785 "bad parameter hw=%p io=%p addr=%lx length=%u\n", in efct_hw_io_add_sge()
1786 hw, io, addr, length); in efct_hw_io_add_sge()
1787 return -EIO; in efct_hw_io_add_sge()
1790 if (length > hw->sli.sge_supported_length) { in efct_hw_io_add_sge()
1791 efc_log_err(hw->os, in efct_hw_io_add_sge()
1793 length, hw->sli.sge_supported_length); in efct_hw_io_add_sge()
1794 return -EIO; in efct_hw_io_add_sge()
1797 data = io->sgl->virt; in efct_hw_io_add_sge()
1798 data += io->n_sge; in efct_hw_io_add_sge()
1800 sge_flags = le32_to_cpu(data->dw2_flags); in efct_hw_io_add_sge()
1804 sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset; in efct_hw_io_add_sge()
1806 data->buffer_address_high = cpu_to_le32(upper_32_bits(addr)); in efct_hw_io_add_sge()
1807 data->buffer_address_low = cpu_to_le32(lower_32_bits(addr)); in efct_hw_io_add_sge()
1808 data->buffer_length = cpu_to_le32(length); in efct_hw_io_add_sge()
1816 data->dw2_flags = cpu_to_le32(sge_flags); in efct_hw_io_add_sge()
1818 if (io->n_sge) { in efct_hw_io_add_sge()
1819 sge_flags = le32_to_cpu(data[-1].dw2_flags); in efct_hw_io_add_sge()
1821 data[-1].dw2_flags = cpu_to_le32(sge_flags); in efct_hw_io_add_sge()
1825 if (io->first_data_sge == 0) in efct_hw_io_add_sge()
1826 io->first_data_sge = io->n_sge; in efct_hw_io_add_sge()
1828 io->sge_offset += length; in efct_hw_io_add_sge()
1829 io->n_sge++; in efct_hw_io_add_sge()
1841 &hw->io_inuse, list_entry) { in efct_hw_io_abort_all()
1849 struct efct_hw_io *io = arg; in efct_hw_wq_process_abort() local
1850 struct efct_hw *hw = io->hw; in efct_hw_wq_process_abort()
1861 ext = sli_fc_ext_status(&hw->sli, cqe); in efct_hw_wq_process_abort()
1863 ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) { in efct_hw_wq_process_abort()
1864 efct_hw_done_t done = io->done; in efct_hw_wq_process_abort()
1866 io->done = NULL; in efct_hw_wq_process_abort()
1874 status = io->saved_status; in efct_hw_wq_process_abort()
1875 len = io->saved_len; in efct_hw_wq_process_abort()
1876 ext = io->saved_ext; in efct_hw_wq_process_abort()
1877 io->status_saved = false; in efct_hw_wq_process_abort()
1878 done(io, len, status, ext, io->arg); in efct_hw_wq_process_abort()
1881 if (io->abort_done) { in efct_hw_wq_process_abort()
1882 efct_hw_done_t done = io->abort_done; in efct_hw_wq_process_abort()
1884 io->abort_done = NULL; in efct_hw_wq_process_abort()
1885 done(io, len, status, ext, io->abort_arg); in efct_hw_wq_process_abort()
1889 io->abort_in_progress = false; in efct_hw_wq_process_abort()
1892 if (io->abort_reqtag == U32_MAX) { in efct_hw_wq_process_abort()
1893 efc_log_err(hw->os, "HW IO already freed\n"); in efct_hw_wq_process_abort()
1897 wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag); in efct_hw_wq_process_abort()
1904 (void)efct_hw_io_free(hw, io); in efct_hw_wq_process_abort()
1910 struct sli4_abort_wqe *abort = (void *)wqe->wqebuf; in efct_hw_fill_abort_wqe()
1912 memset(abort, 0, hw->sli.wqe_size); in efct_hw_fill_abort_wqe()
1914 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG; in efct_hw_fill_abort_wqe()
1915 abort->ia_ir_byte |= wqe->send_abts ? 0 : 1; in efct_hw_fill_abort_wqe()
1918 abort->ia_ir_byte |= SLI4_ABRT_WQE_IR; in efct_hw_fill_abort_wqe()
1920 abort->t_tag = cpu_to_le32(wqe->id); in efct_hw_fill_abort_wqe()
1921 abort->command = SLI4_WQE_ABORT; in efct_hw_fill_abort_wqe()
1922 abort->request_tag = cpu_to_le16(wqe->abort_reqtag); in efct_hw_fill_abort_wqe()
1924 abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD); in efct_hw_fill_abort_wqe()
1926 abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT); in efct_hw_fill_abort_wqe()
1937 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", in efct_hw_io_abort()
1939 return -EIO; in efct_hw_io_abort()
1942 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_io_abort()
1943 efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n", in efct_hw_io_abort()
1944 hw->state); in efct_hw_io_abort()
1945 return -EIO; in efct_hw_io_abort()
1948 /* take a reference on IO being aborted */ in efct_hw_io_abort()
1949 if (kref_get_unless_zero(&io_to_abort->ref) == 0) { in efct_hw_io_abort()
1951 efc_log_debug(hw->os, in efct_hw_io_abort()
1952 "io not active xri=0x%x tag=0x%x\n", in efct_hw_io_abort()
1953 io_to_abort->indicator, io_to_abort->reqtag); in efct_hw_io_abort()
1954 return -ENOENT; in efct_hw_io_abort()
1958 if (!io_to_abort->wq) { in efct_hw_io_abort()
1959 efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n", in efct_hw_io_abort()
1960 io_to_abort->indicator); in efct_hw_io_abort()
1962 kref_put(&io_to_abort->ref, io_to_abort->release); in efct_hw_io_abort()
1963 return -ENOENT; in efct_hw_io_abort()
1970 if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) { in efct_hw_io_abort()
1972 kref_put(&io_to_abort->ref, io_to_abort->release); in efct_hw_io_abort()
1973 efc_log_debug(hw->os, in efct_hw_io_abort()
1974 "io already being aborted xri=0x%x tag=0x%x\n", in efct_hw_io_abort()
1975 io_to_abort->indicator, io_to_abort->reqtag); in efct_hw_io_abort()
1976 return -EINPROGRESS; in efct_hw_io_abort()
1981 * - host owned xri in efct_hw_io_abort()
1982 * - io_to_abort->wq_index != U32_MAX in efct_hw_io_abort()
1983 * - submit ABORT_WQE to same WQ in efct_hw_io_abort()
1984 * - port owned xri: in efct_hw_io_abort()
1985 * - rxri: io_to_abort->wq_index == U32_MAX in efct_hw_io_abort()
1986 * - submit ABORT_WQE to any WQ in efct_hw_io_abort()
1987 * - non-rxri in efct_hw_io_abort()
1988 * - io_to_abort->index != U32_MAX in efct_hw_io_abort()
1989 * - submit ABORT_WQE to same WQ in efct_hw_io_abort()
1990 * - io_to_abort->index == U32_MAX in efct_hw_io_abort()
1991 * - submit ABORT_WQE to any WQ in efct_hw_io_abort()
1993 io_to_abort->abort_done = cb; in efct_hw_io_abort()
1994 io_to_abort->abort_arg = arg; in efct_hw_io_abort()
1996 /* Allocate a request tag for the abort portion of this IO */ in efct_hw_io_abort()
1999 efc_log_err(hw->os, "can't allocate request tag\n"); in efct_hw_io_abort()
2000 return -ENOSPC; in efct_hw_io_abort()
2003 io_to_abort->abort_reqtag = wqcb->instance_index; in efct_hw_io_abort()
2004 io_to_abort->wqe.send_abts = send_abts; in efct_hw_io_abort()
2005 io_to_abort->wqe.id = io_to_abort->indicator; in efct_hw_io_abort()
2006 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag; in efct_hw_io_abort()
2010 * aborted when the IO's wqe is removed from the list. in efct_hw_io_abort()
2012 if (io_to_abort->wq) { in efct_hw_io_abort()
2013 spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags); in efct_hw_io_abort()
2014 if (io_to_abort->wqe.list_entry.next) { in efct_hw_io_abort()
2015 io_to_abort->wqe.abort_wqe_submit_needed = true; in efct_hw_io_abort()
2016 spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, in efct_hw_io_abort()
2020 spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags); in efct_hw_io_abort()
2023 efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe); in efct_hw_io_abort()
2026 * therefore, keep xbusy as-is to track the exchange's state, in efct_hw_io_abort()
2029 if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) { in efct_hw_io_abort()
2030 io_to_abort->abort_in_progress = false; in efct_hw_io_abort()
2032 kref_put(&io_to_abort->ref, io_to_abort->release); in efct_hw_io_abort()
2033 return -EIO; in efct_hw_io_abort()
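
The originator-abort path in efct_hw_wq_process_io() earlier shows the expected contract of efct_hw_io_abort(): 0 means the ABORT_WQE was queued (the caller holds the completion status until the XRI_ABORTED CQE arrives), -EINPROGRESS means another abort is already outstanding for the XRI, and other negative values mean the abort could not be issued. A condensed, illustrative caller based only on those visible return codes:

/* Illustrative fragment, assuming the efct_hw_io_abort() signature used
 * above: (hw, io_to_abort, send_abts, abort_done_cb, cb_arg).
 */
static void example_abort_io(struct efct_hw *hw, struct efct_hw_io *io)
{
	int rc;

	/* false: abort locally without transmitting an ABTS on the wire */
	rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
	switch (rc) {
	case 0:			/* abort queued; wait for the XRI_ABORTED CQE */
	case -EINPROGRESS:	/* an abort for this XRI is already in flight */
		break;
	default:
		efc_log_err(hw->os, "abort failed xri=%#x rc=%d\n",
			    io->indicator, rc);
		break;
	}
}
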
2043 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; in efct_hw_reqtag_pool_free()
2048 wqcb = reqtag_pool->tags[i]; in efct_hw_reqtag_pool_free()
2055 hw->wq_reqtag_pool = NULL; in efct_hw_reqtag_pool_free()
2070 INIT_LIST_HEAD(&reqtag_pool->freelist); in efct_hw_reqtag_pool_alloc()
2072 spin_lock_init(&reqtag_pool->lock); in efct_hw_reqtag_pool_alloc()
2078 reqtag_pool->tags[i] = wqcb; in efct_hw_reqtag_pool_alloc()
2079 wqcb->instance_index = i; in efct_hw_reqtag_pool_alloc()
2080 wqcb->callback = NULL; in efct_hw_reqtag_pool_alloc()
2081 wqcb->arg = NULL; in efct_hw_reqtag_pool_alloc()
2082 INIT_LIST_HEAD(&wqcb->list_entry); in efct_hw_reqtag_pool_alloc()
2083 list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist); in efct_hw_reqtag_pool_alloc()
2095 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; in efct_hw_reqtag_alloc()
2101 spin_lock_irqsave(&reqtag_pool->lock, flags); in efct_hw_reqtag_alloc()
2103 if (!list_empty(&reqtag_pool->freelist)) { in efct_hw_reqtag_alloc()
2104 wqcb = list_first_entry(&reqtag_pool->freelist, in efct_hw_reqtag_alloc()
2109 list_del_init(&wqcb->list_entry); in efct_hw_reqtag_alloc()
2110 spin_unlock_irqrestore(&reqtag_pool->lock, flags); in efct_hw_reqtag_alloc()
2111 wqcb->callback = callback; in efct_hw_reqtag_alloc()
2112 wqcb->arg = arg; in efct_hw_reqtag_alloc()
2114 spin_unlock_irqrestore(&reqtag_pool->lock, flags); in efct_hw_reqtag_alloc()
2124 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool; in efct_hw_reqtag_free()
2126 if (!wqcb->callback) in efct_hw_reqtag_free()
2127 efc_log_err(hw->os, "WQCB is already freed\n"); in efct_hw_reqtag_free()
2129 spin_lock_irqsave(&reqtag_pool->lock, flags); in efct_hw_reqtag_free()
2130 wqcb->callback = NULL; in efct_hw_reqtag_free()
2131 wqcb->arg = NULL; in efct_hw_reqtag_free()
2132 INIT_LIST_HEAD(&wqcb->list_entry); in efct_hw_reqtag_free()
2133 list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist); in efct_hw_reqtag_free()
2134 spin_unlock_irqrestore(&reqtag_pool->lock, flags); in efct_hw_reqtag_free()
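
The request-tag pool above is a fixed array of callback records threaded onto a spinlock-protected free list: efct_hw_reqtag_alloc() pops the head and records the completion callback, efct_hw_reqtag_free() clears it and pushes the entry back, and the tag handed to hardware is just the entry's instance_index into the array (see efct_hw_reqtag_get_instance() below). A standalone model of that pattern, in plain C with no locking and hypothetical sizes:

#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE 4	/* the driver uses a much larger, fixed pool */

struct tag_entry {
	unsigned int instance_index;	/* the "request tag" handed to hardware */
	void (*callback)(void *arg);
	void *arg;
	struct tag_entry *next;		/* free-list link */
};

struct tag_pool {
	struct tag_entry tags[POOL_SIZE];
	struct tag_entry *freelist;
};

static void tag_pool_init(struct tag_pool *p)
{
	p->freelist = NULL;
	for (size_t i = 0; i < POOL_SIZE; i++) {
		p->tags[i].instance_index = (unsigned int)i;
		p->tags[i].next = p->freelist;
		p->freelist = &p->tags[i];
	}
}

static struct tag_entry *tag_alloc(struct tag_pool *p,
				   void (*cb)(void *), void *arg)
{
	struct tag_entry *t = p->freelist;

	if (!t)
		return NULL;		/* pool exhausted */
	p->freelist = t->next;
	t->callback = cb;
	t->arg = arg;
	return t;
}

static void tag_free(struct tag_pool *p, struct tag_entry *t)
{
	t->callback = NULL;
	t->arg = NULL;
	t->next = p->freelist;
	p->freelist = t;
}

static void demo_cb(void *arg)
{
	printf("completion for %s\n", (const char *)arg);
}

int main(void)
{
	struct tag_pool pool;
	struct tag_entry *t;

	tag_pool_init(&pool);
	t = tag_alloc(&pool, demo_cb, "io-1");
	printf("allocated tag %u\n", t->instance_index);

	/* Completion path: look the entry up by tag and invoke its callback. */
	pool.tags[t->instance_index].callback(pool.tags[t->instance_index].arg);
	tag_free(&pool, t);
	return 0;
}
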
2142 wqcb = hw->wq_reqtag_pool->tags[instance_index]; in efct_hw_reqtag_get_instance()
2144 efc_log_err(hw->os, "wqcb for instance %d is null\n", in efct_hw_reqtag_get_instance()
2153 int index = -1; in efct_hw_queue_hash_find()
2154 int i = id & (EFCT_HW_Q_HASH_SIZE - 1); in efct_hw_queue_hash_find()
2165 i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1); in efct_hw_queue_hash_find()
2166 } while (index == -1 && hash[i].in_use); in efct_hw_queue_hash_find()
2180 * EFCT_HW_STATE_UNINITIALIZED - No queues allocated in efct_hw_process()
2181 * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset, in efct_hw_process()
2183 * EFCT_HW_STATE_ACTIVE - Chip and queues are operational in efct_hw_process()
2184 * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions in efct_hw_process()
2185 * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox in efct_hw_process()
2188 if (hw->state == EFCT_HW_STATE_UNINITIALIZED) in efct_hw_process()
2192 eq = hw->hw_eq[vector]; in efct_hw_process()
2196 eq->use_count++; in efct_hw_process()
2214 while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) { in efct_hw_eq_process()
2218 rc = sli_eq_parse(&hw->sli, eqe, &cq_id); in efct_hw_eq_process()
2227 for (i = 0; i < hw->cq_count; i++) in efct_hw_eq_process()
2228 efct_hw_cq_process(hw, hw->hw_cq[i]); in efct_hw_eq_process()
2236 index = efct_hw_queue_hash_find(hw->cq_hash, cq_id); in efct_hw_eq_process()
2239 efct_hw_cq_process(hw, hw->hw_cq[index]); in efct_hw_eq_process()
2241 efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id); in efct_hw_eq_process()
2244 if (eq->queue->n_posted > eq->queue->posted_limit) in efct_hw_eq_process()
2245 sli_queue_arm(&hw->sli, eq->queue, false); in efct_hw_eq_process()
2247 if (tcheck_count && (--tcheck_count == 0)) { in efct_hw_eq_process()
2249 telapsed = jiffies_to_msecs(jiffies) - tstart; in efct_hw_eq_process()
2254 sli_queue_eq_arm(&hw->sli, eq->queue, true); in efct_hw_eq_process()
2265 if (wq->wqec_count) in _efct_hw_wq_write()
2266 wq->wqec_count--; in _efct_hw_wq_write()
2268 if (wq->wqec_count == 0) { in _efct_hw_wq_write()
2269 struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf; in _efct_hw_wq_write()
2271 genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC; in _efct_hw_wq_write()
2272 wq->wqec_count = wq->wqec_set_count; in _efct_hw_wq_write()
2276 wq->free_count--; in _efct_hw_wq_write()
2278 queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf); in _efct_hw_wq_write()
2280 return (queue_rc < 0) ? -EIO : 0; in _efct_hw_wq_write()
2289 spin_lock_irqsave(&wq->queue->lock, flags); in hw_wq_submit_pending()
2292 wq->free_count += update_free_count; in hw_wq_submit_pending()
2294 while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) { in hw_wq_submit_pending()
2295 wqe = list_first_entry(&wq->pending_list, in hw_wq_submit_pending()
2297 list_del_init(&wqe->list_entry); in hw_wq_submit_pending()
2300 if (wqe->abort_wqe_submit_needed) { in hw_wq_submit_pending()
2301 wqe->abort_wqe_submit_needed = false; in hw_wq_submit_pending()
2302 efct_hw_fill_abort_wqe(wq->hw, wqe); in hw_wq_submit_pending()
2303 INIT_LIST_HEAD(&wqe->list_entry); in hw_wq_submit_pending()
2304 list_add_tail(&wqe->list_entry, &wq->pending_list); in hw_wq_submit_pending()
2305 wq->wq_pending_count++; in hw_wq_submit_pending()
2309 spin_unlock_irqrestore(&wq->queue->lock, flags); in hw_wq_submit_pending()
2324 while (!sli_cq_read(&hw->sli, cq->queue, cqe)) { in efct_hw_cq_process()
2327 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid); in efct_hw_cq_process()
2350 sli_cqe_async(&hw->sli, cqe); in efct_hw_cq_process()
2357 efct_hw_mq_process(hw, status, hw->mq); in efct_hw_cq_process()
2367 index = efct_hw_queue_hash_find(hw->wq_hash, wq_id); in efct_hw_cq_process()
2370 wq = hw->hw_wq[index]; in efct_hw_cq_process()
2372 efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id); in efct_hw_cq_process()
2376 hw_wq_submit_pending(wq, wq->wqec_set_count); in efct_hw_cq_process()
2389 efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n", in efct_hw_cq_process()
2395 if (n_processed == cq->queue->proc_limit) in efct_hw_cq_process()
2398 if (cq->queue->n_posted >= cq->queue->posted_limit) in efct_hw_cq_process()
2399 sli_queue_arm(&hw->sli, cq->queue, false); in efct_hw_cq_process()
2402 sli_queue_arm(&hw->sli, cq->queue, true); in efct_hw_cq_process()
2404 if (n_processed > cq->queue->max_num_processed) in efct_hw_cq_process()
2405 cq->queue->max_num_processed = n_processed; in efct_hw_cq_process()
2406 telapsed = jiffies_to_msecs(jiffies) - tstart; in efct_hw_cq_process()
2407 if (telapsed > cq->queue->max_process_time) in efct_hw_cq_process()
2408 cq->queue->max_process_time = telapsed; in efct_hw_cq_process()
2419 efc_log_err(hw->os, "reque xri failed, status = %d\n", in efct_hw_wq_process()
2426 efc_log_err(hw->os, "invalid request tag: x%x\n", rid); in efct_hw_wq_process()
2430 if (!wqcb->callback) { in efct_hw_wq_process()
2431 efc_log_err(hw->os, "wqcb callback is NULL\n"); in efct_hw_wq_process()
2435 (*wqcb->callback)(wqcb->arg, cqe, status); in efct_hw_wq_process()
2443 struct efct_hw_io *io = NULL; in efct_hw_xabt_process() local
2446 io = efct_hw_io_lookup(hw, rid); in efct_hw_xabt_process()
2447 if (!io) { in efct_hw_xabt_process()
2448 /* IO lookup failure should never happen */ in efct_hw_xabt_process()
2449 efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid); in efct_hw_xabt_process()
2453 if (!io->xbusy) in efct_hw_xabt_process()
2454 efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid); in efct_hw_xabt_process()
2456 /* mark IO as no longer busy */ in efct_hw_xabt_process()
2457 io->xbusy = false; in efct_hw_xabt_process()
2463 if (io->done) { in efct_hw_xabt_process()
2464 efct_hw_done_t done = io->done; in efct_hw_xabt_process()
2465 void *arg = io->arg; in efct_hw_xabt_process()
2471 int status = io->saved_status; in efct_hw_xabt_process()
2472 u32 len = io->saved_len; in efct_hw_xabt_process()
2473 u32 ext = io->saved_ext; in efct_hw_xabt_process()
2475 io->done = NULL; in efct_hw_xabt_process()
2476 io->status_saved = false; in efct_hw_xabt_process()
2478 done(io, len, status, ext, arg); in efct_hw_xabt_process()
2481 spin_lock_irqsave(&hw->io_lock, flags); in efct_hw_xabt_process()
2482 if (io->state == EFCT_HW_IO_STATE_INUSE || in efct_hw_xabt_process()
2483 io->state == EFCT_HW_IO_STATE_WAIT_FREE) { in efct_hw_xabt_process()
2484 /* if on wait_free list, caller has already freed IO; in efct_hw_xabt_process()
2486 * if on in-use list, already marked as no longer busy; in efct_hw_xabt_process()
2489 if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) { in efct_hw_xabt_process()
2490 io->state = EFCT_HW_IO_STATE_FREE; in efct_hw_xabt_process()
2491 list_del_init(&io->list_entry); in efct_hw_xabt_process()
2492 efct_hw_io_free_move_correct_list(hw, io); in efct_hw_xabt_process()
2495 spin_unlock_irqrestore(&hw->io_lock, flags); in efct_hw_xabt_process()
2504 for (i = 0; i < hw->eq_count; i++) in efct_hw_flush()
2516 spin_lock_irqsave(&wq->queue->lock, flags); in efct_hw_wq_write()
2517 if (list_empty(&wq->pending_list)) { in efct_hw_wq_write()
2518 if (wq->free_count > 0) { in efct_hw_wq_write()
2521 INIT_LIST_HEAD(&wqe->list_entry); in efct_hw_wq_write()
2522 list_add_tail(&wqe->list_entry, &wq->pending_list); in efct_hw_wq_write()
2523 wq->wq_pending_count++; in efct_hw_wq_write()
2526 spin_unlock_irqrestore(&wq->queue->lock, flags); in efct_hw_wq_write()
2530 INIT_LIST_HEAD(&wqe->list_entry); in efct_hw_wq_write()
2531 list_add_tail(&wqe->list_entry, &wq->pending_list); in efct_hw_wq_write()
2532 wq->wq_pending_count++; in efct_hw_wq_write()
2533 while (wq->free_count > 0) { in efct_hw_wq_write()
2534 wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe, in efct_hw_wq_write()
2539 list_del_init(&wqe->list_entry); in efct_hw_wq_write()
2544 if (wqe->abort_wqe_submit_needed) { in efct_hw_wq_write()
2545 wqe->abort_wqe_submit_needed = false; in efct_hw_wq_write()
2546 efct_hw_fill_abort_wqe(wq->hw, wqe); in efct_hw_wq_write()
2548 INIT_LIST_HEAD(&wqe->list_entry); in efct_hw_wq_write()
2549 list_add_tail(&wqe->list_entry, &wq->pending_list); in efct_hw_wq_write()
2550 wq->wq_pending_count++; in efct_hw_wq_write()
2554 spin_unlock_irqrestore(&wq->queue->lock, flags); in efct_hw_wq_write()
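
efct_hw_wq_write() above and hw_wq_submit_pending() earlier implement simple credit-based flow control for the work queue: free_count tracks unused WQ slots, a new WQE is written directly only while credits remain and nothing is pending, otherwise it joins pending_list, and each batch of completions returns credits and drains the list in FIFO order. A standalone model of that flow (single-threaded, hypothetical types, and without the abort-resubmit special case handled in the real code):

#include <stdio.h>

#define RING_SLOTS 2	/* tiny ring, for illustration only */

struct wqe {
	int id;
	struct wqe *next;
};

struct wq {
	int free_count;			/* available hardware slots (credits) */
	struct wqe *pend_head, *pend_tail;
};

static void hw_ring_write(struct wqe *e)
{
	printf("WQE %d written to ring\n", e->id);
}

static void wq_submit(struct wq *wq, struct wqe *e)
{
	if (!wq->pend_head && wq->free_count > 0) {
		wq->free_count--;
		hw_ring_write(e);	/* fast path: credit available */
		return;
	}

	/* No credit (or others already waiting): queue in FIFO order. */
	e->next = NULL;
	if (wq->pend_tail)
		wq->pend_tail->next = e;
	else
		wq->pend_head = e;
	wq->pend_tail = e;
}

static void wq_complete(struct wq *wq, int n_completed)
{
	wq->free_count += n_completed;	/* completions return credits */

	while (wq->free_count > 0 && wq->pend_head) {
		struct wqe *e = wq->pend_head;

		wq->pend_head = e->next;
		if (!wq->pend_head)
			wq->pend_tail = NULL;
		wq->free_count--;
		hw_ring_write(e);
	}
}

int main(void)
{
	struct wq wq = { .free_count = RING_SLOTS };
	struct wqe a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	wq_submit(&wq, &a);	/* written immediately */
	wq_submit(&wq, &b);	/* written immediately, ring now full */
	wq_submit(&wq, &c);	/* queued on the pending list */
	wq_complete(&wq, 1);	/* one credit returned, c is drained */
	return 0;
}
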
2562 struct efct *efct = efc->base; in efct_efc_bls_send()
2571 struct efct_hw *hw = &efct->hw; in efct_hw_bls_send()
2576 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_bls_send()
2577 efc_log_err(hw->os, in efct_hw_bls_send()
2578 "cannot send BLS, HW state=%d\n", hw->state); in efct_hw_bls_send()
2579 return -EIO; in efct_hw_bls_send()
2584 efc_log_err(hw->os, "HIO allocation failed\n"); in efct_hw_bls_send()
2585 return -EIO; in efct_hw_bls_send()
2588 hio->done = cb; in efct_hw_bls_send()
2589 hio->arg = arg; in efct_hw_bls_send()
2591 bls_params->xri = hio->indicator; in efct_hw_bls_send()
2592 bls_params->tag = hio->reqtag; in efct_hw_bls_send()
2595 hio->type = EFCT_HW_BLS_ACC; in efct_hw_bls_send()
2597 memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc)); in efct_hw_bls_send()
2599 hio->type = EFCT_HW_BLS_RJT; in efct_hw_bls_send()
2601 memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt)); in efct_hw_bls_send()
2604 bls.ox_id = cpu_to_le16(bls_params->ox_id); in efct_hw_bls_send()
2605 bls.rx_id = cpu_to_le16(bls_params->rx_id); in efct_hw_bls_send()
2607 if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, in efct_hw_bls_send()
2609 efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n"); in efct_hw_bls_send()
2610 return -EIO; in efct_hw_bls_send()
2613 hio->xbusy = true; in efct_hw_bls_send()
2616 * Add IO to active io wqe list before submitting, in case the completion processing preempts this thread. in efct_hw_bls_send()
2619 hio->wq->use_count++; in efct_hw_bls_send()
2620 rc = efct_hw_wq_write(hio->wq, &hio->wqe); in efct_hw_bls_send()
2622 /* non-negative return is success */ in efct_hw_bls_send()
2626 efc_log_err(hw->os, in efct_hw_bls_send()
2628 hio->xbusy = false; in efct_hw_bls_send()
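For the BLS accept branch above, the payload conventionally echoes the exchange identifiers of the ABTS being answered. A hedged sketch of filling such a payload follows; example_ba_acc and fill_ba_acc are illustrative, under the assumption that the accept payload carries OX_ID, RX_ID and a sequence-count range as FC-FS describes, and are not the driver's types.

/* Illustrative only: building a BA_ACC-style payload. */
#include <stdint.h>
#include <string.h>

struct example_ba_acc {
	uint8_t  seq_id_valid;
	uint8_t  seq_id;
	uint16_t reserved;
	uint16_t ox_id;		/* originator exchange ID being accepted */
	uint16_t rx_id;		/* responder exchange ID */
	uint16_t low_seq_cnt;
	uint16_t high_seq_cnt;
};

static void fill_ba_acc(struct example_ba_acc *acc,
			uint16_t ox_id, uint16_t rx_id)
{
	memset(acc, 0, sizeof(*acc));
	acc->ox_id = ox_id;
	acc->rx_id = rx_id;
	acc->high_seq_cnt = 0xffff;	/* whole exchange is being aborted */
}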
2638 struct efc_disc_io *io = arg; in efct_els_ssrs_send_cb() local
2640 efc_disc_io_complete(io, length, status, ext_status); in efct_els_ssrs_send_cb()
2645 efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params) in efct_fill_els_params() argument
2647 u8 *cmd = io->req.virt; in efct_fill_els_params()
2649 params->cmd = *cmd; in efct_fill_els_params()
2650 params->s_id = io->s_id; in efct_fill_els_params()
2651 params->d_id = io->d_id; in efct_fill_els_params()
2652 params->ox_id = io->iparam.els.ox_id; in efct_fill_els_params()
2653 params->rpi = io->rpi; in efct_fill_els_params()
2654 params->vpi = io->vpi; in efct_fill_els_params()
2655 params->rpi_registered = io->rpi_registered; in efct_fill_els_params()
2656 params->xmit_len = io->xmit_len; in efct_fill_els_params()
2657 params->rsp_len = io->rsp_len; in efct_fill_els_params()
2658 params->timeout = io->iparam.els.timeout; in efct_fill_els_params()
2662 efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params) in efct_fill_ct_params() argument
2664 params->r_ctl = io->iparam.ct.r_ctl; in efct_fill_ct_params()
2665 params->type = io->iparam.ct.type; in efct_fill_ct_params()
2666 params->df_ctl = io->iparam.ct.df_ctl; in efct_fill_ct_params()
2667 params->d_id = io->d_id; in efct_fill_ct_params()
2668 params->ox_id = io->iparam.ct.ox_id; in efct_fill_ct_params()
2669 params->rpi = io->rpi; in efct_fill_ct_params()
2670 params->vpi = io->vpi; in efct_fill_ct_params()
2671 params->rpi_registered = io->rpi_registered; in efct_fill_ct_params()
2672 params->xmit_len = io->xmit_len; in efct_fill_ct_params()
2673 params->rsp_len = io->rsp_len; in efct_fill_ct_params()
2674 params->timeout = io->iparam.ct.timeout; in efct_fill_ct_params()
2678 * efct_els_hw_srrs_send() - Send a single request and response cmd.
2680 * @io: Discovery IO used to hold els and ct cmd context.
2684 * - Sending an ELS request.
2685 * - Sending an ELS response - To send an ELS response, the caller must provide the OX_ID of the request being answered.
2687 * - Sending an FC Common Transport (FC-CT) request - To send an FC-CT request, the caller must provide the R_CTL, TYPE, and DF_CTL values to place in the FC frame header.
2694 efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io) in efct_els_hw_srrs_send() argument
2696 struct efct *efct = efc->base; in efct_els_hw_srrs_send()
2698 struct efct_hw *hw = &efct->hw; in efct_els_hw_srrs_send()
2699 struct efc_dma *send = &io->req; in efct_els_hw_srrs_send()
2700 struct efc_dma *receive = &io->rsp; in efct_els_hw_srrs_send()
2703 u32 len = io->xmit_len; in efct_els_hw_srrs_send()
2710 return -EIO; in efct_els_hw_srrs_send()
2713 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_els_hw_srrs_send()
2714 efc_log_debug(hw->os, in efct_els_hw_srrs_send()
2715 "cannot send SRRS, HW state=%d\n", hw->state); in efct_els_hw_srrs_send()
2716 return -EIO; in efct_els_hw_srrs_send()
2719 hio->done = efct_els_ssrs_send_cb; in efct_els_hw_srrs_send()
2720 hio->arg = io; in efct_els_hw_srrs_send()
2722 sge = hio->sgl->virt; in efct_els_hw_srrs_send()
2725 memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge)); in efct_els_hw_srrs_send()
2729 if (send->size) { in efct_els_hw_srrs_send()
2731 cpu_to_le32(upper_32_bits(send->phys)); in efct_els_hw_srrs_send()
2733 cpu_to_le32(lower_32_bits(send->phys)); in efct_els_hw_srrs_send()
2740 if (io->io_type == EFC_DISC_IO_ELS_REQ || in efct_els_hw_srrs_send()
2741 io->io_type == EFC_DISC_IO_CT_REQ) { in efct_els_hw_srrs_send()
2743 cpu_to_le32(upper_32_bits(receive->phys)); in efct_els_hw_srrs_send()
2745 cpu_to_le32(lower_32_bits(receive->phys)); in efct_els_hw_srrs_send()
2750 sge[1].buffer_length = cpu_to_le32(receive->size); in efct_els_hw_srrs_send()
2758 switch (io->io_type) { in efct_els_hw_srrs_send()
2762 hio->type = EFCT_HW_ELS_REQ; in efct_els_hw_srrs_send()
2763 efct_fill_els_params(io, &els_params); in efct_els_hw_srrs_send()
2764 els_params.xri = hio->indicator; in efct_els_hw_srrs_send()
2765 els_params.tag = hio->reqtag; in efct_els_hw_srrs_send()
2767 if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, in efct_els_hw_srrs_send()
2769 efc_log_err(hw->os, "REQ WQE error\n"); in efct_els_hw_srrs_send()
2770 rc = -EIO; in efct_els_hw_srrs_send()
2777 hio->type = EFCT_HW_ELS_RSP; in efct_els_hw_srrs_send()
2778 efct_fill_els_params(io, &els_params); in efct_els_hw_srrs_send()
2779 els_params.xri = hio->indicator; in efct_els_hw_srrs_send()
2780 els_params.tag = hio->reqtag; in efct_els_hw_srrs_send()
2781 if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send, in efct_els_hw_srrs_send()
2783 efc_log_err(hw->os, "RSP WQE error\n"); in efct_els_hw_srrs_send()
2784 rc = -EIO; in efct_els_hw_srrs_send()
2791 hio->type = EFCT_HW_FC_CT; in efct_els_hw_srrs_send()
2792 efct_fill_ct_params(io, &ct_params); in efct_els_hw_srrs_send()
2793 ct_params.xri = hio->indicator; in efct_els_hw_srrs_send()
2794 ct_params.tag = hio->reqtag; in efct_els_hw_srrs_send()
2795 if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, in efct_els_hw_srrs_send()
2797 efc_log_err(hw->os, "GEN WQE error\n"); in efct_els_hw_srrs_send()
2798 rc = -EIO; in efct_els_hw_srrs_send()
2805 hio->type = EFCT_HW_FC_CT_RSP; in efct_els_hw_srrs_send()
2806 efct_fill_ct_params(io, &ct_params); in efct_els_hw_srrs_send()
2807 ct_params.xri = hio->indicator; in efct_els_hw_srrs_send()
2808 ct_params.tag = hio->reqtag; in efct_els_hw_srrs_send()
2809 if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl, in efct_els_hw_srrs_send()
2811 efc_log_err(hw->os, "XMIT SEQ WQE error\n"); in efct_els_hw_srrs_send()
2812 rc = -EIO; in efct_els_hw_srrs_send()
2817 efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type); in efct_els_hw_srrs_send()
2818 rc = -EIO; in efct_els_hw_srrs_send()
2822 hio->xbusy = true; in efct_els_hw_srrs_send()
2825 * Add IO to active io wqe list before submitting, in case the completion processing preempts this thread. in efct_els_hw_srrs_send()
2828 hio->wq->use_count++; in efct_els_hw_srrs_send()
2829 rc = efct_hw_wq_write(hio->wq, &hio->wqe); in efct_els_hw_srrs_send()
2831 /* non-negative return is success */ in efct_els_hw_srrs_send()
2835 efc_log_err(hw->os, in efct_els_hw_srrs_send()
2837 hio->xbusy = false; in efct_els_hw_srrs_send()
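The SGL set up earlier in this function holds at most two entries: the command buffer, and, for request types, the response buffer. The sketch below models that shape with a simplified SGE; example_sge and build_srrs_sgl are hypothetical, and only the high/low address split and the two-entry layout are taken from the listing.

/* Illustrative only: two-entry request/response SGL. */
#include <stdint.h>
#include <string.h>

struct example_sge {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t length;
	uint32_t last;		/* marks the final entry of the list */
};

static void build_srrs_sgl(struct example_sge sgl[2],
			   uint64_t req_phys, uint32_t req_len,
			   uint64_t rsp_phys, uint32_t rsp_len,
			   int expects_response)
{
	memset(sgl, 0, 2 * sizeof(*sgl));

	sgl[0].addr_hi = (uint32_t)(req_phys >> 32);
	sgl[0].addr_lo = (uint32_t)req_phys;
	sgl[0].length  = req_len;

	if (expects_response) {
		sgl[1].addr_hi = (uint32_t)(rsp_phys >> 32);
		sgl[1].addr_lo = (uint32_t)rsp_phys;
		sgl[1].length  = rsp_len;
		sgl[1].last    = 1;
	} else {
		sgl[0].last = 1;
	}
}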
2846 struct efct_hw_io *io, union efct_hw_io_param_u *iparam, in efct_hw_io_send() argument
2852 if (!io) { in efct_hw_io_send()
2853 pr_err("bad param hw=%p io=%p\n", hw, io); in efct_hw_io_send()
2854 return -EIO; in efct_hw_io_send()
2857 if (hw->state != EFCT_HW_STATE_ACTIVE) { in efct_hw_io_send()
2858 efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state); in efct_hw_io_send()
2859 return -EIO; in efct_hw_io_send()
2865 io->type = type; in efct_hw_io_send()
2866 io->done = cb; in efct_hw_io_send()
2867 io->arg = arg; in efct_hw_io_send()
2870 * Format the work queue entry used to send the IO in efct_hw_io_send()
2874 u16 *flags = &iparam->fcp_tgt.flags; in efct_hw_io_send()
2875 struct fcp_txrdy *xfer = io->xfer_rdy.virt; in efct_hw_io_send()
2880 xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset); in efct_hw_io_send()
2881 xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len); in efct_hw_io_send()
2883 if (io->xbusy) in efct_hw_io_send()
2887 iparam->fcp_tgt.xri = io->indicator; in efct_hw_io_send()
2888 iparam->fcp_tgt.tag = io->reqtag; in efct_hw_io_send()
2890 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2891 &io->def_sgl, io->first_data_sge, in efct_hw_io_send()
2893 0, 0, &iparam->fcp_tgt)) { in efct_hw_io_send()
2894 efc_log_err(hw->os, "TRECEIVE WQE error\n"); in efct_hw_io_send()
2895 rc = -EIO; in efct_hw_io_send()
2900 u16 *flags = &iparam->fcp_tgt.flags; in efct_hw_io_send()
2902 if (io->xbusy) in efct_hw_io_send()
2907 iparam->fcp_tgt.xri = io->indicator; in efct_hw_io_send()
2908 iparam->fcp_tgt.tag = io->reqtag; in efct_hw_io_send()
2910 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2911 &io->def_sgl, io->first_data_sge, in efct_hw_io_send()
2913 0, 0, &iparam->fcp_tgt)) { in efct_hw_io_send()
2914 efc_log_err(hw->os, "TSEND WQE error\n"); in efct_hw_io_send()
2915 rc = -EIO; in efct_hw_io_send()
2920 u16 *flags = &iparam->fcp_tgt.flags; in efct_hw_io_send()
2922 if (io->xbusy) in efct_hw_io_send()
2927 iparam->fcp_tgt.xri = io->indicator; in efct_hw_io_send()
2928 iparam->fcp_tgt.tag = io->reqtag; in efct_hw_io_send()
2930 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, in efct_hw_io_send()
2931 &io->def_sgl, SLI4_CQ_DEFAULT, in efct_hw_io_send()
2932 0, &iparam->fcp_tgt)) { in efct_hw_io_send()
2933 efc_log_err(hw->os, "TRSP WQE error\n"); in efct_hw_io_send()
2934 rc = -EIO; in efct_hw_io_send()
2940 efc_log_err(hw->os, "unsupported IO type %#x\n", type); in efct_hw_io_send()
2941 rc = -EIO; in efct_hw_io_send()
2945 io->xbusy = true; in efct_hw_io_send()
2948 * Add IO to active io wqe list before submitting, in case the completion processing preempts this thread. in efct_hw_io_send()
2951 hw->tcmd_wq_submit[io->wq->instance]++; in efct_hw_io_send()
2952 io->wq->use_count++; in efct_hw_io_send()
2953 rc = efct_hw_wq_write(io->wq, &io->wqe); in efct_hw_io_send()
2955 /* non-negative return is success */ in efct_hw_io_send()
2959 efc_log_err(hw->os, in efct_hw_io_send()
2961 io->xbusy = false; in efct_hw_io_send()
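The transfer-ready fields written above (ft_data_ro, ft_burst_len) tell the initiator where the next burst of write data should land and how many bytes are solicited. A small illustrative helper showing how those two values could advance across bursts; example_xfer_rdy and next_burst are hypothetical, and it is assumed the caller tracks how much data has already been transferred.

/* Illustrative only: advancing relative offset and burst length. */
#include <stdint.h>

struct example_xfer_rdy {
	uint32_t data_ro;	/* relative offset into the command's data */
	uint32_t burst_len;	/* bytes solicited by this transfer-ready */
};

static uint32_t next_burst(struct example_xfer_rdy *xfer,
			   uint32_t done_so_far, uint32_t total,
			   uint32_t max_burst)
{
	/* caller guarantees done_so_far <= total */
	uint32_t remaining = total - done_so_far;
	uint32_t len = remaining < max_burst ? remaining : max_burst;

	xfer->data_ro = done_so_far;	/* mirrors the ft_data_ro usage above */
	xfer->burst_len = len;		/* mirrors the ft_burst_len usage above */
	return len;
}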
2980 wqe = &ctx->wqe; in efct_hw_send_frame()
2983 ctx->hw = hw; in efct_hw_send_frame()
2986 ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg); in efct_hw_send_frame()
2987 if (!ctx->wqcb) { in efct_hw_send_frame()
2988 efc_log_err(hw->os, "can't allocate request tag\n"); in efct_hw_send_frame()
2989 return -ENOSPC; in efct_hw_send_frame()
2992 wq = hw->hw_wq[0]; in efct_hw_send_frame()
2997 xri = wq->send_frame_io->indicator; in efct_hw_send_frame()
3000 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, in efct_hw_send_frame()
3001 sof, eof, (u32 *)hdr, payload, payload->len, in efct_hw_send_frame()
3003 ctx->wqcb->instance_index); in efct_hw_send_frame()
3005 efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc); in efct_hw_send_frame()
3006 return -EIO; in efct_hw_send_frame()
3012 efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc); in efct_hw_send_frame()
3013 return -EIO; in efct_hw_send_frame()
3016 wq->use_count++; in efct_hw_send_frame()
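efct_hw_send_frame() above allocates a request tag that carries the completion callback and writes the tag's index into the WQE so the completion path can find it again. The fragment below models that idea with a tiny static pool; tag_alloc, tag_complete and EXAMPLE_TAGS are hypothetical names, and locking is ignored.

/* Illustrative only: an index-addressed callback pool. */
#include <stddef.h>

#define EXAMPLE_TAGS 32

typedef void (*frame_cb)(int status, void *arg);

struct example_tag {
	int in_use;
	frame_cb cb;
	void *arg;
};

static struct example_tag tag_pool[EXAMPLE_TAGS];

static int tag_alloc(frame_cb cb, void *arg)
{
	int i;

	for (i = 0; i < EXAMPLE_TAGS; i++) {
		if (!tag_pool[i].in_use) {
			tag_pool[i].in_use = 1;
			tag_pool[i].cb = cb;
			tag_pool[i].arg = arg;
			return i;	/* this index goes into the WQE */
		}
	}
	return -1;			/* pool exhausted */
}

static void tag_complete(int index, int status)
{
	struct example_tag *t = &tag_pool[index];

	if (t->cb)
		t->cb(status, t->arg);
	t->in_use = 0;
}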
3032 mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags); in efct_hw_cb_link_stat()
3042 le32_to_cpu(mbox_rsp->linkfail_errcnt); in efct_hw_cb_link_stat()
3044 le32_to_cpu(mbox_rsp->losssync_errcnt); in efct_hw_cb_link_stat()
3046 le32_to_cpu(mbox_rsp->losssignal_errcnt); in efct_hw_cb_link_stat()
3048 le32_to_cpu(mbox_rsp->primseq_errcnt); in efct_hw_cb_link_stat()
3050 le32_to_cpu(mbox_rsp->inval_txword_errcnt); in efct_hw_cb_link_stat()
3052 le32_to_cpu(mbox_rsp->crc_errcnt); in efct_hw_cb_link_stat()
3054 le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt); in efct_hw_cb_link_stat()
3056 le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt); in efct_hw_cb_link_stat()
3058 le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt); in efct_hw_cb_link_stat()
3060 le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit); in efct_hw_cb_link_stat()
3062 le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit); in efct_hw_cb_link_stat()
3064 le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit); in efct_hw_cb_link_stat()
3066 le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit); in efct_hw_cb_link_stat()
3068 le32_to_cpu(mbox_rsp->rx_eofa_cnt); in efct_hw_cb_link_stat()
3070 le32_to_cpu(mbox_rsp->rx_eofdti_cnt); in efct_hw_cb_link_stat()
3072 le32_to_cpu(mbox_rsp->rx_eofni_cnt); in efct_hw_cb_link_stat()
3074 le32_to_cpu(mbox_rsp->rx_soff_cnt); in efct_hw_cb_link_stat()
3076 le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt); in efct_hw_cb_link_stat()
3078 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt); in efct_hw_cb_link_stat()
3080 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt); in efct_hw_cb_link_stat()
3083 if (cb_arg->cb) { in efct_hw_cb_link_stat()
3084 if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status)) in efct_hw_cb_link_stat()
3085 status = le16_to_cpu(mbox_rsp->hdr.status); in efct_hw_cb_link_stat()
3086 cb_arg->cb(status, num_counters, counts, cb_arg->arg); in efct_hw_cb_link_stat()
3103 int rc = -EIO; in efct_hw_get_link_stats()
3109 return -ENOMEM; in efct_hw_get_link_stats()
3111 cb_arg->cb = cb; in efct_hw_get_link_stats()
3112 cb_arg->arg = arg; in efct_hw_get_link_stats()
3115 if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters, in efct_hw_get_link_stats()
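Both statistics paths follow the same shape: allocate a small context holding the user's callback, issue the mailbox command, and let the completion handler decode the response, invoke the callback, and release the context. A stand-alone model of that pairing follows; stats_ctx, stats_request and stats_done are hypothetical, with malloc/free standing in for the driver's mempool.

/* Illustrative only: request/completion pairing for async stats. */
#include <stdlib.h>

typedef void (*stats_cb)(int status, void *arg);

struct stats_ctx {
	stats_cb cb;
	void *arg;
};

/* completion side: decode the response, call back, free the context */
static void stats_done(int status, struct stats_ctx *ctx)
{
	if (ctx->cb)
		ctx->cb(status, ctx->arg);
	free(ctx);
}

/* request side: package the user's callback with the command */
static int stats_request(stats_cb cb, void *arg,
			 int (*issue)(struct stats_ctx *))
{
	struct stats_ctx *ctx = malloc(sizeof(*ctx));
	int rc;

	if (!ctx)
		return -1;
	ctx->cb = cb;
	ctx->arg = arg;

	rc = issue(ctx);	/* on success, stats_done() frees ctx later */
	if (rc)
		free(ctx);	/* command never went out; clean up here */
	return rc;
}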
3139 le32_to_cpu(mbox_rsp->trans_kbyte_cnt); in efct_hw_cb_host_stat()
3141 le32_to_cpu(mbox_rsp->recv_kbyte_cnt); in efct_hw_cb_host_stat()
3143 le32_to_cpu(mbox_rsp->trans_frame_cnt); in efct_hw_cb_host_stat()
3145 le32_to_cpu(mbox_rsp->recv_frame_cnt); in efct_hw_cb_host_stat()
3147 le32_to_cpu(mbox_rsp->trans_seq_cnt); in efct_hw_cb_host_stat()
3149 le32_to_cpu(mbox_rsp->recv_seq_cnt); in efct_hw_cb_host_stat()
3151 le32_to_cpu(mbox_rsp->tot_exchanges_orig); in efct_hw_cb_host_stat()
3153 le32_to_cpu(mbox_rsp->tot_exchanges_resp); in efct_hw_cb_host_stat()
3155 le32_to_cpu(mbox_rsp->recv_p_bsy_cnt); in efct_hw_cb_host_stat()
3157 le32_to_cpu(mbox_rsp->recv_f_bsy_cnt); in efct_hw_cb_host_stat()
3159 le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt); in efct_hw_cb_host_stat()
3161 le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt); in efct_hw_cb_host_stat()
3163 le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt); in efct_hw_cb_host_stat()
3165 le32_to_cpu(mbox_rsp->empty_xri_pool_cnt); in efct_hw_cb_host_stat()
3168 if (cb_arg->cb) { in efct_hw_cb_host_stat()
3169 if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status)) in efct_hw_cb_host_stat()
3170 status = le16_to_cpu(mbox_rsp->hdr.status); in efct_hw_cb_host_stat()
3171 cb_arg->cb(status, num_counters, counts, cb_arg->arg); in efct_hw_cb_host_stat()
3187 int rc = -EIO; in efct_hw_get_host_stats()
3193 return -ENOMEM; in efct_hw_get_host_stats()
3195 cb_arg->cb = cb; in efct_hw_get_host_stats()
3196 cb_arg->arg = arg; in efct_hw_get_host_stats()
3199 if (!sli_cmd_read_status(&hw->sli, mbxdata, cc)) in efct_hw_get_host_stats()
3204 efc_log_debug(hw->os, "READ_HOST_STATS failed\n"); in efct_hw_get_host_stats()
3223 if (ctx->callback) in efct_hw_async_cb()
3224 (*ctx->callback)(hw, status, mqe, ctx->arg); in efct_hw_async_cb()
3243 return -ENOMEM; in efct_hw_async_call()
3245 ctx->callback = callback; in efct_hw_async_call()
3246 ctx->arg = arg; in efct_hw_async_call()
3249 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) { in efct_hw_async_call()
3250 efc_log_err(hw->os, "COMMON_NOP format failure\n"); in efct_hw_async_call()
3252 return -EIO; in efct_hw_async_call()
3255 rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb, in efct_hw_async_call()
3258 efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc); in efct_hw_async_call()
3260 return -EIO; in efct_hw_async_call()
3277 &mbox_rsp->payload.embed; in efct_hw_cb_fw_write()
3278 bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length); in efct_hw_cb_fw_write()
3279 mbox_status = le16_to_cpu(mbox_rsp->hdr.status); in efct_hw_cb_fw_write()
3280 change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) & in efct_hw_cb_fw_write()
3284 if (cb_arg->cb) { in efct_hw_cb_fw_write()
3287 cb_arg->cb(status, bytes_written, change_status, in efct_hw_cb_fw_write()
3288 cb_arg->arg); in efct_hw_cb_fw_write()
3304 int rc = -EIO; in efct_hw_firmware_write()
3311 return -ENOMEM; in efct_hw_firmware_write()
3313 cb_arg->cb = cb; in efct_hw_firmware_write()
3314 cb_arg->arg = arg; in efct_hw_firmware_write()
3317 if (!sli_cmd_common_write_object(&hw->sli, mbxdata, in efct_hw_firmware_write()
3324 efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n"); in efct_hw_firmware_write()
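efct_hw_firmware_write() above pushes one chunk of the image per WRITE_OBJECT command. A hedged sketch of how a caller could drive such a primitive in a loop; example_fw_download and the write_chunk callback are hypothetical, assuming only that the primitive takes a buffer, a length, an offset, and a last-chunk flag.

/* Illustrative only: chunked firmware download loop. */
#include <stddef.h>

static int example_fw_download(const unsigned char *image, size_t len,
			       size_t chunk,
			       int (*write_chunk)(const unsigned char *buf,
						  size_t n, size_t offset,
						  int last))
{
	size_t off = 0;

	while (off < len) {
		size_t n = (len - off > chunk) ? chunk : len - off;
		int last = (off + n == len);

		if (write_chunk(image + off, n, off, last))
			return -1;
		off += n;
	}
	return 0;
}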
3344 int rc = -EIO; in efct_hw_port_control()
3351 if (!sli_cmd_config_link(&hw->sli, link)) in efct_hw_port_control()
3356 efc_log_err(hw->os, "CONFIG_LINK failed\n"); in efct_hw_port_control()
3359 speed = hw->config.speed; in efct_hw_port_control()
3362 rc = -EIO; in efct_hw_port_control()
3363 if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa)) in efct_hw_port_control()
3368 efc_log_err(hw->os, "INIT_LINK failed\n"); in efct_hw_port_control()
3372 if (!sli_cmd_down_link(&hw->sli, link)) in efct_hw_port_control()
3377 efc_log_err(hw->os, "DOWN_LINK failed\n"); in efct_hw_port_control()
3381 efc_log_debug(hw->os, "unhandled control %#x\n", ctrl); in efct_hw_port_control()
3395 struct efct *efct = hw->os; in efct_hw_teardown()
3397 destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE); in efct_hw_teardown()
3398 free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED); in efct_hw_teardown()
3401 if (hw->sliport_healthcheck) { in efct_hw_teardown()
3402 hw->sliport_healthcheck = 0; in efct_hw_teardown()
3406 if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) { in efct_hw_teardown()
3407 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS; in efct_hw_teardown()
3411 if (list_empty(&hw->cmd_head)) in efct_hw_teardown()
3412 efc_log_debug(hw->os, in efct_hw_teardown()
3415 efc_log_debug(hw->os, in efct_hw_teardown()
3421 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS; in efct_hw_teardown()
3424 dma_free_coherent(&efct->pci->dev, in efct_hw_teardown()
3425 hw->rnode_mem.size, hw->rnode_mem.virt, in efct_hw_teardown()
3426 hw->rnode_mem.phys); in efct_hw_teardown()
3427 memset(&hw->rnode_mem, 0, sizeof(struct efc_dma)); in efct_hw_teardown()
3429 if (hw->io) { in efct_hw_teardown()
3430 for (i = 0; i < hw->config.n_io; i++) { in efct_hw_teardown()
3431 if (hw->io[i] && hw->io[i]->sgl && in efct_hw_teardown()
3432 hw->io[i]->sgl->virt) { in efct_hw_teardown()
3433 dma_free_coherent(&efct->pci->dev, in efct_hw_teardown()
3434 hw->io[i]->sgl->size, in efct_hw_teardown()
3435 hw->io[i]->sgl->virt, in efct_hw_teardown()
3436 hw->io[i]->sgl->phys); in efct_hw_teardown()
3438 kfree(hw->io[i]); in efct_hw_teardown()
3439 hw->io[i] = NULL; in efct_hw_teardown()
3441 kfree(hw->io); in efct_hw_teardown()
3442 hw->io = NULL; in efct_hw_teardown()
3443 kfree(hw->wqe_buffs); in efct_hw_teardown()
3444 hw->wqe_buffs = NULL; in efct_hw_teardown()
3447 dma = &hw->xfer_rdy; in efct_hw_teardown()
3448 dma_free_coherent(&efct->pci->dev, in efct_hw_teardown()
3449 dma->size, dma->virt, dma->phys); in efct_hw_teardown()
3452 dma = &hw->loop_map; in efct_hw_teardown()
3453 dma_free_coherent(&efct->pci->dev, in efct_hw_teardown()
3454 dma->size, dma->virt, dma->phys); in efct_hw_teardown()
3457 for (i = 0; i < hw->wq_count; i++) in efct_hw_teardown()
3458 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, in efct_hw_teardown()
3461 for (i = 0; i < hw->rq_count; i++) in efct_hw_teardown()
3462 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, in efct_hw_teardown()
3465 for (i = 0; i < hw->mq_count; i++) in efct_hw_teardown()
3466 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, in efct_hw_teardown()
3469 for (i = 0; i < hw->cq_count; i++) in efct_hw_teardown()
3470 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, in efct_hw_teardown()
3473 for (i = 0; i < hw->eq_count; i++) in efct_hw_teardown()
3474 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, in efct_hw_teardown()
3482 kfree(hw->wq_cpu_array); in efct_hw_teardown()
3484 sli_teardown(&hw->sli); in efct_hw_teardown()
3486 /* record the fact that the queues are non-functional */ in efct_hw_teardown()
3487 hw->state = EFCT_HW_STATE_UNINITIALIZED; in efct_hw_teardown()
3490 kfree(hw->seq_pool); in efct_hw_teardown()
3491 hw->seq_pool = NULL; in efct_hw_teardown()
3496 mempool_destroy(hw->cmd_ctx_pool); in efct_hw_teardown()
3497 mempool_destroy(hw->mbox_rqst_pool); in efct_hw_teardown()
3500 hw->hw_setup_called = false; in efct_hw_teardown()
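The teardown above releases each IO's DMA-backed SGL before the IO descriptor itself, and frees the descriptor array last. The same inside-out ordering, modeled with plain malloc/free and hypothetical names (example_io, free_io_array); this is a sketch of the pattern, not the driver's cleanup.

/* Illustrative only: inside-out release of an array of IO descriptors. */
#include <stdlib.h>

struct example_io {
	void *sgl;	/* stands in for the coherent SGL buffer */
};

static void free_io_array(struct example_io **io, unsigned int n)
{
	unsigned int i;

	if (!io)
		return;
	for (i = 0; i < n; i++) {
		if (io[i]) {
			free(io[i]->sgl);	/* inner buffer first */
			free(io[i]);		/* then the descriptor */
		}
	}
	free(io);				/* finally the array itself */
}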
3504 efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset, in efct_hw_sli_reset() argument
3509 switch (reset) { in efct_hw_sli_reset()
3511 efc_log_debug(hw->os, "issuing function level reset\n"); in efct_hw_sli_reset()
3512 if (sli_reset(&hw->sli)) { in efct_hw_sli_reset()
3513 efc_log_err(hw->os, "sli_reset failed\n"); in efct_hw_sli_reset()
3514 rc = -EIO; in efct_hw_sli_reset()
3518 efc_log_debug(hw->os, "issuing firmware reset\n"); in efct_hw_sli_reset()
3519 if (sli_fw_reset(&hw->sli)) { in efct_hw_sli_reset()
3520 efc_log_err(hw->os, "sli_fw_reset failed\n"); in efct_hw_sli_reset()
3521 rc = -EIO; in efct_hw_sli_reset()
3524 * Because the FW reset leaves the FW in a non-running state, in efct_hw_sli_reset()
3525 * follow that with a regular reset. in efct_hw_sli_reset()
3527 efc_log_debug(hw->os, "issuing function level reset\n"); in efct_hw_sli_reset()
3528 if (sli_reset(&hw->sli)) { in efct_hw_sli_reset()
3529 efc_log_err(hw->os, "sli_reset failed\n"); in efct_hw_sli_reset()
3530 rc = -EIO; in efct_hw_sli_reset()
3534 efc_log_err(hw->os, "unknown type - no reset performed\n"); in efct_hw_sli_reset()
3535 hw->state = prev_state; in efct_hw_sli_reset()
3536 rc = -EINVAL; in efct_hw_sli_reset()
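The firmware-reset branch above notes that a firmware reset leaves the firmware non-running and must be followed by a function-level reset. That ordering, expressed as a small helper with the two reset steps passed in as callbacks; example_fw_then_func_reset and its parameters are hypothetical, standing in for sli_fw_reset()/sli_reset().

/* Illustrative only: firmware reset chased by a function-level reset. */
static int example_fw_then_func_reset(int (*fw_reset)(void),
				      int (*func_reset)(void))
{
	if (fw_reset())
		return -1;
	/* firmware is now non-running; bring it back with a regular reset */
	return func_reset();
}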
3544 efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset) in efct_hw_reset() argument
3547 enum efct_hw_state prev_state = hw->state; in efct_hw_reset()
3549 if (hw->state != EFCT_HW_STATE_ACTIVE) in efct_hw_reset()
3550 efc_log_debug(hw->os, in efct_hw_reset()
3551 "HW state %d is not active\n", hw->state); in efct_hw_reset()
3553 hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS; in efct_hw_reset()
3556 * If the prev_state is already reset/teardown in progress, just issue the SLI reset and return. in efct_hw_reset()
3561 return efct_hw_sli_reset(hw, reset, prev_state); in efct_hw_reset()
3566 if (list_empty(&hw->cmd_head)) in efct_hw_reset()
3567 efc_log_debug(hw->os, in efct_hw_reset()
3570 efc_log_err(hw->os, in efct_hw_reset()
3574 /* Reset the chip */ in efct_hw_reset()
3575 rc = efct_hw_sli_reset(hw, reset, prev_state); in efct_hw_reset()
3576 if (rc == -EINVAL) in efct_hw_reset()
3577 return -EIO; in efct_hw_reset()