Lines Matching +full:lock +full:- +full:status
1 /*-
4 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
190 struct mtx lock; member
235 snprintf(cfg, sizeof(cfg), "hw.pvscsi.%d.%s", device_get_unit(sc->dev), in pvscsi_get_tunable()
246 if (!sc->frozen) { in pvscsi_freeze()
247 xpt_freeze_simq(sc->sim, 1); in pvscsi_freeze()
248 sc->frozen = 1; in pvscsi_freeze()
256 return (bus_read_4(sc->mm_res, offset)); in pvscsi_reg_read()
263 bus_write_4(sc->mm_res, offset, val); in pvscsi_reg_write()
286 if (sc->use_msg) { in pvscsi_intr_enable()
309 s = sc->rings_state; in pvscsi_kick_io()
311 if (!sc->use_req_call_threshold || in pvscsi_kick_io()
312 (s->req_prod_idx - s->req_cons_idx) >= in pvscsi_kick_io()
313 s->req_call_threshold) { in pvscsi_kick_io()
346 return (hcb - sc->hcbs + 1); in pvscsi_hcb_to_context()
353 return (sc->hcbs + (context - 1)); in pvscsi_context_to_hcb()
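The pair of helpers above implies that the 64-bit completion context is simply the hcb's index in sc->hcbs plus one, so a context of zero is never handed to the device. A minimal sketch of that mapping, with illustrative names and reusing the driver's own layout:

/*
 * Sketch only: restates the mapping shown in pvscsi_hcb_to_context()
 * and pvscsi_context_to_hcb(); these helper names are not the driver's.
 */
static inline uint64_t
hcb_index_to_context(uint64_t idx)
{
	return (idx + 1);		/* context 0 stays unused */
}

static inline uint64_t
context_to_hcb_index(uint64_t context)
{
	return (context - 1);
}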
361 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_hcb_get()
363 hcb = SLIST_FIRST(&sc->free_list); in pvscsi_hcb_get()
365 SLIST_REMOVE_HEAD(&sc->free_list, links); in pvscsi_hcb_get()
375 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_hcb_put()
376 hcb->ccb = NULL; in pvscsi_hcb_put()
377 hcb->e = NULL; in pvscsi_hcb_put()
378 hcb->recovery = PVSCSI_HCB_NONE; in pvscsi_hcb_put()
379 SLIST_INSERT_HEAD(&sc->free_list, hcb, links); in pvscsi_hcb_put()
400 uint32_t status; in pvscsi_setup_req_call() local
410 status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); in pvscsi_setup_req_call()
412 if (status != -1) { in pvscsi_setup_req_call()
417 status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); in pvscsi_setup_req_call()
419 return (status != 0); in pvscsi_setup_req_call()
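Read together with pvscsi_kick_io() above, these lines suggest the request-call threshold acts as an I/O coalescing knob: the doorbell register is written only when the feature is disabled or when the requests not yet consumed by the host reach req_call_threshold. A hedged sketch of that decision, assuming the rings_state fields quoted above:

/*
 * Illustrative helper only; mirrors the condition quoted from
 * pvscsi_kick_io() and is not part of the driver.
 */
static bool
pvscsi_should_kick(const struct pvscsi_rings_state *s, int use_req_call_threshold)
{
	if (!use_req_call_threshold)
		return (true);
	return ((s->req_prod_idx - s->req_cons_idx) >= s->req_call_threshold);
}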
435 *dest = segs->ds_addr; in pvscsi_dma_cb()
443 if (dma->tag != NULL) { in pvscsi_dma_free()
444 if (dma->paddr != 0) { in pvscsi_dma_free()
445 bus_dmamap_unload(dma->tag, dma->map); in pvscsi_dma_free()
448 if (dma->vaddr != NULL) { in pvscsi_dma_free()
449 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); in pvscsi_dma_free()
452 bus_dma_tag_destroy(dma->tag); in pvscsi_dma_free()
466 error = bus_dma_tag_create(sc->parent_dmat, alignment, 0, in pvscsi_dma_alloc()
468 BUS_DMA_ALLOCNOW, NULL, NULL, &dma->tag); in pvscsi_dma_alloc()
470 device_printf(sc->dev, "error creating dma tag, error %d\n", in pvscsi_dma_alloc()
475 error = bus_dmamem_alloc(dma->tag, &dma->vaddr, in pvscsi_dma_alloc()
476 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dma->map); in pvscsi_dma_alloc()
478 device_printf(sc->dev, "error allocating dma mem, error %d\n", in pvscsi_dma_alloc()
483 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, in pvscsi_dma_alloc()
484 pvscsi_dma_cb, &dma->paddr, BUS_DMA_NOWAIT); in pvscsi_dma_alloc()
486 device_printf(sc->dev, "error mapping dma mem, error %d\n", in pvscsi_dma_alloc()
491 dma->size = size; in pvscsi_dma_alloc()
510 device_printf(sc->dev, "Error allocating pages, error %d\n", in pvscsi_dma_alloc_ppns()
515 ppn = dma->paddr >> PAGE_SHIFT; in pvscsi_dma_alloc_ppns()
530 lock_owned = mtx_owned(&sc->lock); in pvscsi_dma_free_per_hcb()
533 mtx_unlock(&sc->lock); in pvscsi_dma_free_per_hcb()
536 hcb = sc->hcbs + i; in pvscsi_dma_free_per_hcb()
537 callout_drain(&hcb->callout); in pvscsi_dma_free_per_hcb()
540 mtx_lock(&sc->lock); in pvscsi_dma_free_per_hcb()
544 hcb = sc->hcbs + i; in pvscsi_dma_free_per_hcb()
545 bus_dmamap_destroy(sc->buffer_dmat, hcb->dma_map); in pvscsi_dma_free_per_hcb()
548 pvscsi_dma_free(sc, &sc->sense_buffer_dma); in pvscsi_dma_free_per_hcb()
549 pvscsi_dma_free(sc, &sc->sg_list_dma); in pvscsi_dma_free_per_hcb()
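In pvscsi_dma_free_per_hcb() above, sc->lock is dropped around the callout_drain() loop and re-taken before the DMA maps are destroyed; since each hcb callout is initialized with callout_init_mtx(..., &sc->lock, ...) further down in the listing, draining while holding that lock could deadlock against a running timeout handler. A minimal sketch of the drop/drain/reacquire pattern, with the loop bound as an assumed parameter:

/* Sketch of the pattern shown above; hcb_cnt is an assumed parameter. */
int lock_owned = mtx_owned(&sc->lock);

if (lock_owned)
	mtx_unlock(&sc->lock);		/* callout_drain() may sleep */
for (i = 0; i < hcb_cnt; ++i)
	callout_drain(&sc->hcbs[i].callout);
if (lock_owned)
	mtx_lock(&sc->lock);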
561 error = pvscsi_dma_alloc(sc, &sc->sg_list_dma, in pvscsi_dma_alloc_per_hcb()
562 sizeof(struct pvscsi_sg_list) * sc->hcb_cnt, 1); in pvscsi_dma_alloc_per_hcb()
564 device_printf(sc->dev, in pvscsi_dma_alloc_per_hcb()
569 error = pvscsi_dma_alloc(sc, &sc->sense_buffer_dma, in pvscsi_dma_alloc_per_hcb()
570 PVSCSI_SENSE_LENGTH * sc->hcb_cnt, 1); in pvscsi_dma_alloc_per_hcb()
572 device_printf(sc->dev, in pvscsi_dma_alloc_per_hcb()
577 for (i = 0; i < sc->hcb_cnt; ++i) { in pvscsi_dma_alloc_per_hcb()
578 hcb = sc->hcbs + i; in pvscsi_dma_alloc_per_hcb()
580 error = bus_dmamap_create(sc->buffer_dmat, 0, &hcb->dma_map); in pvscsi_dma_alloc_per_hcb()
582 device_printf(sc->dev, in pvscsi_dma_alloc_per_hcb()
588 hcb->sense_buffer = in pvscsi_dma_alloc_per_hcb()
589 (void *)((caddr_t)sc->sense_buffer_dma.vaddr + in pvscsi_dma_alloc_per_hcb()
591 hcb->sense_buffer_paddr = in pvscsi_dma_alloc_per_hcb()
592 sc->sense_buffer_dma.paddr + PVSCSI_SENSE_LENGTH * i; in pvscsi_dma_alloc_per_hcb()
594 hcb->sg_list = in pvscsi_dma_alloc_per_hcb()
595 (struct pvscsi_sg_list *)((caddr_t)sc->sg_list_dma.vaddr + in pvscsi_dma_alloc_per_hcb()
597 hcb->sg_list_paddr = in pvscsi_dma_alloc_per_hcb()
598 sc->sg_list_dma.paddr + sizeof(struct pvscsi_sg_list) * i; in pvscsi_dma_alloc_per_hcb()
600 callout_init_mtx(&hcb->callout, &sc->lock, 0); in pvscsi_dma_alloc_per_hcb()
603 SLIST_INIT(&sc->free_list); in pvscsi_dma_alloc_per_hcb()
604 for (i = (sc->hcb_cnt - 1); i >= 0; --i) { in pvscsi_dma_alloc_per_hcb()
605 hcb = sc->hcbs + i; in pvscsi_dma_alloc_per_hcb()
606 SLIST_INSERT_HEAD(&sc->free_list, hcb, links); in pvscsi_dma_alloc_per_hcb()
621 pvscsi_dma_free(sc, &sc->rings_state_dma); in pvscsi_free_rings()
622 pvscsi_dma_free(sc, &sc->req_ring_dma); in pvscsi_free_rings()
623 pvscsi_dma_free(sc, &sc->cmp_ring_dma); in pvscsi_free_rings()
624 if (sc->use_msg) { in pvscsi_free_rings()
625 pvscsi_dma_free(sc, &sc->msg_ring_dma); in pvscsi_free_rings()
634 error = pvscsi_dma_alloc_ppns(sc, &sc->rings_state_dma, in pvscsi_allocate_rings()
635 &sc->rings_state_ppn, 1); in pvscsi_allocate_rings()
637 device_printf(sc->dev, in pvscsi_allocate_rings()
641 sc->rings_state = sc->rings_state_dma.vaddr; in pvscsi_allocate_rings()
643 error = pvscsi_dma_alloc_ppns(sc, &sc->req_ring_dma, sc->req_ring_ppn, in pvscsi_allocate_rings()
644 sc->req_ring_num_pages); in pvscsi_allocate_rings()
646 device_printf(sc->dev, in pvscsi_allocate_rings()
650 sc->req_ring = sc->req_ring_dma.vaddr; in pvscsi_allocate_rings()
652 error = pvscsi_dma_alloc_ppns(sc, &sc->cmp_ring_dma, sc->cmp_ring_ppn, in pvscsi_allocate_rings()
653 sc->cmp_ring_num_pages); in pvscsi_allocate_rings()
655 device_printf(sc->dev, in pvscsi_allocate_rings()
659 sc->cmp_ring = sc->cmp_ring_dma.vaddr; in pvscsi_allocate_rings()
661 sc->msg_ring = NULL; in pvscsi_allocate_rings()
662 if (sc->use_msg) { in pvscsi_allocate_rings()
663 error = pvscsi_dma_alloc_ppns(sc, &sc->msg_ring_dma, in pvscsi_allocate_rings()
664 sc->msg_ring_ppn, sc->msg_ring_num_pages); in pvscsi_allocate_rings()
666 device_printf(sc->dev, in pvscsi_allocate_rings()
671 sc->msg_ring = sc->msg_ring_dma.vaddr; in pvscsi_allocate_rings()
674 DEBUG_PRINTF(1, sc->dev, "rings_state: %p\n", sc->rings_state); in pvscsi_allocate_rings()
675 DEBUG_PRINTF(1, sc->dev, "req_ring: %p - %u pages\n", sc->req_ring, in pvscsi_allocate_rings()
676 sc->req_ring_num_pages); in pvscsi_allocate_rings()
677 DEBUG_PRINTF(1, sc->dev, "cmp_ring: %p - %u pages\n", sc->cmp_ring, in pvscsi_allocate_rings()
678 sc->cmp_ring_num_pages); in pvscsi_allocate_rings()
679 DEBUG_PRINTF(1, sc->dev, "msg_ring: %p - %u pages\n", sc->msg_ring, in pvscsi_allocate_rings()
680 sc->msg_ring_num_pages); in pvscsi_allocate_rings()
697 cmd.rings_state_ppn = sc->rings_state_ppn; in pvscsi_setup_rings()
699 cmd.req_ring_num_pages = sc->req_ring_num_pages; in pvscsi_setup_rings()
700 for (i = 0; i < sc->req_ring_num_pages; ++i) { in pvscsi_setup_rings()
701 cmd.req_ring_ppns[i] = sc->req_ring_ppn[i]; in pvscsi_setup_rings()
704 cmd.cmp_ring_num_pages = sc->cmp_ring_num_pages; in pvscsi_setup_rings()
705 for (i = 0; i < sc->cmp_ring_num_pages; ++i) { in pvscsi_setup_rings()
706 cmd.cmp_ring_ppns[i] = sc->cmp_ring_ppn[i]; in pvscsi_setup_rings()
715 uint32_t status; in pvscsi_hw_supports_msg() local
719 status = pvscsi_reg_read(sc, PVSCSI_REG_OFFSET_COMMAND_STATUS); in pvscsi_hw_supports_msg()
721 return (status != -1); in pvscsi_hw_supports_msg()
730 KASSERT(sc->use_msg, ("msg is not being used")); in pvscsi_setup_msg_ring()
734 cmd.num_pages = sc->msg_ring_num_pages; in pvscsi_setup_msg_ring()
735 for (i = 0; i < sc->msg_ring_num_pages; ++i) { in pvscsi_setup_msg_ring()
736 cmd.ring_ppns[i] = sc->msg_ring_ppn[i]; in pvscsi_setup_msg_ring()
747 device_printf(sc->dev, "Adapter Reset\n"); in pvscsi_adapter_reset()
752 DEBUG_PRINTF(2, sc->dev, "adapter reset done: %u\n", val); in pvscsi_adapter_reset()
759 device_printf(sc->dev, "Bus Reset\n"); in pvscsi_bus_reset()
764 DEBUG_PRINTF(2, sc->dev, "bus reset done\n"); in pvscsi_bus_reset()
776 device_printf(sc->dev, "Device reset for target %u\n", target); in pvscsi_device_reset()
781 DEBUG_PRINTF(2, sc->dev, "device reset done\n"); in pvscsi_device_reset()
793 hcb = ccb->ccb_h.ccb_pvscsi_hcb; in pvscsi_abort()
802 device_printf(sc->dev, "Abort for target %u context %llx\n", in pvscsi_abort()
808 DEBUG_PRINTF(2, sc->dev, "abort done\n"); in pvscsi_abort()
810 DEBUG_PRINTF(1, sc->dev, in pvscsi_abort()
842 ccb = hcb->ccb; in pvscsi_timeout()
849 sc = ccb->ccb_h.ccb_pvscsi_sc; in pvscsi_timeout()
850 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_timeout()
852 device_printf(sc->dev, "Command timed out hcb=%p ccb=%p.\n", hcb, ccb); in pvscsi_timeout()
854 switch (hcb->recovery) { in pvscsi_timeout()
856 hcb->recovery = PVSCSI_HCB_ABORT; in pvscsi_timeout()
857 pvscsi_abort(sc, ccb->ccb_h.target_id, ccb); in pvscsi_timeout()
858 callout_reset_sbt(&hcb->callout, PVSCSI_ABORT_TIMEOUT * SBT_1S, in pvscsi_timeout()
862 hcb->recovery = PVSCSI_HCB_DEVICE_RESET; in pvscsi_timeout()
864 pvscsi_device_reset(sc, ccb->ccb_h.target_id); in pvscsi_timeout()
865 callout_reset_sbt(&hcb->callout, PVSCSI_RESET_TIMEOUT * SBT_1S, in pvscsi_timeout()
869 hcb->recovery = PVSCSI_HCB_BUS_RESET; in pvscsi_timeout()
872 callout_reset_sbt(&hcb->callout, PVSCSI_RESET_TIMEOUT * SBT_1S, in pvscsi_timeout()
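The timeout handler above escalates its recovery state on each expiry: an abort first, then a device reset, then a bus reset, re-arming hcb->callout after every step. An outline of that state machine follows; the case labels and the bus-reset call are inferred from the assignments shown, not quoted from the file:

/* Inferred outline of the pvscsi_timeout() recovery escalation (sketch only). */
switch (hcb->recovery) {
case PVSCSI_HCB_NONE:
	hcb->recovery = PVSCSI_HCB_ABORT;
	pvscsi_abort(sc, ccb->ccb_h.target_id, ccb);
	/* re-arm for PVSCSI_ABORT_TIMEOUT seconds */
	break;
case PVSCSI_HCB_ABORT:
	hcb->recovery = PVSCSI_HCB_DEVICE_RESET;
	pvscsi_device_reset(sc, ccb->ccb_h.target_id);
	/* re-arm for PVSCSI_RESET_TIMEOUT seconds */
	break;
case PVSCSI_HCB_DEVICE_RESET:
	hcb->recovery = PVSCSI_HCB_BUS_RESET;
	pvscsi_bus_reset(sc);	/* assumed; the call itself is not in the listing */
	/* re-arm for PVSCSI_RESET_TIMEOUT seconds */
	break;
default:
	break;
}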
888 uint32_t status; in pvscsi_process_completion() local
893 hcb = pvscsi_context_to_hcb(sc, e->context); in pvscsi_process_completion()
895 callout_stop(&hcb->callout); in pvscsi_process_completion()
897 ccb = hcb->ccb; in pvscsi_process_completion()
899 btstat = e->host_status; in pvscsi_process_completion()
900 sdstat = e->scsi_status; in pvscsi_process_completion()
902 ccb->csio.scsi_status = sdstat; in pvscsi_process_completion()
903 ccb->csio.resid = ccb->csio.dxfer_len - e->data_len; in pvscsi_process_completion()
905 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { in pvscsi_process_completion()
906 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { in pvscsi_process_completion()
911 bus_dmamap_sync(sc->buffer_dmat, hcb->dma_map, op); in pvscsi_process_completion()
912 bus_dmamap_unload(sc->buffer_dmat, hcb->dma_map); in pvscsi_process_completion()
916 DEBUG_PRINTF(3, sc->dev, in pvscsi_process_completion()
918 (unsigned long long)e->context); in pvscsi_process_completion()
919 ccb->csio.resid = 0; in pvscsi_process_completion()
920 status = CAM_REQ_CMP; in pvscsi_process_completion()
928 ccb->csio.resid = 0; in pvscsi_process_completion()
929 status = CAM_REQ_CMP; in pvscsi_process_completion()
932 status = CAM_SCSI_STATUS_ERROR; in pvscsi_process_completion()
934 if (ccb->csio.sense_len != 0) { in pvscsi_process_completion()
935 status |= CAM_AUTOSNS_VALID; in pvscsi_process_completion()
937 memset(&ccb->csio.sense_data, 0, in pvscsi_process_completion()
938 sizeof(ccb->csio.sense_data)); in pvscsi_process_completion()
939 memcpy(&ccb->csio.sense_data, in pvscsi_process_completion()
940 hcb->sense_buffer, in pvscsi_process_completion()
941 MIN(ccb->csio.sense_len, in pvscsi_process_completion()
942 e->sense_len)); in pvscsi_process_completion()
947 status = CAM_REQUEUE_REQ; in pvscsi_process_completion()
951 status = CAM_REQ_ABORTED; in pvscsi_process_completion()
954 DEBUG_PRINTF(1, sc->dev, in pvscsi_process_completion()
956 status = CAM_SCSI_STATUS_ERROR; in pvscsi_process_completion()
961 status = CAM_SEL_TIMEOUT; in pvscsi_process_completion()
965 status = CAM_DATA_RUN_ERR; in pvscsi_process_completion()
969 status = CAM_REQUEUE_REQ; in pvscsi_process_completion()
975 status = CAM_SCSI_BUS_RESET; in pvscsi_process_completion()
978 status = CAM_UNCOR_PARITY; in pvscsi_process_completion()
981 status = CAM_UNEXP_BUSFREE; in pvscsi_process_completion()
984 status = CAM_SEQUENCE_FAIL; in pvscsi_process_completion()
987 status = CAM_AUTOSENSE_FAIL; in pvscsi_process_completion()
994 status = CAM_REQ_CMP_ERR; in pvscsi_process_completion()
998 status = CAM_NO_HBA; in pvscsi_process_completion()
1001 device_printf(sc->dev, "unknown hba status: 0x%x\n", in pvscsi_process_completion()
1003 status = CAM_NO_HBA; in pvscsi_process_completion()
1007 DEBUG_PRINTF(3, sc->dev, in pvscsi_process_completion()
1008 "completing command context %llx btstat %x sdstat %x - status %x\n", in pvscsi_process_completion()
1009 (unsigned long long)e->context, btstat, sdstat, status); in pvscsi_process_completion()
1012 ccb->ccb_h.ccb_pvscsi_hcb = NULL; in pvscsi_process_completion()
1013 ccb->ccb_h.ccb_pvscsi_sc = NULL; in pvscsi_process_completion()
1016 ccb->ccb_h.status = in pvscsi_process_completion()
1017 status | (ccb->ccb_h.status & ~(CAM_STATUS_MASK | CAM_SIM_QUEUED)); in pvscsi_process_completion()
1019 if (sc->frozen) { in pvscsi_process_completion()
1020 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; in pvscsi_process_completion()
1021 sc->frozen = 0; in pvscsi_process_completion()
1024 if (status != CAM_REQ_CMP) { in pvscsi_process_completion()
1025 ccb->ccb_h.status |= CAM_DEV_QFRZN; in pvscsi_process_completion()
1026 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); in pvscsi_process_completion()
1039 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_process_cmp_ring()
1041 s = sc->rings_state; in pvscsi_process_cmp_ring()
1042 ring = sc->cmp_ring; in pvscsi_process_cmp_ring()
1043 mask = MASK(s->cmp_num_entries_log2); in pvscsi_process_cmp_ring()
1045 while (s->cmp_cons_idx != s->cmp_prod_idx) { in pvscsi_process_cmp_ring()
1046 e = ring + (s->cmp_cons_idx & mask); in pvscsi_process_cmp_ring()
1051 s->cmp_cons_idx++; in pvscsi_process_cmp_ring()
1061 switch (e->type) { in pvscsi_process_msg()
1066 device_printf(sc->dev, "MSG: device %s at scsi%u:%u:%u\n", in pvscsi_process_msg()
1067 desc->type == PVSCSI_MSG_DEV_ADDED ? "addition" : "removal", in pvscsi_process_msg()
1068 desc->bus, desc->target, desc->lun[1]); in pvscsi_process_msg()
1072 device_printf(sc->dev, in pvscsi_process_msg()
1077 if (xpt_create_path(&ccb->ccb_h.path, NULL, in pvscsi_process_msg()
1078 cam_sim_path(sc->sim), desc->target, desc->lun[1]) in pvscsi_process_msg()
1080 device_printf(sc->dev, in pvscsi_process_msg()
1089 device_printf(sc->dev, "Unknown msg type 0x%x\n", e->type); in pvscsi_process_msg()
1101 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_process_msg_ring()
1103 s = sc->rings_state; in pvscsi_process_msg_ring()
1104 ring = sc->msg_ring; in pvscsi_process_msg_ring()
1105 mask = MASK(s->msg_num_entries_log2); in pvscsi_process_msg_ring()
1107 while (s->msg_cons_idx != s->msg_prod_idx) { in pvscsi_process_msg_ring()
1108 e = ring + (s->msg_cons_idx & mask); in pvscsi_process_msg_ring()
1113 s->msg_cons_idx++; in pvscsi_process_msg_ring()
1122 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_intr_locked()
1129 if (sc->use_msg) { in pvscsi_intr_locked()
1142 mtx_assert(&sc->lock, MA_NOTOWNED); in pvscsi_intr()
1144 mtx_lock(&sc->lock); in pvscsi_intr()
1146 mtx_unlock(&sc->lock); in pvscsi_intr()
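The three lines above show the driver's interrupt entry discipline: assert the softc mutex is not held, take it, run the locked body, release it. A sketch of that wrapper, assuming the locked body is the pvscsi_intr_locked() quoted earlier:

/* Sketch of the locking wrapper visible in pvscsi_intr() above. */
static void
pvscsi_intr_sketch(void *xsc)
{
	struct pvscsi_softc *sc = xsc;

	mtx_assert(&sc->lock, MA_NOTOWNED);
	mtx_lock(&sc->lock);
	pvscsi_intr_locked(sc);
	mtx_unlock(&sc->lock);
}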
1156 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_poll()
1172 ccb = hcb->ccb; in pvscsi_execute_ccb()
1173 e = hcb->e; in pvscsi_execute_ccb()
1174 sc = ccb->ccb_h.ccb_pvscsi_sc; in pvscsi_execute_ccb()
1175 s = sc->rings_state; in pvscsi_execute_ccb()
1177 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_execute_ccb()
1180 device_printf(sc->dev, "pvscsi_execute_ccb error %d\n", error); in pvscsi_execute_ccb()
1183 ccb->ccb_h.status = CAM_REQ_TOO_BIG; in pvscsi_execute_ccb()
1185 ccb->ccb_h.status = CAM_REQ_CMP_ERR; in pvscsi_execute_ccb()
1193 e->flags = 0; in pvscsi_execute_ccb()
1195 switch (ccb->ccb_h.flags & CAM_DIR_MASK) { in pvscsi_execute_ccb()
1197 e->flags |= PVSCSI_FLAG_CMD_DIR_NONE; in pvscsi_execute_ccb()
1200 e->flags |= PVSCSI_FLAG_CMD_DIR_TOHOST; in pvscsi_execute_ccb()
1204 e->flags |= PVSCSI_FLAG_CMD_DIR_TODEVICE; in pvscsi_execute_ccb()
1220 sge = hcb->sg_list->sge; in pvscsi_execute_ccb()
1221 e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; in pvscsi_execute_ccb()
1229 e->data_addr = hcb->sg_list_paddr; in pvscsi_execute_ccb()
1231 e->data_addr = segs->ds_addr; in pvscsi_execute_ccb()
1234 bus_dmamap_sync(sc->buffer_dmat, hcb->dma_map, op); in pvscsi_execute_ccb()
1236 e->data_addr = 0; in pvscsi_execute_ccb()
1239 cdb0 = e->cdb[0]; in pvscsi_execute_ccb()
1240 ccb->ccb_h.status |= CAM_SIM_QUEUED; in pvscsi_execute_ccb()
1242 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { in pvscsi_execute_ccb()
1243 callout_reset_sbt(&hcb->callout, ccb->ccb_h.timeout * SBT_1MS, in pvscsi_execute_ccb()
1248 s->req_prod_idx++; in pvscsi_execute_ccb()
1259 ccb_h = &ccb->ccb_h; in pvscsi_action()
1261 mtx_assert(&sc->lock, MA_OWNED); in pvscsi_action()
1263 switch (ccb_h->func_code) { in pvscsi_action()
1273 csio = &ccb->csio; in pvscsi_action()
1274 ring = sc->req_ring; in pvscsi_action()
1275 s = sc->rings_state; in pvscsi_action()
1283 if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { in pvscsi_action()
1288 req_num_entries_log2 = s->req_num_entries_log2; in pvscsi_action()
1290 if (s->req_prod_idx - s->cmp_cons_idx >= in pvscsi_action()
1292 device_printf(sc->dev, in pvscsi_action()
1295 ccb_h->status = CAM_REQUEUE_REQ; in pvscsi_action()
1301 device_printf(sc->dev, "No free hcbs.\n"); in pvscsi_action()
1303 ccb_h->status = CAM_REQUEUE_REQ; in pvscsi_action()
1307 hcb->ccb = ccb; in pvscsi_action()
1308 ccb_h->ccb_pvscsi_hcb = hcb; in pvscsi_action()
1309 ccb_h->ccb_pvscsi_sc = sc; in pvscsi_action()
1311 if (csio->cdb_len > sizeof(e->cdb)) { in pvscsi_action()
1312 DEBUG_PRINTF(2, sc->dev, "cdb length %u too large\n", in pvscsi_action()
1313 csio->cdb_len); in pvscsi_action()
1314 ccb_h->status = CAM_REQ_INVALID; in pvscsi_action()
1318 if (ccb_h->flags & CAM_CDB_PHYS) { in pvscsi_action()
1319 DEBUG_PRINTF(2, sc->dev, in pvscsi_action()
1321 ccb_h->status = CAM_REQ_INVALID; in pvscsi_action()
1325 e = ring + (s->req_prod_idx & MASK(req_num_entries_log2)); in pvscsi_action()
1327 e->bus = cam_sim_bus(sim); in pvscsi_action()
1328 e->target = ccb_h->target_id; in pvscsi_action()
1329 memset(e->lun, 0, sizeof(e->lun)); in pvscsi_action()
1330 e->lun[1] = ccb_h->target_lun; in pvscsi_action()
1331 e->data_addr = 0; in pvscsi_action()
1332 e->data_len = csio->dxfer_len; in pvscsi_action()
1333 e->vcpu_hint = curcpu; in pvscsi_action()
1335 e->cdb_len = csio->cdb_len; in pvscsi_action()
1336 memcpy(e->cdb, scsiio_cdb_ptr(csio), csio->cdb_len); in pvscsi_action()
1338 e->sense_addr = 0; in pvscsi_action()
1339 e->sense_len = csio->sense_len; in pvscsi_action()
1340 if (e->sense_len > 0) { in pvscsi_action()
1341 e->sense_addr = hcb->sense_buffer_paddr; in pvscsi_action()
1344 e->tag = MSG_SIMPLE_Q_TAG; in pvscsi_action()
1345 if (ccb_h->flags & CAM_TAG_ACTION_VALID) { in pvscsi_action()
1346 e->tag = csio->tag_action; in pvscsi_action()
1349 e->context = pvscsi_hcb_to_context(sc, hcb); in pvscsi_action()
1350 hcb->e = e; in pvscsi_action()
1352 DEBUG_PRINTF(3, sc->dev, in pvscsi_action()
1353 " queuing command %02x context %llx\n", e->cdb[0], in pvscsi_action()
1354 (unsigned long long)e->context); in pvscsi_action()
1355 bus_dmamap_load_ccb(sc->buffer_dmat, hcb->dma_map, ccb, in pvscsi_action()
1370 abort_ccb = ccb->cab.abort_ccb; in pvscsi_action()
1371 abort_hcb = abort_ccb->ccb_h.ccb_pvscsi_hcb; in pvscsi_action()
1373 if (abort_hcb->ccb != NULL && abort_hcb->ccb == abort_ccb) { in pvscsi_action()
1374 if (abort_ccb->ccb_h.func_code == XPT_SCSI_IO) { in pvscsi_action()
1375 pvscsi_abort(sc, ccb_h->target_id, abort_ccb); in pvscsi_action()
1376 ccb_h->status = CAM_REQ_CMP; in pvscsi_action()
1378 ccb_h->status = CAM_UA_ABORT; in pvscsi_action()
1381 device_printf(sc->dev, in pvscsi_action()
1383 ccb, ccb_h->target_id); in pvscsi_action()
1384 ccb_h->status = CAM_REQ_CMP; in pvscsi_action()
1390 pvscsi_device_reset(sc, ccb_h->target_id); in pvscsi_action()
1391 ccb_h->status = CAM_REQ_CMP; in pvscsi_action()
1397 ccb_h->status = CAM_REQ_CMP; in pvscsi_action()
1404 cpi = &ccb->cpi; in pvscsi_action()
1406 cpi->version_num = 1; in pvscsi_action()
1407 cpi->hba_inquiry = PI_TAG_ABLE; in pvscsi_action()
1408 cpi->target_sprt = 0; in pvscsi_action()
1409 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; in pvscsi_action()
1410 cpi->hba_eng_cnt = 0; in pvscsi_action()
1411 /* cpi->vuhba_flags = 0; */ in pvscsi_action()
1412 cpi->max_target = sc->max_targets - 1; in pvscsi_action()
1413 cpi->max_lun = 0; in pvscsi_action()
1414 cpi->async_flags = 0; in pvscsi_action()
1415 cpi->hpath_id = 0; in pvscsi_action()
1416 cpi->unit_number = cam_sim_unit(sim); in pvscsi_action()
1417 cpi->bus_id = cam_sim_bus(sim); in pvscsi_action()
1418 cpi->initiator_id = 7; in pvscsi_action()
1419 cpi->base_transfer_speed = 750000; in pvscsi_action()
1420 strlcpy(cpi->sim_vid, "VMware", SIM_IDLEN); in pvscsi_action()
1421 strlcpy(cpi->hba_vid, "VMware", HBA_IDLEN); in pvscsi_action()
1422 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); in pvscsi_action()
1424 cpi->maxio = (PVSCSI_MAX_SG_ENTRIES_PER_SEGMENT / 2) * PAGE_SIZE; in pvscsi_action()
1425 cpi->protocol = PROTO_SCSI; in pvscsi_action()
1426 cpi->protocol_version = SCSI_REV_SPC2; in pvscsi_action()
1427 cpi->transport = XPORT_SAS; in pvscsi_action()
1428 cpi->transport_version = 0; in pvscsi_action()
1430 ccb_h->status = CAM_REQ_CMP; in pvscsi_action()
1437 cts = &ccb->cts; in pvscsi_action()
1439 cts->protocol = PROTO_SCSI; in pvscsi_action()
1440 cts->protocol_version = SCSI_REV_SPC2; in pvscsi_action()
1441 cts->transport = XPORT_SAS; in pvscsi_action()
1442 cts->transport_version = 0; in pvscsi_action()
1444 cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; in pvscsi_action()
1445 cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; in pvscsi_action()
1448 cts->xport_specific.sas.valid &= ~CTS_SAS_VALID_SPEED; in pvscsi_action()
1449 cts->xport_specific.sas.bitrate = 0; in pvscsi_action()
1451 ccb_h->status = CAM_REQ_CMP; in pvscsi_action()
1456 cam_calc_geometry(&ccb->ccg, 1); in pvscsi_action()
1460 ccb_h->status = CAM_REQ_INVALID; in pvscsi_action()
1470 if (sc->irq_handler != NULL) { in pvscsi_free_interrupts()
1471 bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handler); in pvscsi_free_interrupts()
1473 if (sc->irq_res != NULL) { in pvscsi_free_interrupts()
1474 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_id, in pvscsi_free_interrupts()
1475 sc->irq_res); in pvscsi_free_interrupts()
1477 if (sc->use_msi_or_msix) { in pvscsi_free_interrupts()
1478 pci_release_msi(sc->dev); in pvscsi_free_interrupts()
1491 sc->use_msi_or_msix = 0; in pvscsi_setup_interrupts()
1496 if (use_msix && pci_msix_count(sc->dev) > 0) { in pvscsi_setup_interrupts()
1498 if (pci_alloc_msix(sc->dev, &count) == 0 && count == 1) { in pvscsi_setup_interrupts()
1499 sc->use_msi_or_msix = 1; in pvscsi_setup_interrupts()
1500 device_printf(sc->dev, "Interrupt: MSI-X\n"); in pvscsi_setup_interrupts()
1502 pci_release_msi(sc->dev); in pvscsi_setup_interrupts()
1506 if (sc->use_msi_or_msix == 0 && use_msi && pci_msi_count(sc->dev) > 0) { in pvscsi_setup_interrupts()
1508 if (pci_alloc_msi(sc->dev, &count) == 0 && count == 1) { in pvscsi_setup_interrupts()
1509 sc->use_msi_or_msix = 1; in pvscsi_setup_interrupts()
1510 device_printf(sc->dev, "Interrupt: MSI\n"); in pvscsi_setup_interrupts()
1512 pci_release_msi(sc->dev); in pvscsi_setup_interrupts()
1517 if (sc->use_msi_or_msix) { in pvscsi_setup_interrupts()
1518 sc->irq_id = 1; in pvscsi_setup_interrupts()
1520 device_printf(sc->dev, "Interrupt: INT\n"); in pvscsi_setup_interrupts()
1521 sc->irq_id = 0; in pvscsi_setup_interrupts()
1525 sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_id, in pvscsi_setup_interrupts()
1527 if (sc->irq_res == NULL) { in pvscsi_setup_interrupts()
1528 device_printf(sc->dev, "IRQ allocation failed\n"); in pvscsi_setup_interrupts()
1529 if (sc->use_msi_or_msix) { in pvscsi_setup_interrupts()
1530 pci_release_msi(sc->dev); in pvscsi_setup_interrupts()
1535 error = bus_setup_intr(sc->dev, sc->irq_res, in pvscsi_setup_interrupts()
1537 &sc->irq_handler); in pvscsi_setup_interrupts()
1539 device_printf(sc->dev, "IRQ handler setup failed\n"); in pvscsi_setup_interrupts()
1551 if (sc->sim) { in pvscsi_free_all()
1554 if (sc->bus_path) { in pvscsi_free_all()
1555 xpt_free_path(sc->bus_path); in pvscsi_free_all()
1558 error = xpt_bus_deregister(cam_sim_path(sc->sim)); in pvscsi_free_all()
1560 device_printf(sc->dev, in pvscsi_free_all()
1564 cam_sim_free(sc->sim, TRUE); in pvscsi_free_all()
1567 pvscsi_dma_free_per_hcb(sc, sc->hcb_cnt); in pvscsi_free_all()
1569 if (sc->hcbs) { in pvscsi_free_all()
1570 free(sc->hcbs, M_PVSCSI); in pvscsi_free_all()
1577 if (sc->buffer_dmat != NULL) { in pvscsi_free_all()
1578 bus_dma_tag_destroy(sc->buffer_dmat); in pvscsi_free_all()
1581 if (sc->parent_dmat != NULL) { in pvscsi_free_all()
1582 bus_dma_tag_destroy(sc->parent_dmat); in pvscsi_free_all()
1585 if (sc->mm_res != NULL) { in pvscsi_free_all()
1586 bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->mm_rid, in pvscsi_free_all()
1587 sc->mm_res); in pvscsi_free_all()
1603 sc->dev = dev; in pvscsi_attach()
1605 mtx_init(&sc->lock, "pvscsi", NULL, MTX_DEF); in pvscsi_attach()
1609 sc->mm_rid = -1; in pvscsi_attach()
1613 sc->mm_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, in pvscsi_attach()
1615 if (sc->mm_res != NULL) { in pvscsi_attach()
1616 sc->mm_rid = rid; in pvscsi_attach()
1621 if (sc->mm_res == NULL) { in pvscsi_attach()
1629 &sc->parent_dmat); in pvscsi_attach()
1637 error = bus_dma_tag_create(sc->parent_dmat, 1, 0, in pvscsi_attach()
1641 NULL, NULL, &sc->buffer_dmat); in pvscsi_attach()
1656 sc->max_targets = pvscsi_get_max_targets(sc); in pvscsi_attach()
1658 sc->use_msg = pvscsi_get_tunable(sc, "use_msg", pvscsi_use_msg) && in pvscsi_attach()
1660 sc->msg_ring_num_pages = sc->use_msg ? 1 : 0; in pvscsi_attach()
1662 sc->req_ring_num_pages = pvscsi_get_tunable(sc, "request_ring_pages", in pvscsi_attach()
1664 if (sc->req_ring_num_pages <= 0) { in pvscsi_attach()
1665 if (sc->max_targets <= 16) { in pvscsi_attach()
1666 sc->req_ring_num_pages = in pvscsi_attach()
1669 sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING; in pvscsi_attach()
1671 } else if (sc->req_ring_num_pages > PVSCSI_MAX_NUM_PAGES_REQ_RING) { in pvscsi_attach()
1672 sc->req_ring_num_pages = PVSCSI_MAX_NUM_PAGES_REQ_RING; in pvscsi_attach()
1674 sc->cmp_ring_num_pages = sc->req_ring_num_pages; in pvscsi_attach()
1679 adapter_queue_size = (sc->req_ring_num_pages * PAGE_SIZE) / in pvscsi_attach()
1687 device_printf(sc->dev, "Use Msg: %d\n", sc->use_msg); in pvscsi_attach()
1688 device_printf(sc->dev, "Max targets: %d\n", sc->max_targets); in pvscsi_attach()
1689 device_printf(sc->dev, "REQ num pages: %d\n", sc->req_ring_num_pages); in pvscsi_attach()
1690 device_printf(sc->dev, "CMP num pages: %d\n", sc->cmp_ring_num_pages); in pvscsi_attach()
1691 device_printf(sc->dev, "MSG num pages: %d\n", sc->msg_ring_num_pages); in pvscsi_attach()
1692 device_printf(sc->dev, "Queue size: %d\n", adapter_queue_size); in pvscsi_attach()
1700 sc->hcb_cnt = adapter_queue_size; in pvscsi_attach()
1701 sc->hcbs = malloc(sc->hcb_cnt * sizeof(*sc->hcbs), M_PVSCSI, in pvscsi_attach()
1703 if (sc->hcbs == NULL) { in pvscsi_attach()
1724 sc->sim = cam_sim_alloc(pvscsi_action, pvscsi_poll, "pvscsi", sc, in pvscsi_attach()
1725 device_get_unit(dev), &sc->lock, 1, adapter_queue_size, devq); in pvscsi_attach()
1726 if (sc->sim == NULL) { in pvscsi_attach()
1733 mtx_lock(&sc->lock); in pvscsi_attach()
1735 if (xpt_bus_register(sc->sim, dev, 0) != CAM_SUCCESS) { in pvscsi_attach()
1738 mtx_unlock(&sc->lock); in pvscsi_attach()
1742 if (xpt_create_path(&sc->bus_path, NULL, cam_sim_path(sc->sim), in pvscsi_attach()
1746 mtx_unlock(&sc->lock); in pvscsi_attach()
1751 if (sc->use_msg) { in pvscsi_attach()
1755 sc->use_req_call_threshold = pvscsi_setup_req_call(sc, 1); in pvscsi_attach()
1759 mtx_unlock(&sc->lock); in pvscsi_attach()
1774 if (sc->irq_handler != NULL) { in pvscsi_detach()
1775 bus_teardown_intr(dev, sc->irq_res, sc->irq_handler); in pvscsi_detach()
1778 mtx_lock(&sc->lock); in pvscsi_detach()
1780 mtx_unlock(&sc->lock); in pvscsi_detach()
1782 mtx_destroy(&sc->lock); in pvscsi_detach()