Lines Matching +full:always +full:- +full:wait +full:- +full:for +full:- +full:ack

1 /*-
2 * Generic routines for LSI Fusion adapters.
5 * SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
30 /*-
42 * a substantially similar Disclaimer requirement for further binary
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64 * Support from LSI-Logic has also gone a great deal toward making this a
67 /*-
81 * a substantially similar Disclaimer requirement for further binary
89 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
91 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
154 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { in mpt_pers_find()
169 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { in mpt_pers_find_reverse()
170 start_at--; in mpt_pers_find_reverse()
178 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
180 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
183 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
185 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
234 * ordering information. We want the core to always register FIRST.
243 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
261 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { in mpt_modevent()
269 pers->id = i; in mpt_modevent()
272 /* Install standard/noop handlers for any NULL entries. */ in mpt_modevent()
282 error = (pers->load(pers)); in mpt_modevent()
292 error = pers->unload(pers); in mpt_modevent()
293 mpt_personalities[pers->id] = NULL; in mpt_modevent()
306 /* Load is always successful. */ in mpt_stdload()
314 /* Probe is always successful. */ in mpt_stdprobe()
322 /* Attach is always successful. */ in mpt_stdattach()
330 /* Enable is always successful. */ in mpt_stdenable()
344 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); in mpt_stdevent()
345 /* Event was not for us. */ in mpt_stdevent()
371 /* Unload is always successful. */ in mpt_stdunload()
388 pers->ready(mpt); in mpt_postattach()
400 map_info->error = error; in mpt_map_rquest()
401 map_info->phys = segs->ds_addr; in mpt_map_rquest()
420 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { in mpt_register_handler()
490 req->index, req->serno, reply_desc, reply_frame); in mpt_default_reply_handler()
510 cfgp = (MSG_CONFIG *)req->req_vbuf; in mpt_config_reply_handler()
512 req->IOCStatus = le16toh(reply_frame->IOCStatus); in mpt_config_reply_handler()
513 bcopy(&reply->Header, &cfgp->Header, in mpt_config_reply_handler()
514 sizeof(cfgp->Header)); in mpt_config_reply_handler()
515 cfgp->ExtPageLength = reply->ExtPageLength; in mpt_config_reply_handler()
516 cfgp->ExtPageType = reply->ExtPageType; in mpt_config_reply_handler()
518 req->state &= ~REQ_STATE_QUEUED; in mpt_config_reply_handler()
519 req->state |= REQ_STATE_DONE; in mpt_config_reply_handler()
520 TAILQ_REMOVE(&mpt->request_pending_list, req, links); in mpt_config_reply_handler()
521 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { in mpt_config_reply_handler()
523 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { in mpt_config_reply_handler()
525 * Whew- we can free this request (late completion) in mpt_config_reply_handler()
553 switch (reply_frame->Function) { in mpt_event_reply_handler()
562 msg->EventDataLength = le16toh(msg->EventDataLength); in mpt_event_reply_handler()
563 msg->IOCStatus = le16toh(msg->IOCStatus); in mpt_event_reply_handler()
564 msg->IOCLogInfo = le32toh(msg->IOCLogInfo); in mpt_event_reply_handler()
565 msg->Event = le32toh(msg->Event); in mpt_event_reply_handler()
567 handled += pers->event(mpt, req, msg); in mpt_event_reply_handler()
569 if (handled == 0 && mpt->mpt_pers_mask == 0) { in mpt_event_reply_handler()
571 "No Handlers For Any Event Notify Frames. " in mpt_event_reply_handler()
572 "Event %#x (ACK %sequired).\n", in mpt_event_reply_handler()
573 msg->Event, msg->AckRequired? "r" : "not r"); in mpt_event_reply_handler()
576 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO, in mpt_event_reply_handler()
578 "(ACK %sequired).\n", in mpt_event_reply_handler()
579 msg->Event, msg->AckRequired? "r" : "not r"); in mpt_event_reply_handler()
582 if (msg->AckRequired) { in mpt_event_reply_handler()
586 context = req->index | MPT_REPLY_HANDLER_EVENTS; in mpt_event_reply_handler()
592 evtf->context = context; in mpt_event_reply_handler()
593 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); in mpt_event_reply_handler()
599 * Don't check for CONTINUATION_REPLY here in mpt_event_reply_handler()
612 reply_frame->Function); in mpt_event_reply_handler()
624 * Let's just be safe for now and not free them up until we figure in mpt_event_reply_handler()
628 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { in mpt_event_reply_handler()
629 TAILQ_REMOVE(&mpt->request_pending_list, req, links); in mpt_event_reply_handler()
631 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", in mpt_event_reply_handler()
632 reply_frame->Function, req, req->serno); in mpt_event_reply_handler()
633 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { in mpt_event_reply_handler()
637 msg->Event, msg->AckRequired); in mpt_event_reply_handler()
640 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", in mpt_event_reply_handler()
641 reply_frame->Function, req, req->serno); in mpt_event_reply_handler()
642 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { in mpt_event_reply_handler()
646 msg->Event, msg->AckRequired); in mpt_event_reply_handler()
663 msg->Event & 0xFF); in mpt_core_event()
664 switch(msg->Event & 0xFF) { in mpt_core_event()
673 msg->IOCLogInfo); in mpt_core_event()
675 for (i = 0; i < msg->EventDataLength; i++) in mpt_core_event()
676 mpt_prtc(mpt, " %08x", msg->Data[i]); in mpt_core_event()
701 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; in mpt_send_event_ack()
703 ackp->Function = MPI_FUNCTION_EVENT_ACK; in mpt_send_event_ack()
704 ackp->Event = htole32(msg->Event); in mpt_send_event_ack()
705 ackp->EventContext = htole32(msg->EventContext); in mpt_send_event_ack()
706 ackp->MsgContext = htole32(context); in mpt_send_event_ack()
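
The lines above show mpt_send_event_ack() filling in an EVENT_ACK frame. A minimal sketch of the whole routine as those fragments imply it; the memset of the frame and the final mpt_send_cmd() call are assumptions, not quoted source:

	static void
	event_ack_sketch(struct mpt_softc *mpt, request_t *ack_req,
	    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
	{
		MSG_EVENT_ACK *ackp;

		ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));			/* assumed: clear the frame first */
		ackp->Function = MPI_FUNCTION_EVENT_ACK;	/* tell the IOC this is an ack */
		ackp->Event = htole32(msg->Event);		/* echo the event code */
		ackp->EventContext = htole32(msg->EventContext); /* echo the IOC's context */
		ackp->MsgContext = htole32(context);		/* routes the reply back to our handler */
		mpt_send_cmd(mpt, ack_req);			/* assumed: post to the request queue */
	}
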
742 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); in mpt_intr()
743 bus_dmamap_sync_range(mpt->reply_dmat, in mpt_intr()
744 mpt->reply_dmap, offset, MPT_REPLY_SIZE, in mpt_intr()
747 ctxt_idx = le32toh(reply_frame->MsgContext); in mpt_intr()
762 if (mpt->tgt_cmd_ptrs == NULL) { in mpt_intr()
768 if (ctxt_idx >= mpt->tgt_cmds_allocated) { in mpt_intr()
776 req = mpt->tgt_cmd_ptrs[ctxt_idx]; in mpt_intr()
791 req->index | mpt->scsi_tgt_handler_id; in mpt_intr()
815 req = &mpt->request_pool[req_index]; in mpt_intr()
821 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, in mpt_intr()
827 bus_dmamap_sync_range(mpt->reply_dmat, in mpt_intr()
828 mpt->reply_dmap, offset, MPT_REPLY_SIZE, in mpt_intr()
836 if (mpt->disabled) { in mpt_intr()
862 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, in mpt_complete_request_chain()
864 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; in mpt_complete_request_chain()
865 ioc_status_frame.Function = msg_hdr->Function; in mpt_complete_request_chain()
866 ioc_status_frame.MsgContext = msg_hdr->MsgContext; in mpt_complete_request_chain()
867 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); in mpt_complete_request_chain()
868 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, in mpt_complete_request_chain()
905 /* Busy wait for a door bell to be read by IOC */
911 for (i=0; i < MPT_MAX_WAIT; i++) { in mpt_wait_db_ack()
921 /* Busy wait for a door bell interrupt */
927 for (i = 0; i < MPT_MAX_WAIT; i++) { in mpt_wait_db_int()
937 /* Wait for IOC to transition to a given state */
949 /* Wait for IOC to transition to a given state */
955 for (i = 0; i < MPT_MAX_WAIT; i++) { in mpt_wait_state()
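
mpt_wait_db_ack(), mpt_wait_db_int() and mpt_wait_state() above all use the same bounded busy-wait idiom. A generic sketch of that idiom, assuming a hypothetical register/mask condition and a short DELAY() between polls (the real routines each poll a specific doorbell or state register):

	/* Hypothetical helper, not part of the driver. */
	static int
	mpt_poll_sketch(struct mpt_softc *mpt, u_int offset, uint32_t mask,
	    uint32_t want)
	{
		int i;

		for (i = 0; i < MPT_MAX_WAIT; i++) {
			if ((mpt_read(mpt, offset) & mask) == want)
				return (MPT_OK);	/* condition satisfied */
			DELAY(100);			/* assumed pause between register reads */
		}
		return (MPT_FAIL);			/* gave up after MPT_MAX_WAIT tries */
	}
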
995 mpt_prt(mpt, "soft reset failed: ack timeout\n"); in mpt_soft_reset()
999 /* Wait for the IOC to reload and come out of reset state */ in mpt_soft_reset()
1014 while (--try) { in mpt_enable_diag_mode()
1047 int wait; in mpt_hard_reset() local
1052 if (mpt->is_1078) { in mpt_hard_reset()
1060 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); in mpt_hard_reset()
1067 * This appears to be a workaround required for some in mpt_hard_reset()
1082 wait = 5000; in mpt_hard_reset()
1086 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); in mpt_hard_reset()
1088 if (wait == 0) { in mpt_hard_reset()
1089 mpt_prt(mpt, "WARNING - Failed hard reset! " in mpt_hard_reset()
1097 if (mpt->fw_image != NULL) { in mpt_hard_reset()
1101 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); in mpt_hard_reset()
1120 * appropriate for an IOC reset. in mpt_core_ioc_reset()
1122 mpt_complete_request_chain(mpt, &mpt->request_pending_list, in mpt_core_ioc_reset()
1145 for (cnt = 0; cnt < 5; cnt++) { in mpt_reset()
1150 * Wait for the IOC to reload in mpt_reset()
1158 * Okay- try to check again... in mpt_reset()
1173 * the specified wait condition, it should stop its wait. in mpt_reset()
1175 mpt->reset_cnt++; in mpt_reset()
1177 pers->reset(mpt, ret); in mpt_reset()
1200 if (req == NULL || req != &mpt->request_pool[req->index]) { in mpt_free_request()
1203 if ((nxt = req->chain) != NULL) { in mpt_free_request()
1204 req->chain = NULL; in mpt_free_request()
1207 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request")); in mpt_free_request()
1208 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request")); in mpt_free_request()
1212 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); in mpt_free_request()
1215 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); in mpt_free_request()
1220 req->ccb = NULL; in mpt_free_request()
1221 if (LIST_EMPTY(&mpt->ack_frames)) { in mpt_free_request()
1225 req->serno = 0; in mpt_free_request()
1226 req->state = REQ_STATE_FREE; in mpt_free_request()
1228 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER)); in mpt_free_request()
1230 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links); in mpt_free_request()
1231 if (mpt->getreqwaiter != 0) { in mpt_free_request()
1232 mpt->getreqwaiter = 0; in mpt_free_request()
1233 wakeup(&mpt->request_free_list); in mpt_free_request()
1239 * Process an ack frame deferred due to resource shortage. in mpt_free_request()
1241 record = LIST_FIRST(&mpt->ack_frames); in mpt_free_request()
1243 req->state = REQ_STATE_ALLOCATED; in mpt_free_request()
1245 mpt_send_event_ack(mpt, req, &record->reply, record->context); in mpt_free_request()
1246 offset = (uint32_t)((uint8_t *)record - mpt->reply); in mpt_free_request()
1247 reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF); in mpt_free_request()
1248 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset, in mpt_free_request()
1261 req = TAILQ_FIRST(&mpt->request_free_list); in mpt_get_request()
1263 KASSERT(req == &mpt->request_pool[req->index], in mpt_get_request()
1265 KASSERT(req->state == REQ_STATE_FREE, in mpt_get_request()
1267 req, req->serno, req->state, req->index, in mpt_get_request()
1268 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); in mpt_get_request()
1269 TAILQ_REMOVE(&mpt->request_free_list, req, links); in mpt_get_request()
1270 req->state = REQ_STATE_ALLOCATED; in mpt_get_request()
1271 req->chain = NULL; in mpt_get_request()
1274 mpt->getreqwaiter = 1; in mpt_get_request()
1275 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); in mpt_get_request()
1286 if (mpt->verbose > MPT_PRT_DEBUG2) { in mpt_send_cmd()
1289 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, in mpt_send_cmd()
1291 req->state |= REQ_STATE_QUEUED; in mpt_send_cmd()
1294 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); in mpt_send_cmd()
1297 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); in mpt_send_cmd()
1298 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); in mpt_send_cmd()
1299 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); in mpt_send_cmd()
1303 * Wait for a request to complete.
1307 * req request to wait for
1313 * non-0 Timeout fired before request completion.
1325 * time_ms is in ms, 0 indicates infinite wait. in mpt_wait_req()
1337 req->state |= REQ_STATE_NEED_WAKEUP; in mpt_wait_req()
1339 saved_cnt = mpt->reset_cnt; in mpt_wait_req()
1340 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { in mpt_wait_req()
1348 if (time_ms != 0 && --timeout == 0) { in mpt_wait_req()
1355 req->state &= ~REQ_STATE_NEED_WAKEUP; in mpt_wait_req()
1356 if (mpt->reset_cnt != saved_cnt) { in mpt_wait_req()
1360 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; in mpt_wait_req()
1361 req->state |= REQ_STATE_TIMEDOUT; in mpt_wait_req()
1362 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); in mpt_wait_req()
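
Taken together, mpt_get_request(), mpt_send_cmd(), mpt_wait_req() and mpt_free_request() form the synchronous request lifecycle. A hedged sketch of a typical caller; the PORT_ENABLE message, the CONFIG reply-handler context and the 30-second timeout are borrowed from the port-enable fragments later in this listing, and the error handling is simplified:

	static int
	sync_request_sketch(struct mpt_softc *mpt)
	{
		request_t *req;
		MSG_PORT_ENABLE *msg;			/* illustrative message type */
		int error;

		req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
		if (req == NULL)
			return (ENOMEM);
		msg = req->req_vbuf;
		memset(msg, 0, sizeof(*msg));
		msg->Function = MPI_FUNCTION_PORT_ENABLE;
		msg->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
		mpt_send_cmd(mpt, req);			/* queue the frame to the IOC */
		error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
		    /*sleep_ok*/FALSE, /*time_ms*/30000);
		if (error == 0)
			mpt_free_request(mpt, req);	/* on timeout, late completion frees it */
		return (error);
	}
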
1371 * Only done at initialization time and for certain unusual
1386 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); in mpt_send_handshake_cmd()
1407 /* Wait for the chip to notice */ in mpt_send_handshake_cmd()
1417 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n"); in mpt_send_handshake_cmd()
1422 for (i = 0; i < len; i++) { in mpt_send_handshake_cmd()
1468 * Warn about a reply that's too short (except for IOC FACTS REPLY) in mpt_recv_handshake_reply()
1470 if ((reply_len >> 1) != hdr->MsgLength && in mpt_recv_handshake_reply()
1471 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ in mpt_recv_handshake_reply()
1473 "got %x; expected %zx for function %x\n", in mpt_recv_handshake_reply()
1474 hdr->MsgLength << 2, reply_len << 1, hdr->Function); in mpt_recv_handshake_reply()
1478 left = (hdr->MsgLength << 1) - 2; in mpt_recv_handshake_reply()
1479 reply_left = reply_len - 2; in mpt_recv_handshake_reply()
1480 while (left--) { in mpt_recv_handshake_reply()
1486 if (reply_left-- > 0) in mpt_recv_handshake_reply()
1491 /* One more wait & clear at the end */ in mpt_recv_handshake_reply()
1498 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { in mpt_recv_handshake_reply()
1499 if (mpt->verbose >= MPT_PRT_TRACE) in mpt_recv_handshake_reply()
1501 return (MPT_FAIL | hdr->IOCStatus); in mpt_recv_handshake_reply()
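
mpt_recv_handshake_reply() pulls the reply out of the doorbell register one 16-bit word at a time, waiting for a doorbell interrupt before each read and dropping any words that overflow the caller's buffer. A hedged sketch of that inner loop under the driver's headers; the 0xffff data mask and the interrupt-status clear are assumptions based on the fragments above:

	static int
	handshake_drain_sketch(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *hdr,
	    uint16_t *data16, int reply_left)
	{
		int left = (hdr->MsgLength << 1) - 2;	/* 16-bit words the IOC will send */
		uint32_t datum;

		while (left--) {
			if (mpt_wait_db_int(mpt) != MPT_OK)	/* IOC signals each word */
				return (ETIMEDOUT);
			datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);
			if (reply_left-- > 0)			/* keep only what fits */
				*data16++ = le16toh(datum & 0xffff); /* low 16 bits carry the data */
			mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); /* clear the doorbell interrupt */
		}
		return (MPT_OK);
	}
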
1584 cfgp = req->req_vbuf; in mpt_issue_cfg_req()
1586 cfgp->Action = params->Action; in mpt_issue_cfg_req()
1587 cfgp->Function = MPI_FUNCTION_CONFIG; in mpt_issue_cfg_req()
1588 cfgp->Header.PageVersion = params->PageVersion; in mpt_issue_cfg_req()
1589 cfgp->Header.PageNumber = params->PageNumber; in mpt_issue_cfg_req()
1590 cfgp->PageAddress = htole32(params->PageAddress); in mpt_issue_cfg_req()
1591 if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) == in mpt_issue_cfg_req()
1593 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; in mpt_issue_cfg_req()
1594 cfgp->Header.PageLength = 0; in mpt_issue_cfg_req()
1595 cfgp->ExtPageLength = htole16(params->ExtPageLength); in mpt_issue_cfg_req()
1596 cfgp->ExtPageType = params->ExtPageType; in mpt_issue_cfg_req()
1598 cfgp->Header.PageType = params->PageType; in mpt_issue_cfg_req()
1599 cfgp->Header.PageLength = params->PageLength; in mpt_issue_cfg_req()
1601 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; in mpt_issue_cfg_req()
1602 se->Address = htole32(addr); in mpt_issue_cfg_req()
1607 ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT in mpt_issue_cfg_req()
1608 || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) in mpt_issue_cfg_req()
1610 se->FlagsLength = htole32(se->FlagsLength); in mpt_issue_cfg_req()
1611 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); in mpt_issue_cfg_req()
1657 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { in mpt_read_extcfg_header()
1659 cfgp = req->req_vbuf; in mpt_read_extcfg_header()
1660 rslt->PageVersion = cfgp->Header.PageVersion; in mpt_read_extcfg_header()
1661 rslt->PageNumber = cfgp->Header.PageNumber; in mpt_read_extcfg_header()
1662 rslt->PageType = cfgp->Header.PageType; in mpt_read_extcfg_header()
1663 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength); in mpt_read_extcfg_header()
1664 rslt->ExtPageType = cfgp->ExtPageType; in mpt_read_extcfg_header()
1675 req->IOCStatus); in mpt_read_extcfg_header()
1695 return (-1); in mpt_read_extcfg_page()
1699 params.PageVersion = hdr->PageVersion; in mpt_read_extcfg_page()
1701 params.PageNumber = hdr->PageNumber; in mpt_read_extcfg_page()
1704 params.ExtPageType = hdr->ExtPageType; in mpt_read_extcfg_page()
1705 params.ExtPageLength = hdr->ExtPageLength; in mpt_read_extcfg_page()
1707 req->req_pbuf + MPT_RQSL(mpt), in mpt_read_extcfg_page()
1711 return (-1); in mpt_read_extcfg_page()
1714 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { in mpt_read_extcfg_page()
1716 req->IOCStatus); in mpt_read_extcfg_page()
1718 return (-1); in mpt_read_extcfg_page()
1720 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); in mpt_read_extcfg_page()
1760 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { in mpt_read_cfg_header()
1762 cfgp = req->req_vbuf; in mpt_read_cfg_header()
1763 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); in mpt_read_cfg_header()
1774 req->IOCStatus); in mpt_read_cfg_header()
1794 return (-1); in mpt_read_cfg_page()
1798 params.PageVersion = hdr->PageVersion; in mpt_read_cfg_page()
1799 params.PageLength = hdr->PageLength; in mpt_read_cfg_page()
1800 params.PageNumber = hdr->PageNumber; in mpt_read_cfg_page()
1801 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK; in mpt_read_cfg_page()
1804 req->req_pbuf + MPT_RQSL(mpt), in mpt_read_cfg_page()
1808 return (-1); in mpt_read_cfg_page()
1811 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { in mpt_read_cfg_page()
1813 req->IOCStatus); in mpt_read_cfg_page()
1815 return (-1); in mpt_read_cfg_page()
1817 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); in mpt_read_cfg_page()
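
Configuration pages are read in two steps: fetch the page header first, then use it to size and fetch the page itself. A hedged usage sketch following the calls visible above and in mpt_read_config_info_ioc() below; the page type/number are illustrative and the exact argument order of the helpers is assumed:

	static int
	read_ioc_page2_sketch(struct mpt_softc *mpt)
	{
		CONFIG_PAGE_HEADER hdr;
		CONFIG_PAGE_IOC_2 *page2;
		int len, rv;

		/* Step 1: ask the IOC how big the page is. */
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, /*PageNumber*/2,
		    /*PageAddress*/0, &hdr, /*sleep_ok*/FALSE, /*timeout_ms*/5000);
		if (rv != 0)
			return (rv);

		/* Step 2: allocate a buffer and read the full page behind the header. */
		len = hdr.PageLength * sizeof(uint32_t);	/* PageLength is in dwords */
		page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (page2 == NULL)
			return (ENOMEM);
		memcpy(&page2->Header, &hdr, sizeof(hdr));
		rv = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
		    /*PageAddress*/0, &page2->Header, len, /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (rv == 0)
			mpt2host_config_page_ioc2(page2);	/* byte-swap on big-endian hosts */
		free(page2, M_DEVBUF);				/* a real caller would keep the page */
		return (rv);
	}
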
1832 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; in mpt_write_cfg_page()
1836 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); in mpt_write_cfg_page()
1837 return (-1); in mpt_write_cfg_page()
1844 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK; in mpt_write_cfg_page()
1849 return (-1); in mpt_write_cfg_page()
1851 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len); in mpt_write_cfg_page()
1859 params.PageVersion = hdr->PageVersion; in mpt_write_cfg_page()
1860 params.PageLength = hdr->PageLength; in mpt_write_cfg_page()
1861 params.PageNumber = hdr->PageNumber; in mpt_write_cfg_page()
1865 hdr->PageType |= hdr_attr; in mpt_write_cfg_page()
1866 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK; in mpt_write_cfg_page()
1868 params.PageType = hdr->PageType; in mpt_write_cfg_page()
1871 req->req_pbuf + MPT_RQSL(mpt), in mpt_write_cfg_page()
1875 return (-1); in mpt_write_cfg_page()
1878 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { in mpt_write_cfg_page()
1880 req->IOCStatus); in mpt_write_cfg_page()
1882 return (-1); in mpt_write_cfg_page()
1918 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); in mpt_read_config_info_ioc()
1919 if (mpt->ioc_page2 == NULL) { in mpt_read_config_info_ioc()
1920 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); in mpt_read_config_info_ioc()
1924 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); in mpt_read_config_info_ioc()
1926 &mpt->ioc_page2->Header, len, FALSE, 5000); in mpt_read_config_info_ioc()
1932 mpt2host_config_page_ioc2(mpt->ioc_page2); in mpt_read_config_info_ioc()
1934 if (mpt->ioc_page2->CapabilitiesFlags != 0) { in mpt_read_config_info_ioc()
1938 for (mask = 1; mask != 0; mask <<= 1) { in mpt_read_config_info_ioc()
1939 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) { in mpt_read_config_info_ioc()
1944 mpt_prtc(mpt, " RAID-0"); in mpt_read_config_info_ioc()
1947 mpt_prtc(mpt, " RAID-1E"); in mpt_read_config_info_ioc()
1950 mpt_prtc(mpt, " RAID-1"); in mpt_read_config_info_ioc()
1959 mpt_prtc(mpt, " Multi-Channel-Arrays"); in mpt_read_config_info_ioc()
1965 if ((mpt->ioc_page2->CapabilitiesFlags in mpt_read_config_info_ioc()
1970 mpt->ioc_page2->NumActiveVolumes, in mpt_read_config_info_ioc()
1971 mpt->ioc_page2->NumActiveVolumes != 1 in mpt_read_config_info_ioc()
1973 mpt->ioc_page2->MaxVolumes); in mpt_read_config_info_ioc()
1975 mpt->ioc_page2->NumActivePhysDisks, in mpt_read_config_info_ioc()
1976 mpt->ioc_page2->NumActivePhysDisks != 1 in mpt_read_config_info_ioc()
1978 mpt->ioc_page2->MaxPhysDisks); in mpt_read_config_info_ioc()
1982 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); in mpt_read_config_info_ioc()
1983 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); in mpt_read_config_info_ioc()
1984 if (mpt->raid_volumes == NULL) { in mpt_read_config_info_ioc()
1995 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; in mpt_read_config_info_ioc()
1997 len = sizeof(*mpt->raid_volumes->config_page) + in mpt_read_config_info_ioc()
1998 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); in mpt_read_config_info_ioc()
1999 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { in mpt_read_config_info_ioc()
2000 mpt_raid = &mpt->raid_volumes[i]; in mpt_read_config_info_ioc()
2001 mpt_raid->config_page = in mpt_read_config_info_ioc()
2003 if (mpt_raid->config_page == NULL) { in mpt_read_config_info_ioc()
2009 mpt->raid_page0_len = len; in mpt_read_config_info_ioc()
2011 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); in mpt_read_config_info_ioc()
2012 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); in mpt_read_config_info_ioc()
2013 if (mpt->raid_disks == NULL) { in mpt_read_config_info_ioc()
2018 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; in mpt_read_config_info_ioc()
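
The RAID volume page-0 buffers above are sized with the usual trailing-array idiom: CONFIG_PAGE_RAID_VOL_0 already embeds one RAID_VOL0_PHYS_DISK, so only MaxPhysDisks - 1 extra elements are appended, and every volume's page 0 then fits in raid_page0_len bytes. A small illustration with a hypothetical MaxPhysDisks of 8:

	/* Illustration only: with max_phys_disks == 8 the buffer holds the
	 * fixed page plus seven additional physical-disk entries. */
	static size_t
	raid_vol0_len_sketch(u_int max_phys_disks)
	{
		return (sizeof(CONFIG_PAGE_RAID_VOL_0) +
		    sizeof(RAID_VOL0_PHYS_DISK) * (max_phys_disks - 1));
	}
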
2034 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); in mpt_read_config_info_ioc()
2035 if (mpt->ioc_page3 == NULL) { in mpt_read_config_info_ioc()
2036 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); in mpt_read_config_info_ioc()
2040 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); in mpt_read_config_info_ioc()
2042 &mpt->ioc_page3->Header, len, FALSE, 5000); in mpt_read_config_info_ioc()
2047 mpt2host_config_page_ioc3(mpt->ioc_page3); in mpt_read_config_info_ioc()
2064 return (-1); in mpt_send_port_enable()
2066 enable_req = req->req_vbuf; in mpt_send_port_enable()
2069 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; in mpt_send_port_enable()
2070 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); in mpt_send_port_enable()
2071 enable_req->PortNumber = port; in mpt_send_port_enable()
2078 FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000); in mpt_send_port_enable()
2081 return (-1); in mpt_send_port_enable()
2101 enable_req = req->req_vbuf; in mpt_send_event_request()
2104 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; in mpt_send_event_request()
2105 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); in mpt_send_event_request()
2106 enable_req->Switch = onoff; in mpt_send_event_request()
2112 * Send the command off, but don't wait for it. in mpt_send_event_request()
2119 * Un-mask the interrupts on the chip.
2144 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); in mpt_sysctl_attach()
2145 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); in mpt_sysctl_attach()
2148 "debug", CTLFLAG_RW, &mpt->verbose, 0, in mpt_sysctl_attach()
2151 "role", CTLFLAG_RD, &mpt->role, 0, in mpt_sysctl_attach()
2155 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1, in mpt_sysctl_attach()
2171 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { in mpt_attach()
2176 if (pers->probe(mpt) == 0) { in mpt_attach()
2177 error = pers->attach(mpt); in mpt_attach()
2182 mpt->mpt_pers_mask |= (0x1 << pers->id); in mpt_attach()
2183 pers->use_count++; in mpt_attach()
2189 * for all of the personalities. This allows the personalities in mpt_attach()
2190 * to do setups that are appropriate for them prior to enabling in mpt_attach()
2193 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { in mpt_attach()
2196 error = pers->enable(mpt); in mpt_attach()
2199 " not enable (%d)\n", pers->name, error); in mpt_attach()
2214 pers->shutdown(mpt); in mpt_shutdown()
2225 pers->detach(mpt); in mpt_detach()
2226 mpt->mpt_pers_mask &= ~(0x1 << pers->id); in mpt_detach()
2227 pers->use_count--; in mpt_detach()
2242 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { in mpt_core_load()
2256 * Initialize per-instance driver data and perform
2264 LIST_INIT(&mpt->ack_frames); in mpt_core_attach()
2266 TAILQ_INIT(&mpt->request_pending_list); in mpt_core_attach()
2267 TAILQ_INIT(&mpt->request_free_list); in mpt_core_attach()
2268 TAILQ_INIT(&mpt->request_timeout_list); in mpt_core_attach()
2269 for (val = 0; val < MPT_MAX_LUNS; val++) { in mpt_core_attach()
2270 STAILQ_INIT(&mpt->trt[val].atios); in mpt_core_attach()
2271 STAILQ_INIT(&mpt->trt[val].inots); in mpt_core_attach()
2273 STAILQ_INIT(&mpt->trt_wildcard.atios); in mpt_core_attach()
2274 STAILQ_INIT(&mpt->trt_wildcard.inots); in mpt_core_attach()
2276 mpt->failure_id = -1; in mpt_core_attach()
2278 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; in mpt_core_attach()
2302 * Enable asynchronous event reporting- all personalities in mpt_core_enable()
2311 * This seems to be crucial- otherwise in mpt_core_enable()
2324 * This seems to be crucial- otherwise in mpt_core_enable()
2359 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { in mpt_core_detach()
2360 request_t *req = &mpt->request_pool[val]; in mpt_core_detach()
2361 mpt_callout_drain(mpt, &req->callout); in mpt_core_detach()
2371 /* Unload is always successful. */ in mpt_core_unload()
2376 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2392 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; in mpt_upload_fw()
2393 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; in mpt_upload_fw()
2394 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); in mpt_upload_fw()
2395 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; in mpt_upload_fw()
2396 tsge->DetailsLength = 12; in mpt_upload_fw()
2397 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; in mpt_upload_fw()
2398 tsge->ImageSize = htole32(mpt->fw_image_size); in mpt_upload_fw()
2404 sge->FlagsLength = htole32(flags | mpt->fw_image_size); in mpt_upload_fw()
2405 sge->Address = htole32(mpt->fw_phys); in mpt_upload_fw()
2406 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD); in mpt_upload_fw()
2411 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD); in mpt_upload_fw()
2422 if (mpt->is_sas) { in mpt_diag_outsl()
2423 pci_enable_io(mpt->dev, SYS_RES_IOPORT); in mpt_diag_outsl()
2430 if (mpt->is_sas) { in mpt_diag_outsl()
2431 pci_disable_io(mpt->dev, SYS_RES_IOPORT); in mpt_diag_outsl()
2443 if (mpt->pci_pio_reg == NULL) { in mpt_download_fw()
2448 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", in mpt_download_fw()
2449 mpt->fw_image_size); in mpt_download_fw()
2460 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; in mpt_download_fw()
2461 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE); in mpt_download_fw()
2462 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, in mpt_download_fw()
2463 fw_hdr->ImageSize); in mpt_download_fw()
2464 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE); in mpt_download_fw()
2466 ext_offset = fw_hdr->NextImageHeaderOffset; in mpt_download_fw()
2471 ext_offset = ext->NextImageHeaderOffset; in mpt_download_fw()
2472 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, in mpt_download_fw()
2474 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, in mpt_download_fw()
2475 ext->ImageSize); in mpt_download_fw()
2476 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, in mpt_download_fw()
2480 if (mpt->is_sas) { in mpt_download_fw()
2481 pci_enable_io(mpt->dev, SYS_RES_IOPORT); in mpt_download_fw()
2484 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); in mpt_download_fw()
2485 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); in mpt_download_fw()
2489 * to auto-boot from flash. Clear the status so that the controller in mpt_download_fw()
2497 if (mpt->is_sas) { in mpt_download_fw()
2498 pci_disable_io(mpt->dev, SYS_RES_IOPORT); in mpt_download_fw()
2502 * Re-enable the processor and clear the boot halt flag. in mpt_download_fw()
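
mpt_diag_outsl() and the reset-vector writes above use the diagnostic memory window: MPT_OFFSET_DIAG_ADDR selects a chip address and MPT_OFFSET_DIAG_DATA carries the data. A hedged sketch of that idiom; that the data port auto-increments is inferred from the loop structure, not quoted source:

	static void
	diag_write_sketch(struct mpt_softc *mpt, uint32_t chip_addr,
	    const uint32_t *data, int count)
	{
		int i;

		mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, chip_addr);	/* point the window */
		for (i = 0; i < count; i++)
			mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data[i]); /* assumed auto-incrementing data port */
	}
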
2520 /* Create a child tag for data buffers */ in mpt_dma_buf_alloc()
2521 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, in mpt_dma_buf_alloc()
2523 NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE, in mpt_dma_buf_alloc()
2524 mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0, in mpt_dma_buf_alloc()
2525 &mpt->buffer_dmat) != 0) { in mpt_dma_buf_alloc()
2526 mpt_prt(mpt, "cannot create a dma tag for data buffers\n"); in mpt_dma_buf_alloc()
2530 /* Create a child tag for request buffers */ in mpt_dma_buf_alloc()
2531 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0, in mpt_dma_buf_alloc()
2534 &mpt->request_dmat) != 0) { in mpt_dma_buf_alloc()
2535 mpt_prt(mpt, "cannot create a dma tag for requests\n"); in mpt_dma_buf_alloc()
2539 /* Allocate some DMA accessible memory for requests */ in mpt_dma_buf_alloc()
2540 if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request, in mpt_dma_buf_alloc()
2541 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) { in mpt_dma_buf_alloc()
2551 bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request, in mpt_dma_buf_alloc()
2555 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n", in mpt_dma_buf_alloc()
2559 mpt->request_phys = mi.phys; in mpt_dma_buf_alloc()
2562 * Now create per-request dma maps in mpt_dma_buf_alloc()
2565 pptr = mpt->request_phys; in mpt_dma_buf_alloc()
2566 vptr = mpt->request; in mpt_dma_buf_alloc()
2569 request_t *req = &mpt->request_pool[i]; in mpt_dma_buf_alloc()
2570 req->index = i++; in mpt_dma_buf_alloc()
2573 req->req_pbuf = pptr; in mpt_dma_buf_alloc()
2574 req->req_vbuf = vptr; in mpt_dma_buf_alloc()
2579 req->sense_pbuf = (pptr - MPT_SENSE_SIZE); in mpt_dma_buf_alloc()
2580 req->sense_vbuf = (vptr - MPT_SENSE_SIZE); in mpt_dma_buf_alloc()
2582 error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap); in mpt_dma_buf_alloc()
2584 mpt_prt(mpt, "error %d creating per-cmd DMA maps\n", in mpt_dma_buf_alloc()
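
The per-request carving in mpt_dma_buf_alloc() lays each slot out as a request frame followed by its sense buffer, which is why the sense pointers above sit at the slot's tail. A sketch of that loop with the pointer advances made explicit; slot_size stands in for the driver's real per-request area constant:

	static void
	carve_requests_sketch(struct mpt_softc *mpt, bus_addr_t pptr, uint8_t *vptr,
	    size_t slot_size, int nreq)
	{
		int i;

		for (i = 0; i < nreq; i++) {
			request_t *req = &mpt->request_pool[i];

			req->index = i;
			req->req_pbuf = pptr;			/* bus address of the frame */
			req->req_vbuf = vptr;			/* KVA of the frame */
			pptr += slot_size;			/* advance to the next slot */
			vptr += slot_size;
			req->sense_pbuf = pptr - MPT_SENSE_SIZE; /* sense data at the slot tail */
			req->sense_vbuf = vptr - MPT_SENSE_SIZE;
		}
	}
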
2598 if (mpt->request_dmat == 0) { in mpt_dma_buf_free()
2602 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) { in mpt_dma_buf_free()
2603 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap); in mpt_dma_buf_free()
2605 bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap); in mpt_dma_buf_free()
2606 bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap); in mpt_dma_buf_free()
2607 bus_dma_tag_destroy(mpt->request_dmat); in mpt_dma_buf_free()
2608 mpt->request_dmat = 0; in mpt_dma_buf_free()
2609 bus_dma_tag_destroy(mpt->buffer_dmat); in mpt_dma_buf_free()
2613 * Allocate/Initialize data structures for the controller. Called
2624 return (-1); in mpt_configure_ioc()
2643 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) { in mpt_configure_ioc()
2647 mpt2host_iocfacts_reply(&mpt->ioc_facts); in mpt_configure_ioc()
2650 mpt->ioc_facts.MsgVersion >> 8, in mpt_configure_ioc()
2651 mpt->ioc_facts.MsgVersion & 0xFF, in mpt_configure_ioc()
2652 mpt->ioc_facts.HeaderVersion >> 8, in mpt_configure_ioc()
2653 mpt->ioc_facts.HeaderVersion & 0xFF); in mpt_configure_ioc()
2657 * the actual (reasonable) segment limit for read/write I/O. in mpt_configure_ioc()
2668 * than MPT_MAX_REQUESTS areas. Therefore, to account for both in mpt_configure_ioc()
2669 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. in mpt_configure_ioc()
2673 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; in mpt_configure_ioc()
2676 mpt->max_seg_cnt *= MPT_NRFM(mpt); in mpt_configure_ioc()
2679 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) { in mpt_configure_ioc()
2682 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt); in mpt_configure_ioc()
2683 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth; in mpt_configure_ioc()
2687 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); in mpt_configure_ioc()
2690 * Use this as the basis for reporting the maximum I/O size to CAM. in mpt_configure_ioc()
2692 mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, btoc(maxphys) + 1); in mpt_configure_ioc()
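
The segment-count derivation above can be followed with made-up numbers; only the shape of the calculation comes from the code, the values below are purely illustrative:

	/*
	 * Illustrative walk-through (not real hardware numbers):
	 *   start:  max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;      e.g. 256 - 2 = 254
	 *   scale:  max_seg_cnt *= MPT_NRFM(mpt);                  e.g. 254 * 2 = 508
	 *   clamp:  if (max_seg_cnt > ioc_facts.MaxChainDepth)
	 *                   max_seg_cnt = ioc_facts.MaxChainDepth; e.g. -> 128
	 *   scale:  max_seg_cnt *= (MPT_NSGL(mpt) - 1);            e.g. 128 * 31 = 3968
	 *   report: max_cam_seg_cnt = min(max_seg_cnt, btoc(maxphys) + 1);
	 *           with maxphys = 128 KiB and 4 KiB pages: min(3968, 33) = 33
	 * so in this example CAM is told it may map at most 33 segments per I/O.
	 */
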
2704 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { in mpt_configure_ioc()
2705 request_t *req = &mpt->request_pool[val]; in mpt_configure_ioc()
2706 req->state = REQ_STATE_ALLOCATED; in mpt_configure_ioc()
2707 mpt_callout_init(mpt, &req->callout); in mpt_configure_ioc()
2712 "CAM Segment Count: %u\n", mpt->max_seg_cnt, in mpt_configure_ioc()
2713 mpt->max_cam_seg_cnt); in mpt_configure_ioc()
2716 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber); in mpt_configure_ioc()
2720 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize, in mpt_configure_ioc()
2721 mpt->ioc_facts.RequestFrameSize << 2, in mpt_configure_ioc()
2722 mpt->ioc_facts.MaxChainDepth); in mpt_configure_ioc()
2724 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts, in mpt_configure_ioc()
2725 mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags); in mpt_configure_ioc()
2727 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY); in mpt_configure_ioc()
2728 mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); in mpt_configure_ioc()
2729 if (mpt->port_facts == NULL) { in mpt_configure_ioc()
2730 mpt_prt(mpt, "unable to allocate memory for port facts\n"); in mpt_configure_ioc()
2734 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) && in mpt_configure_ioc()
2735 (mpt->fw_uploaded == 0)) { in mpt_configure_ioc()
2744 * retrieved, we are responsible for re-downloading in mpt_configure_ioc()
2745 * the firmware after any hard-reset. in mpt_configure_ioc()
2748 mpt->fw_image_size = mpt->ioc_facts.FWImageSize; in mpt_configure_ioc()
2749 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0, in mpt_configure_ioc()
2751 mpt->fw_image_size, 1, mpt->fw_image_size, 0, in mpt_configure_ioc()
2752 &mpt->fw_dmat); in mpt_configure_ioc()
2758 error = bus_dmamem_alloc(mpt->fw_dmat, in mpt_configure_ioc()
2759 (void **)&mpt->fw_image, BUS_DMA_NOWAIT | in mpt_configure_ioc()
2760 BUS_DMA_COHERENT, &mpt->fw_dmap); in mpt_configure_ioc()
2763 bus_dma_tag_destroy(mpt->fw_dmat); in mpt_configure_ioc()
2769 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, in mpt_configure_ioc()
2770 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0); in mpt_configure_ioc()
2771 mpt->fw_phys = mi.phys; in mpt_configure_ioc()
2777 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); in mpt_configure_ioc()
2778 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, in mpt_configure_ioc()
2779 mpt->fw_dmap); in mpt_configure_ioc()
2780 bus_dma_tag_destroy(mpt->fw_dmat); in mpt_configure_ioc()
2781 mpt->fw_image = NULL; in mpt_configure_ioc()
2784 mpt->fw_uploaded = 1; in mpt_configure_ioc()
2787 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) { in mpt_configure_ioc()
2788 pfp = &mpt->port_facts[port]; in mpt_configure_ioc()
2793 free(mpt->port_facts, M_DEVBUF); in mpt_configure_ioc()
2794 mpt->port_facts = NULL; in mpt_configure_ioc()
2806 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID, in mpt_configure_ioc()
2807 pfp->MaxDevices); in mpt_configure_ioc()
2813 pfp = &mpt->port_facts[0]; in mpt_configure_ioc()
2814 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) { in mpt_configure_ioc()
2815 mpt->is_fc = 1; in mpt_configure_ioc()
2816 mpt->is_sas = 0; in mpt_configure_ioc()
2817 mpt->is_spi = 0; in mpt_configure_ioc()
2818 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) { in mpt_configure_ioc()
2819 mpt->is_fc = 0; in mpt_configure_ioc()
2820 mpt->is_sas = 1; in mpt_configure_ioc()
2821 mpt->is_spi = 0; in mpt_configure_ioc()
2822 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) { in mpt_configure_ioc()
2823 mpt->is_fc = 0; in mpt_configure_ioc()
2824 mpt->is_sas = 0; in mpt_configure_ioc()
2825 mpt->is_spi = 1; in mpt_configure_ioc()
2826 if (mpt->mpt_ini_id == MPT_INI_ID_NONE) in mpt_configure_ioc()
2827 mpt->mpt_ini_id = pfp->PortSCSIID; in mpt_configure_ioc()
2828 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) { in mpt_configure_ioc()
2831 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) { in mpt_configure_ioc()
2835 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType); in mpt_configure_ioc()
2845 mpt->role = MPT_ROLE_NONE; in mpt_configure_ioc()
2846 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { in mpt_configure_ioc()
2847 mpt->role |= MPT_ROLE_INITIATOR; in mpt_configure_ioc()
2849 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { in mpt_configure_ioc()
2850 mpt->role |= MPT_ROLE_TARGET; in mpt_configure_ioc()
2865 * settings for Integrated Mirroring (e.g.). in mpt_configure_ioc()
2896 for (val = 0, pptr = mpt->reply_phys; in mpt_enable_ioc()
2897 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); in mpt_enable_ioc()
2900 if (++val == mpt->ioc_facts.GlobalCredits - 1) in mpt_enable_ioc()
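
The loop above walks the reply DMA page in MPT_REPLY_SIZE steps and posts each frame's bus address to the IOC, stopping one short of GlobalCredits. A hedged sketch of the whole loop; mpt_free_reply() is the name the driver uses for posting to the reply free queue, treated here as an assumption:

	static void
	post_reply_frames_sketch(struct mpt_softc *mpt)
	{
		bus_addr_t pptr;
		int val;

		for (val = 0, pptr = mpt->reply_phys;
		     (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
		     pptr += MPT_REPLY_SIZE) {
			mpt_free_reply(mpt, pptr);	/* hand this frame to the IOC */
			if (++val == mpt->ioc_facts.GlobalCredits - 1)
				break;			/* post at most GlobalCredits - 1 frames */
		}
	}
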
2923 * Endian Conversion Functions- only used on Big Endian machines
2958 mpt2host_sge_simple_union(&rp->HostPageBufferSGE); in mpt2host_iocfacts_reply()
2988 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) { in mpt2host_config_page_ioc2()
3033 for (i = 0; i < sizeof(sp2->DeviceSettings) / in mpt2host_config_page_scsi_port_2()
3034 sizeof(*sp2->DeviceSettings); i++) { in mpt2host_config_page_scsi_port_2()
3121 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) { in mpt2host_config_page_raid_vol_0()