1 /*- 2 * Generic routines for LSI Fusion adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 */ 61 /*- 62 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 63 * Copyright (c) 2005, WHEEL Sp. z o.o. 64 * Copyright (c) 2004, 2005 Justin T. Gibbs 65 * All rights reserved. 66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions are 69 * met: 70 * 1. Redistributions of source code must retain the above copyright 71 * notice, this list of conditions and the following disclaimer. 72 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 73 * substantially similar to the "NO WARRANTY" disclaimer below 74 * ("Disclaimer") and any redistribution must be conditioned upon including 75 * a substantially similar Disclaimer requirement for further binary 76 * redistribution. 77 * 3. Neither the names of the above listed copyright holders nor the names 78 * of any contributors may be used to endorse or promote products derived 79 * from this software without specific prior written permission. 80 * 81 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 82 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 83 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 84 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 85 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 92 */ 93 94 #include <sys/cdefs.h> 95 __FBSDID("$FreeBSD$"); 96 97 #include <dev/mpt/mpt.h> 98 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 99 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 100 101 #include <dev/mpt/mpilib/mpi.h> 102 #include <dev/mpt/mpilib/mpi_ioc.h> 103 104 #include <sys/sysctl.h> 105 106 #define MPT_MAX_TRYS 3 107 #define MPT_MAX_WAIT 300000 108 109 static int maxwait_ack = 0; 110 static int maxwait_int = 0; 111 static int maxwait_state = 0; 112 113 TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 114 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 115 116 static mpt_reply_handler_t mpt_default_reply_handler; 117 static mpt_reply_handler_t mpt_config_reply_handler; 118 static mpt_reply_handler_t mpt_handshake_reply_handler; 119 static mpt_reply_handler_t mpt_event_reply_handler; 120 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 121 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 122 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 123 static int mpt_soft_reset(struct mpt_softc *mpt); 124 static void mpt_hard_reset(struct mpt_softc *mpt); 125 static int mpt_configure_ioc(struct mpt_softc *mpt); 126 static int mpt_enable_ioc(struct mpt_softc *mpt); 127 128 /************************* Personality Module Support *************************/ 129 /* 130 * We include one extra entry that is guaranteed to be NULL 131 * to simplify our itterator. 132 */ 133 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 134 static __inline struct mpt_personality* 135 mpt_pers_find(struct mpt_softc *, u_int); 136 static __inline struct mpt_personality* 137 mpt_pers_find_reverse(struct mpt_softc *, u_int); 138 139 static __inline struct mpt_personality * 140 mpt_pers_find(struct mpt_softc *mpt, u_int start_at) 141 { 142 KASSERT(start_at <= MPT_MAX_PERSONALITIES, 143 ("mpt_pers_find: starting position out of range\n")); 144 145 while (start_at < MPT_MAX_PERSONALITIES 146 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 147 start_at++; 148 } 149 return (mpt_personalities[start_at]); 150 } 151 152 /* 153 * Used infrequenstly, so no need to optimize like a forward 154 * traversal where we use the MAX+1 is guaranteed to be NULL 155 * trick. 156 */ 157 static __inline struct mpt_personality * 158 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 159 { 160 while (start_at < MPT_MAX_PERSONALITIES 161 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 162 start_at--; 163 } 164 if (start_at < MPT_MAX_PERSONALITIES) 165 return (mpt_personalities[start_at]); 166 return (NULL); 167 } 168 169 #define MPT_PERS_FOREACH(mpt, pers) \ 170 for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 171 pers != NULL; \ 172 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 173 174 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 175 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 176 pers != NULL; \ 177 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 178 179 static mpt_load_handler_t mpt_stdload; 180 static mpt_probe_handler_t mpt_stdprobe; 181 static mpt_attach_handler_t mpt_stdattach; 182 static mpt_event_handler_t mpt_stdevent; 183 static mpt_reset_handler_t mpt_stdreset; 184 static mpt_shutdown_handler_t mpt_stdshutdown; 185 static mpt_detach_handler_t mpt_stddetach; 186 static mpt_unload_handler_t mpt_stdunload; 187 static struct mpt_personality mpt_default_personality = 188 { 189 .load = mpt_stdload, 190 .probe = mpt_stdprobe, 191 .attach = mpt_stdattach, 192 .event = mpt_stdevent, 193 .reset = mpt_stdreset, 194 .shutdown = mpt_stdshutdown, 195 .detach = mpt_stddetach, 196 .unload = mpt_stdunload 197 }; 198 199 static mpt_load_handler_t mpt_core_load; 200 static mpt_attach_handler_t mpt_core_attach; 201 static mpt_reset_handler_t mpt_core_ioc_reset; 202 static mpt_event_handler_t mpt_core_event; 203 static mpt_shutdown_handler_t mpt_core_shutdown; 204 static mpt_shutdown_handler_t mpt_core_detach; 205 static mpt_unload_handler_t mpt_core_unload; 206 static struct mpt_personality mpt_core_personality = 207 { 208 .name = "mpt_core", 209 .load = mpt_core_load, 210 .attach = mpt_core_attach, 211 .event = mpt_core_event, 212 .reset = mpt_core_ioc_reset, 213 .shutdown = mpt_core_shutdown, 214 .detach = mpt_core_detach, 215 .unload = mpt_core_unload, 216 }; 217 218 /* 219 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 220 * ordering information. We want the core to always register FIRST. 221 * other modules are set to SI_ORDER_SECOND. 222 */ 223 static moduledata_t mpt_core_mod = { 224 "mpt_core", mpt_modevent, &mpt_core_personality 225 }; 226 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 227 MODULE_VERSION(mpt_core, 1); 228 229 #define MPT_PERS_ATACHED(pers, mpt) \ 230 ((mpt)->pers_mask & (0x1 << pers->id)) 231 232 233 int 234 mpt_modevent(module_t mod, int type, void *data) 235 { 236 struct mpt_personality *pers; 237 int error; 238 239 pers = (struct mpt_personality *)data; 240 241 error = 0; 242 switch (type) { 243 case MOD_LOAD: 244 { 245 mpt_load_handler_t **def_handler; 246 mpt_load_handler_t **pers_handler; 247 int i; 248 249 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 250 if (mpt_personalities[i] == NULL) 251 break; 252 } 253 if (i >= MPT_MAX_PERSONALITIES) { 254 error = ENOMEM; 255 break; 256 } 257 pers->id = i; 258 mpt_personalities[i] = pers; 259 260 /* Install standard/noop handlers for any NULL entries. */ 261 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 262 pers_handler = MPT_PERS_FIRST_HANDLER(pers); 263 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 264 if (*pers_handler == NULL) 265 *pers_handler = *def_handler; 266 pers_handler++; 267 def_handler++; 268 } 269 270 error = (pers->load(pers)); 271 if (error != 0) 272 mpt_personalities[i] = NULL; 273 break; 274 } 275 case MOD_SHUTDOWN: 276 break; 277 case MOD_QUIESCE: 278 break; 279 case MOD_UNLOAD: 280 error = pers->unload(pers); 281 mpt_personalities[pers->id] = NULL; 282 break; 283 default: 284 error = EINVAL; 285 break; 286 } 287 return (error); 288 } 289 290 int 291 mpt_stdload(struct mpt_personality *pers) 292 { 293 /* Load is always successfull. */ 294 return (0); 295 } 296 297 int 298 mpt_stdprobe(struct mpt_softc *mpt) 299 { 300 /* Probe is always successfull. */ 301 return (0); 302 } 303 304 int 305 mpt_stdattach(struct mpt_softc *mpt) 306 { 307 /* Attach is always successfull. */ 308 return (0); 309 } 310 311 int 312 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 313 { 314 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 315 /* Event was not for us. */ 316 return (0); 317 } 318 319 void 320 mpt_stdreset(struct mpt_softc *mpt, int type) 321 { 322 } 323 324 void 325 mpt_stdshutdown(struct mpt_softc *mpt) 326 { 327 } 328 329 void 330 mpt_stddetach(struct mpt_softc *mpt) 331 { 332 } 333 334 int 335 mpt_stdunload(struct mpt_personality *pers) 336 { 337 /* Unload is always successfull. */ 338 return (0); 339 } 340 341 /******************************* Bus DMA Support ******************************/ 342 void 343 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 344 { 345 struct mpt_map_info *map_info; 346 347 map_info = (struct mpt_map_info *)arg; 348 map_info->error = error; 349 map_info->phys = segs->ds_addr; 350 } 351 352 /**************************** Reply/Event Handling ****************************/ 353 int 354 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 355 mpt_handler_t handler, uint32_t *phandler_id) 356 { 357 358 switch (type) { 359 case MPT_HANDLER_REPLY: 360 { 361 u_int cbi; 362 u_int free_cbi; 363 364 if (phandler_id == NULL) 365 return (EINVAL); 366 367 free_cbi = MPT_HANDLER_ID_NONE; 368 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 369 /* 370 * If the same handler is registered multiple 371 * times, don't error out. Just return the 372 * index of the original registration. 373 */ 374 if (mpt_reply_handlers[cbi] == handler.reply_handler) { 375 *phandler_id = MPT_CBI_TO_HID(cbi); 376 return (0); 377 } 378 379 /* 380 * Fill from the front in the hope that 381 * all registered handlers consume only a 382 * single cache line. 383 * 384 * We don't break on the first empty slot so 385 * that the full table is checked to see if 386 * this handler was previously registered. 387 */ 388 if (free_cbi == MPT_HANDLER_ID_NONE 389 && (mpt_reply_handlers[cbi] 390 == mpt_default_reply_handler)) 391 free_cbi = cbi; 392 } 393 if (free_cbi == MPT_HANDLER_ID_NONE) 394 return (ENOMEM); 395 mpt_reply_handlers[free_cbi] = handler.reply_handler; 396 *phandler_id = MPT_CBI_TO_HID(free_cbi); 397 break; 398 } 399 default: 400 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 401 return (EINVAL); 402 } 403 return (0); 404 } 405 406 int 407 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 408 mpt_handler_t handler, uint32_t handler_id) 409 { 410 411 switch (type) { 412 case MPT_HANDLER_REPLY: 413 { 414 u_int cbi; 415 416 cbi = MPT_CBI(handler_id); 417 if (cbi >= MPT_NUM_REPLY_HANDLERS 418 || mpt_reply_handlers[cbi] != handler.reply_handler) 419 return (ENOENT); 420 mpt_reply_handlers[cbi] = mpt_default_reply_handler; 421 break; 422 } 423 default: 424 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 425 return (EINVAL); 426 } 427 return (0); 428 } 429 430 static int 431 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 432 MSG_DEFAULT_REPLY *reply_frame) 433 { 434 mpt_prt(mpt, "XXXX Default Handler Called. Req %p, Frame %p\n", 435 req, reply_frame); 436 437 if (reply_frame != NULL) 438 mpt_dump_reply_frame(mpt, reply_frame); 439 440 mpt_prt(mpt, "XXXX Reply Frame Ignored\n"); 441 442 return (/*free_reply*/TRUE); 443 } 444 445 static int 446 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 447 MSG_DEFAULT_REPLY *reply_frame) 448 { 449 if (req != NULL) { 450 451 if (reply_frame != NULL) { 452 MSG_CONFIG *cfgp; 453 MSG_CONFIG_REPLY *reply; 454 455 cfgp = (MSG_CONFIG *)req->req_vbuf; 456 reply = (MSG_CONFIG_REPLY *)reply_frame; 457 req->IOCStatus = le16toh(reply_frame->IOCStatus); 458 bcopy(&reply->Header, &cfgp->Header, 459 sizeof(cfgp->Header)); 460 } 461 req->state &= ~REQ_STATE_QUEUED; 462 req->state |= REQ_STATE_DONE; 463 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 464 465 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) 466 wakeup(req); 467 } 468 469 return (/*free_reply*/TRUE); 470 } 471 472 static int 473 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 474 MSG_DEFAULT_REPLY *reply_frame) 475 { 476 /* Nothing to be done. */ 477 return (/*free_reply*/TRUE); 478 } 479 480 static int 481 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 482 MSG_DEFAULT_REPLY *reply_frame) 483 { 484 int free_reply; 485 486 if (reply_frame == NULL) { 487 mpt_prt(mpt, "Event Handler: req %p - Unexpected NULL reply\n"); 488 return (/*free_reply*/TRUE); 489 } 490 491 free_reply = TRUE; 492 switch (reply_frame->Function) { 493 case MPI_FUNCTION_EVENT_NOTIFICATION: 494 { 495 MSG_EVENT_NOTIFY_REPLY *msg; 496 struct mpt_personality *pers; 497 u_int handled; 498 499 handled = 0; 500 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 501 MPT_PERS_FOREACH(mpt, pers) 502 handled += pers->event(mpt, req, msg); 503 504 if (handled == 0 && mpt->mpt_pers_mask == 0) { 505 mpt_lprt(mpt, MPT_PRT_INFO, 506 "No Handlers For Any Event Notify Frames. " 507 "Event %#x (ACK %sequired).\n", 508 msg->Event, msg->AckRequired? "r" : "not r"); 509 } else if (handled == 0) { 510 mpt_lprt(mpt, MPT_PRT_WARN, 511 "Unhandled Event Notify Frame. Event %#x " 512 "(ACK %sequired).\n", 513 msg->Event, msg->AckRequired? "r" : "not r"); 514 } 515 516 if (msg->AckRequired) { 517 request_t *ack_req; 518 uint32_t context; 519 520 context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 521 ack_req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 522 if (ack_req == NULL) { 523 struct mpt_evtf_record *evtf; 524 525 evtf = (struct mpt_evtf_record *)reply_frame; 526 evtf->context = context; 527 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 528 free_reply = FALSE; 529 break; 530 } 531 mpt_send_event_ack(mpt, ack_req, msg, context); 532 } 533 break; 534 } 535 case MPI_FUNCTION_PORT_ENABLE: 536 mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n"); 537 break; 538 case MPI_FUNCTION_EVENT_ACK: 539 break; 540 default: 541 mpt_prt(mpt, "Unknown Event Function: %x\n", 542 reply_frame->Function); 543 break; 544 } 545 546 if (req != NULL 547 && (reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 548 549 req->state &= ~REQ_STATE_QUEUED; 550 req->state |= REQ_STATE_DONE; 551 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 552 553 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) 554 wakeup(req); 555 else 556 mpt_free_request(mpt, req); 557 } 558 return (free_reply); 559 } 560 561 /* 562 * Process an asynchronous event from the IOC. 563 */ 564 static int 565 mpt_core_event(struct mpt_softc *mpt, request_t *req, 566 MSG_EVENT_NOTIFY_REPLY *msg) 567 { 568 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 569 msg->Event & 0xFF); 570 switch(msg->Event & 0xFF) { 571 case MPI_EVENT_NONE: 572 break; 573 case MPI_EVENT_LOG_DATA: 574 { 575 int i; 576 577 /* Some error occured that LSI wants logged */ 578 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 579 msg->IOCLogInfo); 580 mpt_prt(mpt, "\tEvtLogData: Event Data:"); 581 for (i = 0; i < msg->EventDataLength; i++) 582 mpt_prtc(mpt, " %08x", msg->Data[i]); 583 mpt_prtc(mpt, "\n"); 584 break; 585 } 586 case MPI_EVENT_EVENT_CHANGE: 587 /* 588 * This is just an acknowledgement 589 * of our mpt_send_event_request. 590 */ 591 break; 592 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 593 break; 594 default: 595 return (/*handled*/0); 596 break; 597 } 598 return (/*handled*/1); 599 } 600 601 static void 602 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 603 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) 604 { 605 MSG_EVENT_ACK *ackp; 606 607 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; 608 bzero(ackp, sizeof *ackp); 609 ackp->Function = MPI_FUNCTION_EVENT_ACK; 610 ackp->Event = msg->Event; 611 ackp->EventContext = msg->EventContext; 612 ackp->MsgContext = context; 613 mpt_check_doorbell(mpt); 614 mpt_send_cmd(mpt, ack_req); 615 } 616 617 /***************************** Interrupt Handling *****************************/ 618 void 619 mpt_intr(void *arg) 620 { 621 struct mpt_softc *mpt; 622 uint32_t reply_desc; 623 624 mpt = (struct mpt_softc *)arg; 625 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { 626 request_t *req; 627 MSG_DEFAULT_REPLY *reply_frame; 628 uint32_t reply_baddr; 629 u_int cb_index; 630 u_int req_index; 631 int free_rf; 632 633 req = NULL; 634 reply_frame = NULL; 635 reply_baddr = 0; 636 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { 637 u_int offset; 638 639 /* 640 * Insure that the reply frame is coherent. 641 */ 642 reply_baddr = (reply_desc << 1); 643 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); 644 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, 645 offset, MPT_REPLY_SIZE, 646 BUS_DMASYNC_POSTREAD); 647 reply_frame = MPT_REPLY_OTOV(mpt, offset); 648 reply_desc = le32toh(reply_frame->MsgContext); 649 } 650 cb_index = MPT_CONTEXT_TO_CBI(reply_desc); 651 req_index = MPT_CONTEXT_TO_REQI(reply_desc); 652 if (req_index < MPT_MAX_REQUESTS(mpt)) 653 req = &mpt->request_pool[req_index]; 654 655 free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_frame); 656 657 if (reply_frame != NULL && free_rf) 658 mpt_free_reply(mpt, reply_baddr); 659 } 660 } 661 662 /******************************* Error Recovery *******************************/ 663 void 664 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 665 u_int iocstatus) 666 { 667 MSG_DEFAULT_REPLY ioc_status_frame; 668 request_t *req; 669 670 bzero(&ioc_status_frame, sizeof(ioc_status_frame)); 671 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 672 ioc_status_frame.IOCStatus = iocstatus; 673 while((req = TAILQ_FIRST(chain)) != NULL) { 674 MSG_REQUEST_HEADER *msg_hdr; 675 u_int cb_index; 676 677 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 678 ioc_status_frame.Function = msg_hdr->Function; 679 ioc_status_frame.MsgContext = msg_hdr->MsgContext; 680 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 681 mpt_reply_handlers[cb_index](mpt, req, &ioc_status_frame); 682 } 683 } 684 685 /********************************* Diagnostics ********************************/ 686 /* 687 * Perform a diagnostic dump of a reply frame. 688 */ 689 void 690 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) 691 { 692 693 mpt_prt(mpt, "Address Reply:\n"); 694 mpt_print_reply(reply_frame); 695 } 696 697 /******************************* Doorbell Access ******************************/ 698 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); 699 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); 700 701 static __inline uint32_t 702 mpt_rd_db(struct mpt_softc *mpt) 703 { 704 return mpt_read(mpt, MPT_OFFSET_DOORBELL); 705 } 706 707 static __inline uint32_t 708 mpt_rd_intr(struct mpt_softc *mpt) 709 { 710 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); 711 } 712 713 /* Busy wait for a door bell to be read by IOC */ 714 static int 715 mpt_wait_db_ack(struct mpt_softc *mpt) 716 { 717 int i; 718 for (i=0; i < MPT_MAX_WAIT; i++) { 719 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { 720 maxwait_ack = i > maxwait_ack ? i : maxwait_ack; 721 return MPT_OK; 722 } 723 724 DELAY(1000); 725 } 726 return MPT_FAIL; 727 } 728 729 /* Busy wait for a door bell interrupt */ 730 static int 731 mpt_wait_db_int(struct mpt_softc *mpt) 732 { 733 int i; 734 for (i=0; i < MPT_MAX_WAIT; i++) { 735 if (MPT_DB_INTR(mpt_rd_intr(mpt))) { 736 maxwait_int = i > maxwait_int ? i : maxwait_int; 737 return MPT_OK; 738 } 739 DELAY(100); 740 } 741 return MPT_FAIL; 742 } 743 744 /* Wait for IOC to transition to a give state */ 745 void 746 mpt_check_doorbell(struct mpt_softc *mpt) 747 { 748 uint32_t db = mpt_rd_db(mpt); 749 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { 750 mpt_prt(mpt, "Device not running\n"); 751 mpt_print_db(db); 752 } 753 } 754 755 /* Wait for IOC to transition to a give state */ 756 static int 757 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) 758 { 759 int i; 760 761 for (i = 0; i < MPT_MAX_WAIT; i++) { 762 uint32_t db = mpt_rd_db(mpt); 763 if (MPT_STATE(db) == state) { 764 maxwait_state = i > maxwait_state ? i : maxwait_state; 765 return (MPT_OK); 766 } 767 DELAY(100); 768 } 769 return (MPT_FAIL); 770 } 771 772 773 /************************* Intialization/Configuration ************************/ 774 static int mpt_download_fw(struct mpt_softc *mpt); 775 776 /* Issue the reset COMMAND to the IOC */ 777 static int 778 mpt_soft_reset(struct mpt_softc *mpt) 779 { 780 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); 781 782 /* Have to use hard reset if we are not in Running state */ 783 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { 784 mpt_prt(mpt, "soft reset failed: device not running\n"); 785 return MPT_FAIL; 786 } 787 788 /* If door bell is in use we don't have a chance of getting 789 * a word in since the IOC probably crashed in message 790 * processing. So don't waste our time. 791 */ 792 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) { 793 mpt_prt(mpt, "soft reset failed: doorbell wedged\n"); 794 return MPT_FAIL; 795 } 796 797 /* Send the reset request to the IOC */ 798 mpt_write(mpt, MPT_OFFSET_DOORBELL, 799 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT); 800 if (mpt_wait_db_ack(mpt) != MPT_OK) { 801 mpt_prt(mpt, "soft reset failed: ack timeout\n"); 802 return MPT_FAIL; 803 } 804 805 /* Wait for the IOC to reload and come out of reset state */ 806 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) { 807 mpt_prt(mpt, "soft reset failed: device did not restart\n"); 808 return MPT_FAIL; 809 } 810 811 return MPT_OK; 812 } 813 814 static int 815 mpt_enable_diag_mode(struct mpt_softc *mpt) 816 { 817 int try; 818 819 try = 20; 820 while (--try) { 821 822 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0) 823 break; 824 825 /* Enable diagnostic registers */ 826 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF); 827 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE); 828 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE); 829 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE); 830 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE); 831 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE); 832 833 DELAY(100000); 834 } 835 if (try == 0) 836 return (EIO); 837 return (0); 838 } 839 840 static void 841 mpt_disable_diag_mode(struct mpt_softc *mpt) 842 { 843 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF); 844 } 845 846 /* This is a magic diagnostic reset that resets all the ARM 847 * processors in the chip. 848 */ 849 static void 850 mpt_hard_reset(struct mpt_softc *mpt) 851 { 852 int error; 853 int wait; 854 uint32_t diagreg; 855 856 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n"); 857 858 error = mpt_enable_diag_mode(mpt); 859 if (error) { 860 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); 861 mpt_prt(mpt, "Trying to reset anyway.\n"); 862 } 863 864 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 865 866 /* 867 * This appears to be a workaround required for some 868 * firmware or hardware revs. 869 */ 870 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM); 871 DELAY(1000); 872 873 /* Diag. port is now active so we can now hit the reset bit */ 874 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER); 875 876 /* 877 * Ensure that the reset has finished. We delay 1ms 878 * prior to reading the register to make sure the chip 879 * has sufficiently completed its reset to handle register 880 * accesses. 881 */ 882 wait = 5000; 883 do { 884 DELAY(1000); 885 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 886 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); 887 888 if (wait == 0) { 889 mpt_prt(mpt, "WARNING - Failed hard reset! " 890 "Trying to initialize anyway.\n"); 891 } 892 893 /* 894 * If we have firmware to download, it must be loaded before 895 * the controller will become operational. Do so now. 896 */ 897 if (mpt->fw_image != NULL) { 898 899 error = mpt_download_fw(mpt); 900 901 if (error) { 902 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); 903 mpt_prt(mpt, "Trying to initialize anyway.\n"); 904 } 905 } 906 907 /* 908 * Reseting the controller should have disabled write 909 * access to the diagnostic registers, but disable 910 * manually to be sure. 911 */ 912 mpt_disable_diag_mode(mpt); 913 } 914 915 static void 916 mpt_core_ioc_reset(struct mpt_softc *mpt, int type) 917 { 918 /* 919 * Complete all pending requests with a status 920 * appropriate for an IOC reset. 921 */ 922 mpt_complete_request_chain(mpt, &mpt->request_pending_list, 923 MPI_IOCSTATUS_INVALID_STATE); 924 } 925 926 927 /* 928 * Reset the IOC when needed. Try software command first then if needed 929 * poke at the magic diagnostic reset. Note that a hard reset resets 930 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as 931 * fouls up the PCI configuration registers. 932 */ 933 int 934 mpt_reset(struct mpt_softc *mpt, int reinit) 935 { 936 struct mpt_personality *pers; 937 int ret; 938 int retry_cnt = 0; 939 940 /* 941 * Try a soft reset. If that fails, get out the big hammer. 942 */ 943 again: 944 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) { 945 int cnt; 946 for (cnt = 0; cnt < 5; cnt++) { 947 /* Failed; do a hard reset */ 948 mpt_hard_reset(mpt); 949 950 /* 951 * Wait for the IOC to reload 952 * and come out of reset state 953 */ 954 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 955 if (ret == MPT_OK) { 956 break; 957 } 958 /* 959 * Okay- try to check again... 960 */ 961 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 962 if (ret == MPT_OK) { 963 break; 964 } 965 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n", 966 retry_cnt, cnt); 967 } 968 } 969 970 if (retry_cnt == 0) { 971 /* 972 * Invoke reset handlers. We bump the reset count so 973 * that mpt_wait_req() understands that regardless of 974 * the specified wait condition, it should stop its wait. 975 */ 976 mpt->reset_cnt++; 977 MPT_PERS_FOREACH(mpt, pers) 978 pers->reset(mpt, ret); 979 } 980 981 if (reinit != 0) { 982 ret = mpt_enable_ioc(mpt); 983 if (ret == MPT_OK) { 984 mpt_enable_ints(mpt); 985 } 986 } 987 if (ret != MPT_OK && retry_cnt++ < 2) { 988 goto again; 989 } 990 return ret; 991 } 992 993 /* Return a command buffer to the free queue */ 994 void 995 mpt_free_request(struct mpt_softc *mpt, request_t *req) 996 { 997 request_t *nxt; 998 struct mpt_evtf_record *record; 999 uint32_t reply_baddr; 1000 1001 if (req == NULL || req != &mpt->request_pool[req->index]) { 1002 panic("mpt_free_request bad req ptr\n"); 1003 return; 1004 } 1005 if ((nxt = req->chain) != NULL) { 1006 req->chain = NULL; 1007 mpt_free_request(mpt, nxt); /* NB: recursion */ 1008 } 1009 req->ccb = NULL; 1010 req->state = REQ_STATE_FREE; 1011 if (LIST_EMPTY(&mpt->ack_frames)) { 1012 TAILQ_INSERT_HEAD(&mpt->request_free_list, req, links); 1013 if (mpt->getreqwaiter != 0) { 1014 mpt->getreqwaiter = 0; 1015 wakeup(&mpt->request_free_list); 1016 } 1017 return; 1018 } 1019 1020 /* 1021 * Process an ack frame deferred due to resource shortage. 1022 */ 1023 record = LIST_FIRST(&mpt->ack_frames); 1024 LIST_REMOVE(record, links); 1025 mpt_send_event_ack(mpt, req, &record->reply, record->context); 1026 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply) 1027 + (mpt->reply_phys & 0xFFFFFFFF); 1028 mpt_free_reply(mpt, reply_baddr); 1029 } 1030 1031 /* Get a command buffer from the free queue */ 1032 request_t * 1033 mpt_get_request(struct mpt_softc *mpt, int sleep_ok) 1034 { 1035 request_t *req; 1036 1037 retry: 1038 req = TAILQ_FIRST(&mpt->request_free_list); 1039 if (req != NULL) { 1040 KASSERT(req == &mpt->request_pool[req->index], 1041 ("mpt_get_request: corrupted request free list\n")); 1042 TAILQ_REMOVE(&mpt->request_free_list, req, links); 1043 req->state = REQ_STATE_ALLOCATED; 1044 req->chain = NULL; 1045 } else if (sleep_ok != 0) { 1046 mpt->getreqwaiter = 1; 1047 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); 1048 goto retry; 1049 } 1050 return req; 1051 } 1052 1053 /* Pass the command to the IOC */ 1054 void 1055 mpt_send_cmd(struct mpt_softc *mpt, request_t *req) 1056 { 1057 uint32_t *pReq; 1058 1059 pReq = req->req_vbuf; 1060 if (mpt->verbose > MPT_PRT_TRACE) { 1061 int offset; 1062 mpt_prt(mpt, "Send Request %d (0x%x):", 1063 req->index, req->req_pbuf); 1064 for (offset = 0; offset < mpt->request_frame_size; offset++) { 1065 if ((offset & 0x7) == 0) { 1066 mpt_prtc(mpt, "\n"); 1067 mpt_prt(mpt, " "); 1068 } 1069 mpt_prtc(mpt, " %08x", pReq[offset]); 1070 } 1071 mpt_prtc(mpt, "\n"); 1072 } 1073 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1074 BUS_DMASYNC_PREWRITE); 1075 req->state |= REQ_STATE_QUEUED; 1076 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); 1077 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); 1078 } 1079 1080 /* 1081 * Wait for a request to complete. 1082 * 1083 * Inputs: 1084 * mpt softc of controller executing request 1085 * req request to wait for 1086 * sleep_ok nonzero implies may sleep in this context 1087 * time_ms timeout in ms. 0 implies no timeout. 1088 * 1089 * Return Values: 1090 * 0 Request completed 1091 * non-0 Timeout fired before request completion. 1092 */ 1093 int 1094 mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1095 mpt_req_state_t state, mpt_req_state_t mask, 1096 int sleep_ok, int time_ms) 1097 { 1098 int error; 1099 int timeout; 1100 u_int saved_cnt; 1101 1102 /* 1103 * timeout is in ms. 0 indicates infinite wait. 1104 * Convert to ticks or 500us units depending on 1105 * our sleep mode. 1106 */ 1107 if (sleep_ok != 0) 1108 timeout = (time_ms * hz) / 1000; 1109 else 1110 timeout = time_ms * 2; 1111 req->state |= REQ_STATE_NEED_WAKEUP; 1112 mask &= ~REQ_STATE_NEED_WAKEUP; 1113 saved_cnt = mpt->reset_cnt; 1114 while ((req->state & mask) != state 1115 && mpt->reset_cnt == saved_cnt) { 1116 1117 if (sleep_ok != 0) { 1118 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); 1119 if (error == EWOULDBLOCK) { 1120 timeout = 0; 1121 break; 1122 } 1123 } else { 1124 if (time_ms != 0 && --timeout == 0) { 1125 mpt_prt(mpt, "mpt_wait_req timed out\n"); 1126 break; 1127 } 1128 DELAY(500); 1129 mpt_intr(mpt); 1130 } 1131 } 1132 req->state &= ~REQ_STATE_NEED_WAKEUP; 1133 if (mpt->reset_cnt != saved_cnt) 1134 return (EIO); 1135 if (time_ms && timeout <= 0) 1136 return (ETIMEDOUT); 1137 return (0); 1138 } 1139 1140 /* 1141 * Send a command to the IOC via the handshake register. 1142 * 1143 * Only done at initialization time and for certain unusual 1144 * commands such as device/bus reset as specified by LSI. 1145 */ 1146 int 1147 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1148 { 1149 int i; 1150 uint32_t data, *data32; 1151 1152 /* Check condition of the IOC */ 1153 data = mpt_rd_db(mpt); 1154 if ((MPT_STATE(data) != MPT_DB_STATE_READY 1155 && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1156 && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1157 || MPT_DB_IS_IN_USE(data)) { 1158 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1159 mpt_print_db(data); 1160 return (EBUSY); 1161 } 1162 1163 /* We move things in 32 bit chunks */ 1164 len = (len + 3) >> 2; 1165 data32 = cmd; 1166 1167 /* Clear any left over pending doorbell interupts */ 1168 if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1169 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1170 1171 /* 1172 * Tell the handshake reg. we are going to send a command 1173 * and how long it is going to be. 1174 */ 1175 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1176 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1177 mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1178 1179 /* Wait for the chip to notice */ 1180 if (mpt_wait_db_int(mpt) != MPT_OK) { 1181 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); 1182 return (ETIMEDOUT); 1183 } 1184 1185 /* Clear the interrupt */ 1186 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1187 1188 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1189 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); 1190 return (ETIMEDOUT); 1191 } 1192 1193 /* Send the command */ 1194 for (i = 0; i < len; i++) { 1195 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); 1196 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1197 mpt_prt(mpt, 1198 "mpt_send_handshake_cmd timeout! index = %d\n", 1199 i); 1200 return (ETIMEDOUT); 1201 } 1202 } 1203 return MPT_OK; 1204 } 1205 1206 /* Get the response from the handshake register */ 1207 int 1208 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1209 { 1210 int left, reply_left; 1211 u_int16_t *data16; 1212 MSG_DEFAULT_REPLY *hdr; 1213 1214 /* We move things out in 16 bit chunks */ 1215 reply_len >>= 1; 1216 data16 = (u_int16_t *)reply; 1217 1218 hdr = (MSG_DEFAULT_REPLY *)reply; 1219 1220 /* Get first word */ 1221 if (mpt_wait_db_int(mpt) != MPT_OK) { 1222 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1223 return ETIMEDOUT; 1224 } 1225 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1226 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1227 1228 /* Get Second Word */ 1229 if (mpt_wait_db_int(mpt) != MPT_OK) { 1230 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1231 return ETIMEDOUT; 1232 } 1233 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1234 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1235 1236 /* With the second word, we can now look at the length */ 1237 if (((reply_len >> 1) != hdr->MsgLength)) { 1238 mpt_prt(mpt, "reply length does not match message length: " 1239 "got 0x%02x, expected 0x%02x\n", 1240 hdr->MsgLength << 2, reply_len << 1); 1241 } 1242 1243 /* Get rest of the reply; but don't overflow the provided buffer */ 1244 left = (hdr->MsgLength << 1) - 2; 1245 reply_left = reply_len - 2; 1246 while (left--) { 1247 u_int16_t datum; 1248 1249 if (mpt_wait_db_int(mpt) != MPT_OK) { 1250 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1251 return ETIMEDOUT; 1252 } 1253 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1254 1255 if (reply_left-- > 0) 1256 *data16++ = datum & MPT_DB_DATA_MASK; 1257 1258 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1259 } 1260 1261 /* One more wait & clear at the end */ 1262 if (mpt_wait_db_int(mpt) != MPT_OK) { 1263 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1264 return ETIMEDOUT; 1265 } 1266 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1267 1268 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1269 if (mpt->verbose >= MPT_PRT_TRACE) 1270 mpt_print_reply(hdr); 1271 return (MPT_FAIL | hdr->IOCStatus); 1272 } 1273 1274 return (0); 1275 } 1276 1277 static int 1278 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1279 { 1280 MSG_IOC_FACTS f_req; 1281 int error; 1282 1283 bzero(&f_req, sizeof f_req); 1284 f_req.Function = MPI_FUNCTION_IOC_FACTS; 1285 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1286 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1287 if (error) 1288 return(error); 1289 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1290 return (error); 1291 } 1292 1293 static int 1294 mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1295 { 1296 MSG_PORT_FACTS f_req; 1297 int error; 1298 1299 /* XXX: Only getting PORT FACTS for Port 0 */ 1300 memset(&f_req, 0, sizeof f_req); 1301 f_req.Function = MPI_FUNCTION_PORT_FACTS; 1302 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1303 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1304 if (error) 1305 return(error); 1306 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1307 return (error); 1308 } 1309 1310 /* 1311 * Send the initialization request. This is where we specify how many 1312 * SCSI busses and how many devices per bus we wish to emulate. 1313 * This is also the command that specifies the max size of the reply 1314 * frames from the IOC that we will be allocating. 1315 */ 1316 static int 1317 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1318 { 1319 int error = 0; 1320 MSG_IOC_INIT init; 1321 MSG_IOC_INIT_REPLY reply; 1322 1323 bzero(&init, sizeof init); 1324 init.WhoInit = who; 1325 init.Function = MPI_FUNCTION_IOC_INIT; 1326 if (mpt->is_fc) { 1327 init.MaxDevices = 255; 1328 } else if (mpt->is_sas) { 1329 init.MaxDevices = mpt->mpt_max_devices; 1330 } else { 1331 init.MaxDevices = 16; 1332 } 1333 init.MaxBuses = 1; 1334 1335 init.MsgVersion = htole16(MPI_VERSION); 1336 init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1337 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1338 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1339 1340 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1341 return(error); 1342 } 1343 1344 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1345 return (error); 1346 } 1347 1348 1349 /* 1350 * Utiltity routine to read configuration headers and pages 1351 */ 1352 int 1353 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1354 u_int PageVersion, u_int PageLength, u_int PageNumber, 1355 u_int PageType, uint32_t PageAddress, bus_addr_t addr, 1356 bus_size_t len, int sleep_ok, int timeout_ms) 1357 { 1358 MSG_CONFIG *cfgp; 1359 SGE_SIMPLE32 *se; 1360 1361 cfgp = req->req_vbuf; 1362 memset(cfgp, 0, sizeof *cfgp); 1363 cfgp->Action = Action; 1364 cfgp->Function = MPI_FUNCTION_CONFIG; 1365 cfgp->Header.PageVersion = PageVersion; 1366 cfgp->Header.PageLength = PageLength; 1367 cfgp->Header.PageNumber = PageNumber; 1368 cfgp->Header.PageType = PageType; 1369 cfgp->PageAddress = PageAddress; 1370 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1371 se->Address = addr; 1372 MPI_pSGE_SET_LENGTH(se, len); 1373 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1374 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1375 MPI_SGE_FLAGS_END_OF_LIST | 1376 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1377 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1378 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1379 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1380 1381 mpt_check_doorbell(mpt); 1382 mpt_send_cmd(mpt, req); 1383 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1384 sleep_ok, timeout_ms)); 1385 } 1386 1387 1388 int 1389 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1390 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1391 int sleep_ok, int timeout_ms) 1392 { 1393 request_t *req; 1394 MSG_CONFIG *cfgp; 1395 int error; 1396 1397 req = mpt_get_request(mpt, sleep_ok); 1398 if (req == NULL) { 1399 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1400 return (ENOMEM); 1401 } 1402 1403 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, 1404 /*PageVersion*/0, /*PageLength*/0, PageNumber, 1405 PageType, PageAddress, /*addr*/0, /*len*/0, 1406 sleep_ok, timeout_ms); 1407 if (error != 0) { 1408 mpt_free_request(mpt, req); 1409 mpt_prt(mpt, "read_cfg_header timed out\n"); 1410 return (ETIMEDOUT); 1411 } 1412 1413 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1414 case MPI_IOCSTATUS_SUCCESS: 1415 cfgp = req->req_vbuf; 1416 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1417 error = 0; 1418 break; 1419 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1420 mpt_lprt(mpt, MPT_PRT_DEBUG, 1421 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1422 PageType, PageNumber, PageAddress); 1423 error = EINVAL; 1424 break; 1425 default: 1426 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1427 req->IOCStatus); 1428 error = EIO; 1429 break; 1430 } 1431 mpt_free_request(mpt, req); 1432 return (error); 1433 } 1434 1435 #define CFG_DATA_OFF 128 1436 1437 int 1438 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1439 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1440 int timeout_ms) 1441 { 1442 request_t *req; 1443 int error; 1444 1445 req = mpt_get_request(mpt, sleep_ok); 1446 if (req == NULL) { 1447 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1448 return (-1); 1449 } 1450 1451 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1452 hdr->PageLength, hdr->PageNumber, 1453 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1454 PageAddress, req->req_pbuf + CFG_DATA_OFF, 1455 len, sleep_ok, timeout_ms); 1456 if (error != 0) { 1457 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1458 return (-1); 1459 } 1460 1461 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1462 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1463 req->IOCStatus); 1464 mpt_free_request(mpt, req); 1465 return (-1); 1466 } 1467 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1468 BUS_DMASYNC_POSTREAD); 1469 memcpy(hdr, ((uint8_t *)req->req_vbuf)+CFG_DATA_OFF, len); 1470 mpt_free_request(mpt, req); 1471 return (0); 1472 } 1473 1474 int 1475 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1476 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1477 int timeout_ms) 1478 { 1479 request_t *req; 1480 u_int hdr_attr; 1481 int error; 1482 1483 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1484 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1485 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1486 mpt_prt(mpt, "page type 0x%x not changeable\n", 1487 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1488 return (-1); 1489 } 1490 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1491 1492 req = mpt_get_request(mpt, sleep_ok); 1493 if (req == NULL) 1494 return (-1); 1495 1496 memcpy(((caddr_t)req->req_vbuf)+CFG_DATA_OFF, hdr, len); 1497 /* Restore stripped out attributes */ 1498 hdr->PageType |= hdr_attr; 1499 1500 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1501 hdr->PageLength, hdr->PageNumber, 1502 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1503 PageAddress, req->req_pbuf + CFG_DATA_OFF, 1504 len, sleep_ok, timeout_ms); 1505 if (error != 0) { 1506 mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1507 return (-1); 1508 } 1509 1510 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1511 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1512 req->IOCStatus); 1513 mpt_free_request(mpt, req); 1514 return (-1); 1515 } 1516 mpt_free_request(mpt, req); 1517 return (0); 1518 } 1519 1520 /* 1521 * Read IOC configuration information 1522 */ 1523 static int 1524 mpt_read_config_info_ioc(struct mpt_softc *mpt) 1525 { 1526 CONFIG_PAGE_HEADER hdr; 1527 struct mpt_raid_volume *mpt_raid; 1528 int rv; 1529 int i; 1530 size_t len; 1531 1532 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1533 /*PageNumber*/2, /*PageAddress*/0, &hdr, 1534 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1535 /* 1536 * If it's an invalid page, so what? Not a supported function.... 1537 */ 1538 if (rv == EINVAL) 1539 return (0); 1540 if (rv) 1541 return (rv); 1542 1543 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %x, " 1544 "num %x, type %x\n", hdr.PageVersion, 1545 hdr.PageLength * sizeof(uint32_t), 1546 hdr.PageNumber, hdr.PageType); 1547 1548 len = hdr.PageLength * sizeof(uint32_t); 1549 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1550 if (mpt->ioc_page2 == NULL) 1551 return (ENOMEM); 1552 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1553 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1554 &mpt->ioc_page2->Header, len, 1555 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1556 if (rv) { 1557 mpt_prt(mpt, "failed to read IOC Page 2\n"); 1558 } else if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1559 uint32_t mask; 1560 1561 mpt_prt(mpt, "Capabilities: ("); 1562 for (mask = 1; mask != 0; mask <<= 1) { 1563 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) 1564 continue; 1565 1566 switch (mask) { 1567 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1568 mpt_prtc(mpt, " RAID-0"); 1569 break; 1570 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1571 mpt_prtc(mpt, " RAID-1E"); 1572 break; 1573 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1574 mpt_prtc(mpt, " RAID-1"); 1575 break; 1576 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1577 mpt_prtc(mpt, " SES"); 1578 break; 1579 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1580 mpt_prtc(mpt, " SAFTE"); 1581 break; 1582 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1583 mpt_prtc(mpt, " Multi-Channel-Arrays"); 1584 default: 1585 break; 1586 } 1587 } 1588 mpt_prtc(mpt, " )\n"); 1589 if ((mpt->ioc_page2->CapabilitiesFlags 1590 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1591 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1592 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1593 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1594 mpt->ioc_page2->NumActiveVolumes, 1595 mpt->ioc_page2->NumActiveVolumes != 1 1596 ? "s " : " ", 1597 mpt->ioc_page2->MaxVolumes); 1598 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1599 mpt->ioc_page2->NumActivePhysDisks, 1600 mpt->ioc_page2->NumActivePhysDisks != 1 1601 ? "s " : " ", 1602 mpt->ioc_page2->MaxPhysDisks); 1603 } 1604 } 1605 1606 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1607 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT); 1608 if (mpt->raid_volumes == NULL) { 1609 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1610 } else { 1611 memset(mpt->raid_volumes, 0, len); 1612 } 1613 1614 /* 1615 * Copy critical data out of ioc_page2 so that we can 1616 * safely refresh the page without windows of unreliable 1617 * data. 1618 */ 1619 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1620 1621 len = sizeof(*mpt->raid_volumes->config_page) 1622 + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1)); 1623 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1624 mpt_raid = &mpt->raid_volumes[i]; 1625 mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT); 1626 if (mpt_raid->config_page == NULL) { 1627 mpt_prt(mpt, "Could not allocate RAID page data\n"); 1628 break; 1629 } 1630 memset(mpt_raid->config_page, 0, len); 1631 } 1632 mpt->raid_page0_len = len; 1633 1634 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1635 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT); 1636 if (mpt->raid_disks == NULL) { 1637 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1638 } else { 1639 memset(mpt->raid_disks, 0, len); 1640 } 1641 1642 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1643 1644 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1645 /*PageNumber*/3, /*PageAddress*/0, &hdr, 1646 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1647 if (rv) 1648 return (EIO); 1649 1650 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 1651 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1652 1653 if (mpt->ioc_page3 != NULL) 1654 free(mpt->ioc_page3, M_DEVBUF); 1655 len = hdr.PageLength * sizeof(uint32_t); 1656 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1657 if (mpt->ioc_page3 == NULL) 1658 return (-1); 1659 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1660 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1661 &mpt->ioc_page3->Header, len, 1662 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1663 if (rv) { 1664 mpt_prt(mpt, "failed to read IOC Page 3\n"); 1665 } 1666 1667 mpt_raid_wakeup(mpt); 1668 1669 return (0); 1670 } 1671 1672 /* 1673 * Read SCSI configuration information 1674 */ 1675 static int 1676 mpt_read_config_info_spi(struct mpt_softc *mpt) 1677 { 1678 int rv, i; 1679 1680 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 1681 0, &mpt->mpt_port_page0.Header, 1682 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1683 if (rv) 1684 return (-1); 1685 mpt_lprt(mpt, MPT_PRT_DEBUG, 1686 "SPI Port Page 0 Header: %x %x %x %x\n", 1687 mpt->mpt_port_page0.Header.PageVersion, 1688 mpt->mpt_port_page0.Header.PageLength, 1689 mpt->mpt_port_page0.Header.PageNumber, 1690 mpt->mpt_port_page0.Header.PageType); 1691 1692 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 1693 0, &mpt->mpt_port_page1.Header, 1694 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1695 if (rv) 1696 return (-1); 1697 1698 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 1699 mpt->mpt_port_page1.Header.PageVersion, 1700 mpt->mpt_port_page1.Header.PageLength, 1701 mpt->mpt_port_page1.Header.PageNumber, 1702 mpt->mpt_port_page1.Header.PageType); 1703 1704 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 1705 /*PageAddress*/0, &mpt->mpt_port_page2.Header, 1706 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1707 if (rv) 1708 return (-1); 1709 1710 mpt_lprt(mpt, MPT_PRT_DEBUG, 1711 "SPI Port Page 2 Header: %x %x %x %x\n", 1712 mpt->mpt_port_page1.Header.PageVersion, 1713 mpt->mpt_port_page1.Header.PageLength, 1714 mpt->mpt_port_page1.Header.PageNumber, 1715 mpt->mpt_port_page1.Header.PageType); 1716 1717 for (i = 0; i < 16; i++) { 1718 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1719 0, i, &mpt->mpt_dev_page0[i].Header, 1720 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1721 if (rv) 1722 return (-1); 1723 1724 mpt_lprt(mpt, MPT_PRT_DEBUG, 1725 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", 1726 i, mpt->mpt_dev_page0[i].Header.PageVersion, 1727 mpt->mpt_dev_page0[i].Header.PageLength, 1728 mpt->mpt_dev_page0[i].Header.PageNumber, 1729 mpt->mpt_dev_page0[i].Header.PageType); 1730 1731 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1732 1, i, &mpt->mpt_dev_page1[i].Header, 1733 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1734 if (rv) 1735 return (-1); 1736 1737 mpt_lprt(mpt, MPT_PRT_DEBUG, 1738 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", 1739 i, mpt->mpt_dev_page1[i].Header.PageVersion, 1740 mpt->mpt_dev_page1[i].Header.PageLength, 1741 mpt->mpt_dev_page1[i].Header.PageNumber, 1742 mpt->mpt_dev_page1[i].Header.PageType); 1743 } 1744 1745 /* 1746 * At this point, we don't *have* to fail. As long as we have 1747 * valid config header information, we can (barely) lurch 1748 * along. 1749 */ 1750 1751 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1752 &mpt->mpt_port_page0.Header, 1753 sizeof(mpt->mpt_port_page0), 1754 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1755 if (rv) { 1756 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 1757 } else { 1758 mpt_lprt(mpt, MPT_PRT_DEBUG, 1759 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 1760 mpt->mpt_port_page0.Capabilities, 1761 mpt->mpt_port_page0.PhysicalInterface); 1762 } 1763 1764 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1765 &mpt->mpt_port_page1.Header, 1766 sizeof(mpt->mpt_port_page1), 1767 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1768 if (rv) { 1769 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 1770 } else { 1771 mpt_lprt(mpt, MPT_PRT_DEBUG, 1772 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 1773 mpt->mpt_port_page1.Configuration, 1774 mpt->mpt_port_page1.OnBusTimerValue); 1775 } 1776 1777 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1778 &mpt->mpt_port_page2.Header, 1779 sizeof(mpt->mpt_port_page2), 1780 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1781 if (rv) { 1782 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 1783 } else { 1784 mpt_lprt(mpt, MPT_PRT_DEBUG, 1785 "SPI Port Page 2: Flags %x Settings %x\n", 1786 mpt->mpt_port_page2.PortFlags, 1787 mpt->mpt_port_page2.PortSettings); 1788 for (i = 0; i < 16; i++) { 1789 mpt_lprt(mpt, MPT_PRT_DEBUG, 1790 "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1791 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1792 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1793 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1794 } 1795 } 1796 1797 for (i = 0; i < 16; i++) { 1798 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i, 1799 &mpt->mpt_dev_page0[i].Header, 1800 sizeof(*mpt->mpt_dev_page0), 1801 /*sleep_ok*/FALSE, 1802 /*timeout_ms*/5000); 1803 if (rv) { 1804 mpt_prt(mpt, 1805 "cannot read SPI Tgt %d Device Page 0\n", i); 1806 continue; 1807 } 1808 mpt_lprt(mpt, MPT_PRT_DEBUG, 1809 "SPI Tgt %d Page 0: NParms %x Information %x", 1810 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1811 mpt->mpt_dev_page0[i].Information); 1812 1813 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i, 1814 &mpt->mpt_dev_page1[i].Header, 1815 sizeof(*mpt->mpt_dev_page1), 1816 /*sleep_ok*/FALSE, 1817 /*timeout_ms*/5000); 1818 if (rv) { 1819 mpt_prt(mpt, 1820 "cannot read SPI Tgt %d Device Page 1\n", i); 1821 continue; 1822 } 1823 mpt_lprt(mpt, MPT_PRT_DEBUG, 1824 "SPI Tgt %d Page 1: RParms %x Configuration %x\n", 1825 i, mpt->mpt_dev_page1[i].RequestedParameters, 1826 mpt->mpt_dev_page1[i].Configuration); 1827 } 1828 return (0); 1829 } 1830 1831 /* 1832 * Validate SPI configuration information. 1833 * 1834 * In particular, validate SPI Port Page 1. 1835 */ 1836 static int 1837 mpt_set_initial_config_spi(struct mpt_softc *mpt) 1838 { 1839 int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id; 1840 int error; 1841 1842 mpt->mpt_disc_enable = 0xff; 1843 mpt->mpt_tag_enable = 0; 1844 1845 if (mpt->mpt_port_page1.Configuration != pp1val) { 1846 CONFIG_PAGE_SCSI_PORT_1 tmp; 1847 1848 mpt_prt(mpt, 1849 "SPI Port Page 1 Config value bad (%x)- should be %x\n", 1850 mpt->mpt_port_page1.Configuration, pp1val); 1851 tmp = mpt->mpt_port_page1; 1852 tmp.Configuration = pp1val; 1853 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0, 1854 &tmp.Header, sizeof(tmp), 1855 /*sleep_ok*/FALSE, 1856 /*timeout_ms*/5000); 1857 if (error) 1858 return (-1); 1859 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1860 &tmp.Header, sizeof(tmp), 1861 /*sleep_ok*/FALSE, 1862 /*timeout_ms*/5000); 1863 if (error) 1864 return (-1); 1865 if (tmp.Configuration != pp1val) { 1866 mpt_prt(mpt, 1867 "failed to reset SPI Port Page 1 Config value\n"); 1868 return (-1); 1869 } 1870 mpt->mpt_port_page1 = tmp; 1871 } 1872 1873 for (i = 0; i < 16; i++) { 1874 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 1875 tmp = mpt->mpt_dev_page1[i]; 1876 tmp.RequestedParameters = 0; 1877 tmp.Configuration = 0; 1878 mpt_lprt(mpt, MPT_PRT_DEBUG, 1879 "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n", 1880 i, tmp.RequestedParameters, tmp.Configuration); 1881 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i, 1882 &tmp.Header, sizeof(tmp), 1883 /*sleep_ok*/FALSE, 1884 /*timeout_ms*/5000); 1885 if (error) 1886 return (-1); 1887 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i, 1888 &tmp.Header, sizeof(tmp), 1889 /*sleep_ok*/FALSE, 1890 /*timeout_ms*/5000); 1891 if (error) 1892 return (-1); 1893 mpt->mpt_dev_page1[i] = tmp; 1894 mpt_lprt(mpt, MPT_PRT_DEBUG, 1895 "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i, 1896 mpt->mpt_dev_page1[i].RequestedParameters, 1897 mpt->mpt_dev_page1[i].Configuration); 1898 } 1899 return (0); 1900 } 1901 1902 /* 1903 * Enable IOC port 1904 */ 1905 static int 1906 mpt_send_port_enable(struct mpt_softc *mpt, int port) 1907 { 1908 request_t *req; 1909 MSG_PORT_ENABLE *enable_req; 1910 int error; 1911 1912 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1913 if (req == NULL) 1914 return (-1); 1915 1916 enable_req = req->req_vbuf; 1917 bzero(enable_req, sizeof *enable_req); 1918 1919 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1920 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1921 enable_req->PortNumber = port; 1922 1923 mpt_check_doorbell(mpt); 1924 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1925 1926 mpt_send_cmd(mpt, req); 1927 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1928 /*sleep_ok*/FALSE, 1929 /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1930 if (error != 0) { 1931 mpt_prt(mpt, "port enable timed out\n"); 1932 return (-1); 1933 } 1934 mpt_free_request(mpt, req); 1935 return (0); 1936 } 1937 1938 /* 1939 * Enable/Disable asynchronous event reporting. 1940 * 1941 * NB: this is the first command we send via shared memory 1942 * instead of the handshake register. 1943 */ 1944 static int 1945 mpt_send_event_request(struct mpt_softc *mpt, int onoff) 1946 { 1947 request_t *req; 1948 MSG_EVENT_NOTIFY *enable_req; 1949 1950 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1951 1952 enable_req = req->req_vbuf; 1953 bzero(enable_req, sizeof *enable_req); 1954 1955 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1956 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1957 enable_req->Switch = onoff; 1958 1959 mpt_check_doorbell(mpt); 1960 mpt_lprt(mpt, MPT_PRT_DEBUG, 1961 "%sabling async events\n", onoff ? "en" : "dis"); 1962 mpt_send_cmd(mpt, req); 1963 1964 return (0); 1965 } 1966 1967 /* 1968 * Un-mask the interupts on the chip. 1969 */ 1970 void 1971 mpt_enable_ints(struct mpt_softc *mpt) 1972 { 1973 /* Unmask every thing except door bell int */ 1974 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1975 } 1976 1977 /* 1978 * Mask the interupts on the chip. 1979 */ 1980 void 1981 mpt_disable_ints(struct mpt_softc *mpt) 1982 { 1983 /* Mask all interrupts */ 1984 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1985 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1986 } 1987 1988 static void 1989 mpt_sysctl_attach(struct mpt_softc *mpt) 1990 { 1991 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1992 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1993 1994 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1995 "debug", CTLFLAG_RW, &mpt->verbose, 0, 1996 "Debugging/Verbose level"); 1997 } 1998 1999 int 2000 mpt_attach(struct mpt_softc *mpt) 2001 { 2002 int i; 2003 2004 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2005 struct mpt_personality *pers; 2006 int error; 2007 2008 pers = mpt_personalities[i]; 2009 if (pers == NULL) 2010 continue; 2011 2012 if (pers->probe(mpt) == 0) { 2013 error = pers->attach(mpt); 2014 if (error != 0) { 2015 mpt_detach(mpt); 2016 return (error); 2017 } 2018 mpt->mpt_pers_mask |= (0x1 << pers->id); 2019 pers->use_count++; 2020 } 2021 } 2022 2023 return (0); 2024 } 2025 2026 int 2027 mpt_shutdown(struct mpt_softc *mpt) 2028 { 2029 struct mpt_personality *pers; 2030 2031 MPT_PERS_FOREACH_REVERSE(mpt, pers) 2032 pers->shutdown(mpt); 2033 2034 mpt_reset(mpt, /*reinit*/FALSE); 2035 return (0); 2036 } 2037 2038 int 2039 mpt_detach(struct mpt_softc *mpt) 2040 { 2041 struct mpt_personality *pers; 2042 2043 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2044 pers->detach(mpt); 2045 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2046 pers->use_count--; 2047 } 2048 2049 return (0); 2050 } 2051 2052 int 2053 mpt_core_load(struct mpt_personality *pers) 2054 { 2055 int i; 2056 2057 /* 2058 * Setup core handlers and insert the default handler 2059 * into all "empty slots". 2060 */ 2061 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) 2062 mpt_reply_handlers[i] = mpt_default_reply_handler; 2063 2064 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2065 mpt_event_reply_handler; 2066 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2067 mpt_config_reply_handler; 2068 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2069 mpt_handshake_reply_handler; 2070 2071 return (0); 2072 } 2073 2074 /* 2075 * Initialize per-instance driver data and perform 2076 * initial controller configuration. 2077 */ 2078 int 2079 mpt_core_attach(struct mpt_softc *mpt) 2080 { 2081 int val; 2082 int error; 2083 2084 LIST_INIT(&mpt->ack_frames); 2085 2086 /* Put all request buffers on the free list */ 2087 TAILQ_INIT(&mpt->request_pending_list); 2088 TAILQ_INIT(&mpt->request_free_list); 2089 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) 2090 mpt_free_request(mpt, &mpt->request_pool[val]); 2091 2092 mpt_sysctl_attach(mpt); 2093 2094 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2095 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2096 2097 error = mpt_configure_ioc(mpt); 2098 2099 return (error); 2100 } 2101 2102 void 2103 mpt_core_shutdown(struct mpt_softc *mpt) 2104 { 2105 } 2106 2107 void 2108 mpt_core_detach(struct mpt_softc *mpt) 2109 { 2110 } 2111 2112 int 2113 mpt_core_unload(struct mpt_personality *pers) 2114 { 2115 /* Unload is always successfull. */ 2116 return (0); 2117 } 2118 2119 #define FW_UPLOAD_REQ_SIZE \ 2120 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2121 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2122 2123 static int 2124 mpt_upload_fw(struct mpt_softc *mpt) 2125 { 2126 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2127 MSG_FW_UPLOAD_REPLY fw_reply; 2128 MSG_FW_UPLOAD *fw_req; 2129 FW_UPLOAD_TCSGE *tsge; 2130 SGE_SIMPLE32 *sge; 2131 uint32_t flags; 2132 int error; 2133 2134 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2135 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2136 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2137 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2138 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2139 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2140 tsge->DetailsLength = 12; 2141 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2142 tsge->ImageSize = htole32(mpt->fw_image_size); 2143 sge = (SGE_SIMPLE32 *)(tsge + 1); 2144 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2145 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2146 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2147 flags <<= MPI_SGE_FLAGS_SHIFT; 2148 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2149 sge->Address = htole32(mpt->fw_phys); 2150 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2151 if (error) 2152 return(error); 2153 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2154 return (error); 2155 } 2156 2157 static void 2158 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2159 uint32_t *data, bus_size_t len) 2160 { 2161 uint32_t *data_end; 2162 2163 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2164 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2165 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2166 while (data != data_end) { 2167 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2168 data++; 2169 } 2170 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2171 } 2172 2173 static int 2174 mpt_download_fw(struct mpt_softc *mpt) 2175 { 2176 MpiFwHeader_t *fw_hdr; 2177 int error; 2178 uint32_t ext_offset; 2179 uint32_t data; 2180 2181 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2182 mpt->fw_image_size); 2183 2184 error = mpt_enable_diag_mode(mpt); 2185 if (error != 0) { 2186 mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2187 return (EIO); 2188 } 2189 2190 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2191 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2192 2193 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2194 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2195 fw_hdr->ImageSize); 2196 2197 ext_offset = fw_hdr->NextImageHeaderOffset; 2198 while (ext_offset != 0) { 2199 MpiExtImageHeader_t *ext; 2200 2201 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2202 ext_offset = ext->NextImageHeaderOffset; 2203 2204 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2205 ext->ImageSize); 2206 } 2207 2208 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2209 /* Setup the address to jump to on reset. */ 2210 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2211 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2212 2213 /* 2214 * The controller sets the "flash bad" status after attempting 2215 * to auto-boot from flash. Clear the status so that the controller 2216 * will continue the boot process with our newly installed firmware. 2217 */ 2218 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2219 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2220 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2221 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2222 2223 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2224 2225 /* 2226 * Re-enable the processor and clear the boot halt flag. 2227 */ 2228 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2229 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2230 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2231 2232 mpt_disable_diag_mode(mpt); 2233 return (0); 2234 } 2235 2236 /* 2237 * Allocate/Initialize data structures for the controller. Called 2238 * once at instance startup. 2239 */ 2240 static int 2241 mpt_configure_ioc(struct mpt_softc *mpt) 2242 { 2243 MSG_PORT_FACTS_REPLY pfp; 2244 MSG_IOC_FACTS_REPLY facts; 2245 int try; 2246 int needreset; 2247 uint32_t max_chain_depth; 2248 2249 needreset = 0; 2250 for (try = 0; try < MPT_MAX_TRYS; try++) { 2251 2252 /* 2253 * No need to reset if the IOC is already in the READY state. 2254 * 2255 * Force reset if initialization failed previously. 2256 * Note that a hard_reset of the second channel of a '929 2257 * will stop operation of the first channel. Hopefully, if the 2258 * first channel is ok, the second will not require a hard 2259 * reset. 2260 */ 2261 if (needreset || (mpt_rd_db(mpt) & MPT_DB_STATE_MASK) != 2262 MPT_DB_STATE_READY) { 2263 if (mpt_reset(mpt, /*reinit*/FALSE) != MPT_OK) 2264 continue; 2265 } 2266 needreset = 0; 2267 2268 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { 2269 mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2270 needreset = 1; 2271 continue; 2272 } 2273 2274 mpt->mpt_global_credits = le16toh(facts.GlobalCredits); 2275 mpt->request_frame_size = le16toh(facts.RequestFrameSize); 2276 mpt->ioc_facts_flags = facts.Flags; 2277 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2278 le16toh(facts.MsgVersion) >> 8, 2279 le16toh(facts.MsgVersion) & 0xFF, 2280 le16toh(facts.HeaderVersion) >> 8, 2281 le16toh(facts.HeaderVersion) & 0xFF); 2282 2283 /* 2284 * Now that we know request frame size, we can calculate 2285 * the actual (reasonable) segment limit for read/write I/O. 2286 * 2287 * This limit is constrained by: 2288 * 2289 * + The size of each area we allocate per command (and how 2290 * many chain segments we can fit into it). 2291 * + The total number of areas we've set up. 2292 * + The actual chain depth the card will allow. 2293 * 2294 * The first area's segment count is limited by the I/O request 2295 * at the head of it. We cannot allocate realistically more 2296 * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2297 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2298 * 2299 */ 2300 max_chain_depth = facts.MaxChainDepth; 2301 2302 /* total number of request areas we (can) allocate */ 2303 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2304 2305 /* converted to the number of chain areas possible */ 2306 mpt->max_seg_cnt *= MPT_NRFM(mpt); 2307 2308 /* limited by the number of chain areas the card will support */ 2309 if (mpt->max_seg_cnt > max_chain_depth) { 2310 mpt_lprt(mpt, MPT_PRT_DEBUG, 2311 "chain depth limited to %u (from %u)\n", 2312 max_chain_depth, mpt->max_seg_cnt); 2313 mpt->max_seg_cnt = max_chain_depth; 2314 } 2315 2316 /* converted to the number of simple sges in chain segments. */ 2317 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2318 2319 mpt_lprt(mpt, MPT_PRT_DEBUG, 2320 "Maximum Segment Count: %u\n", mpt->max_seg_cnt); 2321 mpt_lprt(mpt, MPT_PRT_DEBUG, 2322 "MsgLength=%u IOCNumber = %d\n", 2323 facts.MsgLength, facts.IOCNumber); 2324 mpt_lprt(mpt, MPT_PRT_DEBUG, 2325 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2326 "Request Frame Size %u bytes Max Chain Depth %u\n", 2327 mpt->mpt_global_credits, facts.BlockSize, 2328 mpt->request_frame_size << 2, max_chain_depth); 2329 mpt_lprt(mpt, MPT_PRT_DEBUG, 2330 "IOCFACTS: Num Ports %d, FWImageSize %d, " 2331 "Flags=%#x\n", facts.NumberOfPorts, 2332 le32toh(facts.FWImageSize), facts.Flags); 2333 2334 2335 if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { 2336 struct mpt_map_info mi; 2337 int error; 2338 2339 /* 2340 * In some configurations, the IOC's firmware is 2341 * stored in a shared piece of system NVRAM that 2342 * is only accessable via the BIOS. In this 2343 * case, the firmware keeps a copy of firmware in 2344 * RAM until the OS driver retrieves it. Once 2345 * retrieved, we are responsible for re-downloading 2346 * the firmware after any hard-reset. 2347 */ 2348 mpt->fw_image_size = le32toh(facts.FWImageSize); 2349 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 2350 /*alignment*/1, /*boundary*/0, 2351 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2352 /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, 2353 /*filterarg*/NULL, mpt->fw_image_size, 2354 /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, 2355 /*flags*/0, &mpt->fw_dmat); 2356 if (error != 0) { 2357 mpt_prt(mpt, "cannot create fw dma tag\n"); 2358 return (ENOMEM); 2359 } 2360 error = bus_dmamem_alloc(mpt->fw_dmat, 2361 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, 2362 &mpt->fw_dmap); 2363 if (error != 0) { 2364 mpt_prt(mpt, "cannot allocate fw mem.\n"); 2365 bus_dma_tag_destroy(mpt->fw_dmat); 2366 return (ENOMEM); 2367 } 2368 mi.mpt = mpt; 2369 mi.error = 0; 2370 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2371 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, 2372 &mi, 0); 2373 mpt->fw_phys = mi.phys; 2374 2375 error = mpt_upload_fw(mpt); 2376 if (error != 0) { 2377 mpt_prt(mpt, "fw upload failed.\n"); 2378 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2379 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2380 mpt->fw_dmap); 2381 bus_dma_tag_destroy(mpt->fw_dmat); 2382 mpt->fw_image = NULL; 2383 return (EIO); 2384 } 2385 } 2386 2387 if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { 2388 mpt_prt(mpt, "mpt_get_portfacts failed\n"); 2389 needreset = 1; 2390 continue; 2391 } 2392 2393 mpt_lprt(mpt, MPT_PRT_DEBUG, 2394 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", 2395 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, 2396 pfp.MaxDevices); 2397 2398 mpt->mpt_port_type = pfp.PortType; 2399 mpt->mpt_proto_flags = pfp.ProtocolFlags; 2400 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && 2401 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && 2402 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { 2403 mpt_prt(mpt, "Unsupported Port Type (%x)\n", 2404 pfp.PortType); 2405 return (ENXIO); 2406 } 2407 if (!(pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) { 2408 mpt_prt(mpt, "initiator role unsupported\n"); 2409 return (ENXIO); 2410 } 2411 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2412 mpt->is_fc = 1; 2413 mpt->is_sas = 0; 2414 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2415 mpt->is_fc = 0; 2416 mpt->is_sas = 1; 2417 } else { 2418 mpt->is_fc = 0; 2419 mpt->is_sas = 0; 2420 } 2421 mpt->mpt_ini_id = pfp.PortSCSIID; 2422 mpt->mpt_max_devices = pfp.MaxDevices; 2423 2424 if (mpt_enable_ioc(mpt) != 0) { 2425 mpt_prt(mpt, "Unable to initialize IOC\n"); 2426 return (ENXIO); 2427 } 2428 2429 /* 2430 * Read and set up initial configuration information 2431 * (IOC and SPI only for now) 2432 * 2433 * XXX Should figure out what "personalities" are 2434 * available and defer all initialization junk to 2435 * them. 2436 */ 2437 mpt_read_config_info_ioc(mpt); 2438 2439 if (mpt->is_fc == 0 && mpt->is_sas == 0) { 2440 if (mpt_read_config_info_spi(mpt)) { 2441 return (EIO); 2442 } 2443 if (mpt_set_initial_config_spi(mpt)) { 2444 return (EIO); 2445 } 2446 } 2447 2448 /* Everything worked */ 2449 break; 2450 } 2451 2452 if (try >= MPT_MAX_TRYS) { 2453 mpt_prt(mpt, "failed to initialize IOC"); 2454 return (EIO); 2455 } 2456 2457 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling interrupts\n"); 2458 2459 mpt_enable_ints(mpt); 2460 return (0); 2461 } 2462 2463 static int 2464 mpt_enable_ioc(struct mpt_softc *mpt) 2465 { 2466 uint32_t pptr; 2467 int val; 2468 2469 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2470 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2471 return (EIO); 2472 } 2473 2474 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2475 2476 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2477 mpt_prt(mpt, "IOC failed to go to run state\n"); 2478 return (ENXIO); 2479 } 2480 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2481 2482 /* 2483 * Give it reply buffers 2484 * 2485 * Do *not* exceed global credits. 2486 */ 2487 for (val = 0, pptr = mpt->reply_phys; 2488 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2489 pptr += MPT_REPLY_SIZE) { 2490 mpt_free_reply(mpt, pptr); 2491 if (++val == mpt->mpt_global_credits - 1) 2492 break; 2493 } 2494 2495 /* 2496 * Enable asynchronous event reporting 2497 */ 2498 mpt_send_event_request(mpt, 1); 2499 2500 /* 2501 * Enable the port 2502 */ 2503 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2504 mpt_prt(mpt, "failed to enable port 0\n"); 2505 return (ENXIO); 2506 } 2507 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n"); 2508 2509 2510 return (MPT_OK); 2511 } 2512