1 /*- 2 * Generic routines for LSI Fusion adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * 62 * Support from LSI-Logic has also gone a great deal toward making this a 63 * workable subsystem and is gratefully acknowledged. 64 */ 65 /*- 66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 67 * Copyright (c) 2005, WHEEL Sp. z o.o. 68 * Copyright (c) 2004, 2005 Justin T. Gibbs 69 * All rights reserved. 70 * 71 * Redistribution and use in source and binary forms, with or without 72 * modification, are permitted provided that the following conditions are 73 * met: 74 * 1. Redistributions of source code must retain the above copyright 75 * notice, this list of conditions and the following disclaimer. 76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 77 * substantially similar to the "NO WARRANTY" disclaimer below 78 * ("Disclaimer") and any redistribution must be conditioned upon including 79 * a substantially similar Disclaimer requirement for further binary 80 * redistribution. 81 * 3. Neither the names of the above listed copyright holders nor the names 82 * of any contributors may be used to endorse or promote products derived 83 * from this software without specific prior written permission. 84 * 85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 96 */ 97 98 #include <sys/cdefs.h> 99 __FBSDID("$FreeBSD$"); 100 101 #include <dev/mpt/mpt.h> 102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 104 105 #include <dev/mpt/mpilib/mpi.h> 106 #include <dev/mpt/mpilib/mpi_ioc.h> 107 #include <dev/mpt/mpilib/mpi_fc.h> 108 #include <dev/mpt/mpilib/mpi_targ.h> 109 110 #include <sys/sysctl.h> 111 112 #define MPT_MAX_TRYS 3 113 #define MPT_MAX_WAIT 300000 114 115 static int maxwait_ack = 0; 116 static int maxwait_int = 0; 117 static int maxwait_state = 0; 118 119 TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 121 122 static mpt_reply_handler_t mpt_default_reply_handler; 123 static mpt_reply_handler_t mpt_config_reply_handler; 124 static mpt_reply_handler_t mpt_handshake_reply_handler; 125 static mpt_reply_handler_t mpt_event_reply_handler; 126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 127 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 129 static int mpt_soft_reset(struct mpt_softc *mpt); 130 static void mpt_hard_reset(struct mpt_softc *mpt); 131 static int mpt_configure_ioc(struct mpt_softc *mpt); 132 static int mpt_enable_ioc(struct mpt_softc *mpt, int); 133 134 /************************* Personality Module Support *************************/ 135 /* 136 * We include one extra entry that is guaranteed to be NULL 137 * to simplify our itterator. 138 */ 139 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 140 static __inline struct mpt_personality* 141 mpt_pers_find(struct mpt_softc *, u_int); 142 static __inline struct mpt_personality* 143 mpt_pers_find_reverse(struct mpt_softc *, u_int); 144 145 static __inline struct mpt_personality * 146 mpt_pers_find(struct mpt_softc *mpt, u_int start_at) 147 { 148 KASSERT(start_at <= MPT_MAX_PERSONALITIES, 149 ("mpt_pers_find: starting position out of range\n")); 150 151 while (start_at < MPT_MAX_PERSONALITIES 152 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 153 start_at++; 154 } 155 return (mpt_personalities[start_at]); 156 } 157 158 /* 159 * Used infrequently, so no need to optimize like a forward 160 * traversal where we use the MAX+1 is guaranteed to be NULL 161 * trick. 162 */ 163 static __inline struct mpt_personality * 164 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 165 { 166 while (start_at < MPT_MAX_PERSONALITIES 167 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 168 start_at--; 169 } 170 if (start_at < MPT_MAX_PERSONALITIES) 171 return (mpt_personalities[start_at]); 172 return (NULL); 173 } 174 175 #define MPT_PERS_FOREACH(mpt, pers) \ 176 for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 177 pers != NULL; \ 178 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 179 180 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 181 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 182 pers != NULL; \ 183 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 184 185 static mpt_load_handler_t mpt_stdload; 186 static mpt_probe_handler_t mpt_stdprobe; 187 static mpt_attach_handler_t mpt_stdattach; 188 static mpt_enable_handler_t mpt_stdenable; 189 static mpt_event_handler_t mpt_stdevent; 190 static mpt_reset_handler_t mpt_stdreset; 191 static mpt_shutdown_handler_t mpt_stdshutdown; 192 static mpt_detach_handler_t mpt_stddetach; 193 static mpt_unload_handler_t mpt_stdunload; 194 static struct mpt_personality mpt_default_personality = 195 { 196 .load = mpt_stdload, 197 .probe = mpt_stdprobe, 198 .attach = mpt_stdattach, 199 .enable = mpt_stdenable, 200 .event = mpt_stdevent, 201 .reset = mpt_stdreset, 202 .shutdown = mpt_stdshutdown, 203 .detach = mpt_stddetach, 204 .unload = mpt_stdunload 205 }; 206 207 static mpt_load_handler_t mpt_core_load; 208 static mpt_attach_handler_t mpt_core_attach; 209 static mpt_enable_handler_t mpt_core_enable; 210 static mpt_reset_handler_t mpt_core_ioc_reset; 211 static mpt_event_handler_t mpt_core_event; 212 static mpt_shutdown_handler_t mpt_core_shutdown; 213 static mpt_shutdown_handler_t mpt_core_detach; 214 static mpt_unload_handler_t mpt_core_unload; 215 static struct mpt_personality mpt_core_personality = 216 { 217 .name = "mpt_core", 218 .load = mpt_core_load, 219 .attach = mpt_core_attach, 220 .enable = mpt_core_enable, 221 .event = mpt_core_event, 222 .reset = mpt_core_ioc_reset, 223 .shutdown = mpt_core_shutdown, 224 .detach = mpt_core_detach, 225 .unload = mpt_core_unload, 226 }; 227 228 /* 229 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 230 * ordering information. We want the core to always register FIRST. 231 * other modules are set to SI_ORDER_SECOND. 232 */ 233 static moduledata_t mpt_core_mod = { 234 "mpt_core", mpt_modevent, &mpt_core_personality 235 }; 236 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 237 MODULE_VERSION(mpt_core, 1); 238 239 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) 240 241 242 int 243 mpt_modevent(module_t mod, int type, void *data) 244 { 245 struct mpt_personality *pers; 246 int error; 247 248 pers = (struct mpt_personality *)data; 249 250 error = 0; 251 switch (type) { 252 case MOD_LOAD: 253 { 254 mpt_load_handler_t **def_handler; 255 mpt_load_handler_t **pers_handler; 256 int i; 257 258 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 259 if (mpt_personalities[i] == NULL) 260 break; 261 } 262 if (i >= MPT_MAX_PERSONALITIES) { 263 error = ENOMEM; 264 break; 265 } 266 pers->id = i; 267 mpt_personalities[i] = pers; 268 269 /* Install standard/noop handlers for any NULL entries. */ 270 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 271 pers_handler = MPT_PERS_FIRST_HANDLER(pers); 272 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 273 if (*pers_handler == NULL) 274 *pers_handler = *def_handler; 275 pers_handler++; 276 def_handler++; 277 } 278 279 error = (pers->load(pers)); 280 if (error != 0) 281 mpt_personalities[i] = NULL; 282 break; 283 } 284 case MOD_SHUTDOWN: 285 break; 286 #if __FreeBSD_version >= 500000 287 case MOD_QUIESCE: 288 break; 289 #endif 290 case MOD_UNLOAD: 291 error = pers->unload(pers); 292 mpt_personalities[pers->id] = NULL; 293 break; 294 default: 295 error = EINVAL; 296 break; 297 } 298 return (error); 299 } 300 301 int 302 mpt_stdload(struct mpt_personality *pers) 303 { 304 /* Load is always successfull. */ 305 return (0); 306 } 307 308 int 309 mpt_stdprobe(struct mpt_softc *mpt) 310 { 311 /* Probe is always successfull. */ 312 return (0); 313 } 314 315 int 316 mpt_stdattach(struct mpt_softc *mpt) 317 { 318 /* Attach is always successfull. */ 319 return (0); 320 } 321 322 int 323 mpt_stdenable(struct mpt_softc *mpt) 324 { 325 /* Enable is always successfull. */ 326 return (0); 327 } 328 329 int 330 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 331 { 332 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 333 /* Event was not for us. */ 334 return (0); 335 } 336 337 void 338 mpt_stdreset(struct mpt_softc *mpt, int type) 339 { 340 } 341 342 void 343 mpt_stdshutdown(struct mpt_softc *mpt) 344 { 345 } 346 347 void 348 mpt_stddetach(struct mpt_softc *mpt) 349 { 350 } 351 352 int 353 mpt_stdunload(struct mpt_personality *pers) 354 { 355 /* Unload is always successfull. */ 356 return (0); 357 } 358 359 /******************************* Bus DMA Support ******************************/ 360 void 361 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 362 { 363 struct mpt_map_info *map_info; 364 365 map_info = (struct mpt_map_info *)arg; 366 map_info->error = error; 367 map_info->phys = segs->ds_addr; 368 } 369 370 /**************************** Reply/Event Handling ****************************/ 371 int 372 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 373 mpt_handler_t handler, uint32_t *phandler_id) 374 { 375 376 switch (type) { 377 case MPT_HANDLER_REPLY: 378 { 379 u_int cbi; 380 u_int free_cbi; 381 382 if (phandler_id == NULL) 383 return (EINVAL); 384 385 free_cbi = MPT_HANDLER_ID_NONE; 386 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 387 /* 388 * If the same handler is registered multiple 389 * times, don't error out. Just return the 390 * index of the original registration. 391 */ 392 if (mpt_reply_handlers[cbi] == handler.reply_handler) { 393 *phandler_id = MPT_CBI_TO_HID(cbi); 394 return (0); 395 } 396 397 /* 398 * Fill from the front in the hope that 399 * all registered handlers consume only a 400 * single cache line. 401 * 402 * We don't break on the first empty slot so 403 * that the full table is checked to see if 404 * this handler was previously registered. 405 */ 406 if (free_cbi == MPT_HANDLER_ID_NONE && 407 (mpt_reply_handlers[cbi] 408 == mpt_default_reply_handler)) 409 free_cbi = cbi; 410 } 411 if (free_cbi == MPT_HANDLER_ID_NONE) { 412 return (ENOMEM); 413 } 414 mpt_reply_handlers[free_cbi] = handler.reply_handler; 415 *phandler_id = MPT_CBI_TO_HID(free_cbi); 416 break; 417 } 418 default: 419 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 420 return (EINVAL); 421 } 422 return (0); 423 } 424 425 int 426 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 427 mpt_handler_t handler, uint32_t handler_id) 428 { 429 430 switch (type) { 431 case MPT_HANDLER_REPLY: 432 { 433 u_int cbi; 434 435 cbi = MPT_CBI(handler_id); 436 if (cbi >= MPT_NUM_REPLY_HANDLERS 437 || mpt_reply_handlers[cbi] != handler.reply_handler) 438 return (ENOENT); 439 mpt_reply_handlers[cbi] = mpt_default_reply_handler; 440 break; 441 } 442 default: 443 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 444 return (EINVAL); 445 } 446 return (0); 447 } 448 449 static int 450 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 451 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 452 { 453 mpt_prt(mpt, 454 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", 455 req, req->serno, reply_desc, reply_frame); 456 457 if (reply_frame != NULL) 458 mpt_dump_reply_frame(mpt, reply_frame); 459 460 mpt_prt(mpt, "Reply Frame Ignored\n"); 461 462 return (/*free_reply*/TRUE); 463 } 464 465 static int 466 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 467 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 468 { 469 if (req != NULL) { 470 471 if (reply_frame != NULL) { 472 MSG_CONFIG *cfgp; 473 MSG_CONFIG_REPLY *reply; 474 475 cfgp = (MSG_CONFIG *)req->req_vbuf; 476 reply = (MSG_CONFIG_REPLY *)reply_frame; 477 req->IOCStatus = le16toh(reply_frame->IOCStatus); 478 bcopy(&reply->Header, &cfgp->Header, 479 sizeof(cfgp->Header)); 480 } 481 req->state &= ~REQ_STATE_QUEUED; 482 req->state |= REQ_STATE_DONE; 483 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 484 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 485 wakeup(req); 486 } 487 } 488 489 return (TRUE); 490 } 491 492 static int 493 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 494 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 495 { 496 /* Nothing to be done. */ 497 return (TRUE); 498 } 499 500 static int 501 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 502 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 503 { 504 int free_reply; 505 506 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); 507 KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); 508 509 free_reply = TRUE; 510 switch (reply_frame->Function) { 511 case MPI_FUNCTION_EVENT_NOTIFICATION: 512 { 513 MSG_EVENT_NOTIFY_REPLY *msg; 514 struct mpt_personality *pers; 515 u_int handled; 516 517 handled = 0; 518 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 519 MPT_PERS_FOREACH(mpt, pers) 520 handled += pers->event(mpt, req, msg); 521 522 if (handled == 0 && mpt->mpt_pers_mask == 0) { 523 mpt_lprt(mpt, MPT_PRT_INFO, 524 "No Handlers For Any Event Notify Frames. " 525 "Event %#x (ACK %sequired).\n", 526 msg->Event, msg->AckRequired? "r" : "not r"); 527 } else if (handled == 0) { 528 mpt_lprt(mpt, MPT_PRT_WARN, 529 "Unhandled Event Notify Frame. Event %#x " 530 "(ACK %sequired).\n", 531 msg->Event, msg->AckRequired? "r" : "not r"); 532 } 533 534 if (msg->AckRequired) { 535 request_t *ack_req; 536 uint32_t context; 537 538 context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 539 ack_req = mpt_get_request(mpt, FALSE); 540 if (ack_req == NULL) { 541 struct mpt_evtf_record *evtf; 542 543 evtf = (struct mpt_evtf_record *)reply_frame; 544 evtf->context = context; 545 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 546 free_reply = FALSE; 547 break; 548 } 549 mpt_send_event_ack(mpt, ack_req, msg, context); 550 /* 551 * Don't check for CONTINUATION_REPLY here 552 */ 553 return (free_reply); 554 } 555 break; 556 } 557 case MPI_FUNCTION_PORT_ENABLE: 558 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); 559 break; 560 case MPI_FUNCTION_EVENT_ACK: 561 break; 562 default: 563 mpt_prt(mpt, "unknown event function: %x\n", 564 reply_frame->Function); 565 break; 566 } 567 568 /* 569 * I'm not sure that this continuation stuff works as it should. 570 * 571 * I've had FC async events occur that free the frame up because 572 * the continuation bit isn't set, and then additional async events 573 * then occur using the same context. As you might imagine, this 574 * leads to Very Bad Thing. 575 * 576 * Let's just be safe for now and not free them up until we figure 577 * out what's actually happening here. 578 */ 579 #if 0 580 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 581 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 582 mpt_free_request(mpt, req); 583 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", 584 reply_frame->Function, req, req->serno); 585 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 586 MSG_EVENT_NOTIFY_REPLY *msg = 587 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 588 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 589 msg->Event, msg->AckRequired); 590 } 591 } else { 592 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", 593 reply_frame->Function, req, req->serno); 594 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 595 MSG_EVENT_NOTIFY_REPLY *msg = 596 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 597 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 598 msg->Event, msg->AckRequired); 599 } 600 mpt_prtc(mpt, "\n"); 601 } 602 #endif 603 return (free_reply); 604 } 605 606 /* 607 * Process an asynchronous event from the IOC. 608 */ 609 static int 610 mpt_core_event(struct mpt_softc *mpt, request_t *req, 611 MSG_EVENT_NOTIFY_REPLY *msg) 612 { 613 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 614 msg->Event & 0xFF); 615 switch(msg->Event & 0xFF) { 616 case MPI_EVENT_NONE: 617 break; 618 case MPI_EVENT_LOG_DATA: 619 { 620 int i; 621 622 /* Some error occured that LSI wants logged */ 623 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 624 msg->IOCLogInfo); 625 mpt_prt(mpt, "\tEvtLogData: Event Data:"); 626 for (i = 0; i < msg->EventDataLength; i++) 627 mpt_prtc(mpt, " %08x", msg->Data[i]); 628 mpt_prtc(mpt, "\n"); 629 break; 630 } 631 case MPI_EVENT_EVENT_CHANGE: 632 /* 633 * This is just an acknowledgement 634 * of our mpt_send_event_request. 635 */ 636 break; 637 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 638 break; 639 default: 640 return (0); 641 break; 642 } 643 return (1); 644 } 645 646 static void 647 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 648 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) 649 { 650 MSG_EVENT_ACK *ackp; 651 652 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; 653 memset(ackp, 0, sizeof (*ackp)); 654 ackp->Function = MPI_FUNCTION_EVENT_ACK; 655 ackp->Event = msg->Event; 656 ackp->EventContext = msg->EventContext; 657 ackp->MsgContext = context; 658 mpt_check_doorbell(mpt); 659 mpt_send_cmd(mpt, ack_req); 660 } 661 662 /***************************** Interrupt Handling *****************************/ 663 void 664 mpt_intr(void *arg) 665 { 666 struct mpt_softc *mpt; 667 uint32_t reply_desc; 668 int ntrips = 0; 669 670 mpt = (struct mpt_softc *)arg; 671 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n"); 672 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { 673 request_t *req; 674 MSG_DEFAULT_REPLY *reply_frame; 675 uint32_t reply_baddr; 676 uint32_t ctxt_idx; 677 u_int cb_index; 678 u_int req_index; 679 int free_rf; 680 681 req = NULL; 682 reply_frame = NULL; 683 reply_baddr = 0; 684 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { 685 u_int offset; 686 /* 687 * Insure that the reply frame is coherent. 688 */ 689 reply_baddr = MPT_REPLY_BADDR(reply_desc); 690 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); 691 bus_dmamap_sync_range(mpt->reply_dmat, 692 mpt->reply_dmap, offset, MPT_REPLY_SIZE, 693 BUS_DMASYNC_POSTREAD); 694 reply_frame = MPT_REPLY_OTOV(mpt, offset); 695 ctxt_idx = le32toh(reply_frame->MsgContext); 696 } else { 697 uint32_t type; 698 699 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc); 700 ctxt_idx = reply_desc; 701 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n", 702 reply_desc); 703 704 switch (type) { 705 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT: 706 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK; 707 break; 708 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET: 709 ctxt_idx = GET_IO_INDEX(reply_desc); 710 if (mpt->tgt_cmd_ptrs == NULL) { 711 mpt_prt(mpt, 712 "mpt_intr: no target cmd ptrs\n"); 713 reply_desc = MPT_REPLY_EMPTY; 714 break; 715 } 716 if (ctxt_idx >= mpt->tgt_cmds_allocated) { 717 mpt_prt(mpt, 718 "mpt_intr: bad tgt cmd ctxt %u\n", 719 ctxt_idx); 720 reply_desc = MPT_REPLY_EMPTY; 721 ntrips = 1000; 722 break; 723 } 724 req = mpt->tgt_cmd_ptrs[ctxt_idx]; 725 if (req == NULL) { 726 mpt_prt(mpt, "no request backpointer " 727 "at index %u", ctxt_idx); 728 reply_desc = MPT_REPLY_EMPTY; 729 ntrips = 1000; 730 break; 731 } 732 /* 733 * Reformulate ctxt_idx to be just as if 734 * it were another type of context reply 735 * so the code below will find the request 736 * via indexing into the pool. 737 */ 738 ctxt_idx = 739 req->index | mpt->scsi_tgt_handler_id; 740 req = NULL; 741 break; 742 case MPI_CONTEXT_REPLY_TYPE_LAN: 743 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n", 744 reply_desc); 745 reply_desc = MPT_REPLY_EMPTY; 746 break; 747 default: 748 mpt_prt(mpt, "Context Reply 0x%08x?\n", type); 749 reply_desc = MPT_REPLY_EMPTY; 750 break; 751 } 752 if (reply_desc == MPT_REPLY_EMPTY) { 753 if (ntrips++ > 1000) { 754 break; 755 } 756 continue; 757 } 758 } 759 760 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx); 761 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx); 762 if (req_index < MPT_MAX_REQUESTS(mpt)) { 763 req = &mpt->request_pool[req_index]; 764 } else { 765 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc ==" 766 " 0x%x)\n", req_index, reply_desc); 767 } 768 769 free_rf = mpt_reply_handlers[cb_index](mpt, req, 770 reply_desc, reply_frame); 771 772 if (reply_frame != NULL && free_rf) { 773 mpt_free_reply(mpt, reply_baddr); 774 } 775 776 /* 777 * If we got ourselves disabled, don't get stuck in a loop 778 */ 779 if (mpt->disabled) { 780 mpt_disable_ints(mpt); 781 break; 782 } 783 if (ntrips++ > 1000) { 784 break; 785 } 786 } 787 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n"); 788 } 789 790 /******************************* Error Recovery *******************************/ 791 void 792 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 793 u_int iocstatus) 794 { 795 MSG_DEFAULT_REPLY ioc_status_frame; 796 request_t *req; 797 798 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame)); 799 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 800 ioc_status_frame.IOCStatus = iocstatus; 801 while((req = TAILQ_FIRST(chain)) != NULL) { 802 MSG_REQUEST_HEADER *msg_hdr; 803 u_int cb_index; 804 805 TAILQ_REMOVE(chain, req, links); 806 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 807 ioc_status_frame.Function = msg_hdr->Function; 808 ioc_status_frame.MsgContext = msg_hdr->MsgContext; 809 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 810 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, 811 &ioc_status_frame); 812 } 813 } 814 815 /********************************* Diagnostics ********************************/ 816 /* 817 * Perform a diagnostic dump of a reply frame. 818 */ 819 void 820 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) 821 { 822 mpt_prt(mpt, "Address Reply:\n"); 823 mpt_print_reply(reply_frame); 824 } 825 826 /******************************* Doorbell Access ******************************/ 827 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); 828 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); 829 830 static __inline uint32_t 831 mpt_rd_db(struct mpt_softc *mpt) 832 { 833 return mpt_read(mpt, MPT_OFFSET_DOORBELL); 834 } 835 836 static __inline uint32_t 837 mpt_rd_intr(struct mpt_softc *mpt) 838 { 839 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); 840 } 841 842 /* Busy wait for a door bell to be read by IOC */ 843 static int 844 mpt_wait_db_ack(struct mpt_softc *mpt) 845 { 846 int i; 847 for (i=0; i < MPT_MAX_WAIT; i++) { 848 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { 849 maxwait_ack = i > maxwait_ack ? i : maxwait_ack; 850 return (MPT_OK); 851 } 852 DELAY(200); 853 } 854 return (MPT_FAIL); 855 } 856 857 /* Busy wait for a door bell interrupt */ 858 static int 859 mpt_wait_db_int(struct mpt_softc *mpt) 860 { 861 int i; 862 for (i=0; i < MPT_MAX_WAIT; i++) { 863 if (MPT_DB_INTR(mpt_rd_intr(mpt))) { 864 maxwait_int = i > maxwait_int ? i : maxwait_int; 865 return MPT_OK; 866 } 867 DELAY(100); 868 } 869 return (MPT_FAIL); 870 } 871 872 /* Wait for IOC to transition to a give state */ 873 void 874 mpt_check_doorbell(struct mpt_softc *mpt) 875 { 876 uint32_t db = mpt_rd_db(mpt); 877 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { 878 mpt_prt(mpt, "Device not running\n"); 879 mpt_print_db(db); 880 } 881 } 882 883 /* Wait for IOC to transition to a give state */ 884 static int 885 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) 886 { 887 int i; 888 889 for (i = 0; i < MPT_MAX_WAIT; i++) { 890 uint32_t db = mpt_rd_db(mpt); 891 if (MPT_STATE(db) == state) { 892 maxwait_state = i > maxwait_state ? i : maxwait_state; 893 return (MPT_OK); 894 } 895 DELAY(100); 896 } 897 return (MPT_FAIL); 898 } 899 900 901 /************************* Intialization/Configuration ************************/ 902 static int mpt_download_fw(struct mpt_softc *mpt); 903 904 /* Issue the reset COMMAND to the IOC */ 905 static int 906 mpt_soft_reset(struct mpt_softc *mpt) 907 { 908 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); 909 910 /* Have to use hard reset if we are not in Running state */ 911 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { 912 mpt_prt(mpt, "soft reset failed: device not running\n"); 913 return (MPT_FAIL); 914 } 915 916 /* If door bell is in use we don't have a chance of getting 917 * a word in since the IOC probably crashed in message 918 * processing. So don't waste our time. 919 */ 920 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) { 921 mpt_prt(mpt, "soft reset failed: doorbell wedged\n"); 922 return (MPT_FAIL); 923 } 924 925 /* Send the reset request to the IOC */ 926 mpt_write(mpt, MPT_OFFSET_DOORBELL, 927 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT); 928 if (mpt_wait_db_ack(mpt) != MPT_OK) { 929 mpt_prt(mpt, "soft reset failed: ack timeout\n"); 930 return (MPT_FAIL); 931 } 932 933 /* Wait for the IOC to reload and come out of reset state */ 934 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) { 935 mpt_prt(mpt, "soft reset failed: device did not restart\n"); 936 return (MPT_FAIL); 937 } 938 939 return MPT_OK; 940 } 941 942 static int 943 mpt_enable_diag_mode(struct mpt_softc *mpt) 944 { 945 int try; 946 947 try = 20; 948 while (--try) { 949 950 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0) 951 break; 952 953 /* Enable diagnostic registers */ 954 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF); 955 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE); 956 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE); 957 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE); 958 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE); 959 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE); 960 961 DELAY(100000); 962 } 963 if (try == 0) 964 return (EIO); 965 return (0); 966 } 967 968 static void 969 mpt_disable_diag_mode(struct mpt_softc *mpt) 970 { 971 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF); 972 } 973 974 /* This is a magic diagnostic reset that resets all the ARM 975 * processors in the chip. 976 */ 977 static void 978 mpt_hard_reset(struct mpt_softc *mpt) 979 { 980 int error; 981 int wait; 982 uint32_t diagreg; 983 984 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n"); 985 986 error = mpt_enable_diag_mode(mpt); 987 if (error) { 988 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); 989 mpt_prt(mpt, "Trying to reset anyway.\n"); 990 } 991 992 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 993 994 /* 995 * This appears to be a workaround required for some 996 * firmware or hardware revs. 997 */ 998 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM); 999 DELAY(1000); 1000 1001 /* Diag. port is now active so we can now hit the reset bit */ 1002 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER); 1003 1004 /* 1005 * Ensure that the reset has finished. We delay 1ms 1006 * prior to reading the register to make sure the chip 1007 * has sufficiently completed its reset to handle register 1008 * accesses. 1009 */ 1010 wait = 5000; 1011 do { 1012 DELAY(1000); 1013 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 1014 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); 1015 1016 if (wait == 0) { 1017 mpt_prt(mpt, "WARNING - Failed hard reset! " 1018 "Trying to initialize anyway.\n"); 1019 } 1020 1021 /* 1022 * If we have firmware to download, it must be loaded before 1023 * the controller will become operational. Do so now. 1024 */ 1025 if (mpt->fw_image != NULL) { 1026 1027 error = mpt_download_fw(mpt); 1028 1029 if (error) { 1030 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); 1031 mpt_prt(mpt, "Trying to initialize anyway.\n"); 1032 } 1033 } 1034 1035 /* 1036 * Reseting the controller should have disabled write 1037 * access to the diagnostic registers, but disable 1038 * manually to be sure. 1039 */ 1040 mpt_disable_diag_mode(mpt); 1041 } 1042 1043 static void 1044 mpt_core_ioc_reset(struct mpt_softc *mpt, int type) 1045 { 1046 /* 1047 * Complete all pending requests with a status 1048 * appropriate for an IOC reset. 1049 */ 1050 mpt_complete_request_chain(mpt, &mpt->request_pending_list, 1051 MPI_IOCSTATUS_INVALID_STATE); 1052 } 1053 1054 1055 /* 1056 * Reset the IOC when needed. Try software command first then if needed 1057 * poke at the magic diagnostic reset. Note that a hard reset resets 1058 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as 1059 * fouls up the PCI configuration registers. 1060 */ 1061 int 1062 mpt_reset(struct mpt_softc *mpt, int reinit) 1063 { 1064 struct mpt_personality *pers; 1065 int ret; 1066 int retry_cnt = 0; 1067 1068 /* 1069 * Try a soft reset. If that fails, get out the big hammer. 1070 */ 1071 again: 1072 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) { 1073 int cnt; 1074 for (cnt = 0; cnt < 5; cnt++) { 1075 /* Failed; do a hard reset */ 1076 mpt_hard_reset(mpt); 1077 1078 /* 1079 * Wait for the IOC to reload 1080 * and come out of reset state 1081 */ 1082 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1083 if (ret == MPT_OK) { 1084 break; 1085 } 1086 /* 1087 * Okay- try to check again... 1088 */ 1089 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1090 if (ret == MPT_OK) { 1091 break; 1092 } 1093 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n", 1094 retry_cnt, cnt); 1095 } 1096 } 1097 1098 if (retry_cnt == 0) { 1099 /* 1100 * Invoke reset handlers. We bump the reset count so 1101 * that mpt_wait_req() understands that regardless of 1102 * the specified wait condition, it should stop its wait. 1103 */ 1104 mpt->reset_cnt++; 1105 MPT_PERS_FOREACH(mpt, pers) 1106 pers->reset(mpt, ret); 1107 } 1108 1109 if (reinit) { 1110 ret = mpt_enable_ioc(mpt, 1); 1111 if (ret == MPT_OK) { 1112 mpt_enable_ints(mpt); 1113 } 1114 } 1115 if (ret != MPT_OK && retry_cnt++ < 2) { 1116 goto again; 1117 } 1118 return ret; 1119 } 1120 1121 /* Return a command buffer to the free queue */ 1122 void 1123 mpt_free_request(struct mpt_softc *mpt, request_t *req) 1124 { 1125 request_t *nxt; 1126 struct mpt_evtf_record *record; 1127 uint32_t reply_baddr; 1128 1129 if (req == NULL || req != &mpt->request_pool[req->index]) { 1130 panic("mpt_free_request bad req ptr\n"); 1131 return; 1132 } 1133 if ((nxt = req->chain) != NULL) { 1134 req->chain = NULL; 1135 mpt_free_request(mpt, nxt); /* NB: recursion */ 1136 } 1137 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request")); 1138 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request")); 1139 KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n")); 1140 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1141 ("mpt_free_request: req %p:%u func %x already on freelist", 1142 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1143 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1144 ("mpt_free_request: req %p:%u func %x on pending list", 1145 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1146 #ifdef INVARIANTS 1147 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__); 1148 #endif 1149 1150 req->ccb = NULL; 1151 if (LIST_EMPTY(&mpt->ack_frames)) { 1152 /* 1153 * Insert free ones at the tail 1154 */ 1155 req->serno = 0; 1156 req->state = REQ_STATE_FREE; 1157 #ifdef INVARIANTS 1158 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER)); 1159 #endif 1160 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links); 1161 if (mpt->getreqwaiter != 0) { 1162 mpt->getreqwaiter = 0; 1163 wakeup(&mpt->request_free_list); 1164 } 1165 return; 1166 } 1167 1168 /* 1169 * Process an ack frame deferred due to resource shortage. 1170 */ 1171 record = LIST_FIRST(&mpt->ack_frames); 1172 LIST_REMOVE(record, links); 1173 req->state = REQ_STATE_ALLOCATED; 1174 mpt_assign_serno(mpt, req); 1175 mpt_send_event_ack(mpt, req, &record->reply, record->context); 1176 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply) 1177 + (mpt->reply_phys & 0xFFFFFFFF); 1178 mpt_free_reply(mpt, reply_baddr); 1179 } 1180 1181 /* Get a command buffer from the free queue */ 1182 request_t * 1183 mpt_get_request(struct mpt_softc *mpt, int sleep_ok) 1184 { 1185 request_t *req; 1186 1187 retry: 1188 KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n")); 1189 req = TAILQ_FIRST(&mpt->request_free_list); 1190 if (req != NULL) { 1191 KASSERT(req == &mpt->request_pool[req->index], 1192 ("mpt_get_request: corrupted request free list\n")); 1193 KASSERT(req->state == REQ_STATE_FREE, 1194 ("req %p:%u not free on free list %x index %d function %x", 1195 req, req->serno, req->state, req->index, 1196 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1197 TAILQ_REMOVE(&mpt->request_free_list, req, links); 1198 req->state = REQ_STATE_ALLOCATED; 1199 req->chain = NULL; 1200 mpt_assign_serno(mpt, req); 1201 } else if (sleep_ok != 0) { 1202 mpt->getreqwaiter = 1; 1203 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); 1204 goto retry; 1205 } 1206 return (req); 1207 } 1208 1209 /* Pass the command to the IOC */ 1210 void 1211 mpt_send_cmd(struct mpt_softc *mpt, request_t *req) 1212 { 1213 if (mpt->verbose > MPT_PRT_DEBUG2) { 1214 mpt_dump_request(mpt, req); 1215 } 1216 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1217 BUS_DMASYNC_PREWRITE); 1218 req->state |= REQ_STATE_QUEUED; 1219 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1220 ("req %p:%u func %x on freelist list in mpt_send_cmd", 1221 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1222 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1223 ("req %p:%u func %x already on pending list in mpt_send_cmd", 1224 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1225 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); 1226 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); 1227 } 1228 1229 /* 1230 * Wait for a request to complete. 1231 * 1232 * Inputs: 1233 * mpt softc of controller executing request 1234 * req request to wait for 1235 * sleep_ok nonzero implies may sleep in this context 1236 * time_ms timeout in ms. 0 implies no timeout. 1237 * 1238 * Return Values: 1239 * 0 Request completed 1240 * non-0 Timeout fired before request completion. 1241 */ 1242 int 1243 mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1244 mpt_req_state_t state, mpt_req_state_t mask, 1245 int sleep_ok, int time_ms) 1246 { 1247 int error; 1248 int timeout; 1249 u_int saved_cnt; 1250 1251 /* 1252 * timeout is in ms. 0 indicates infinite wait. 1253 * Convert to ticks or 500us units depending on 1254 * our sleep mode. 1255 */ 1256 if (sleep_ok != 0) { 1257 timeout = (time_ms * hz) / 1000; 1258 } else { 1259 timeout = time_ms * 2; 1260 } 1261 req->state |= REQ_STATE_NEED_WAKEUP; 1262 mask &= ~REQ_STATE_NEED_WAKEUP; 1263 saved_cnt = mpt->reset_cnt; 1264 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { 1265 if (sleep_ok != 0) { 1266 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); 1267 if (error == EWOULDBLOCK) { 1268 timeout = 0; 1269 break; 1270 } 1271 } else { 1272 if (time_ms != 0 && --timeout == 0) { 1273 break; 1274 } 1275 DELAY(500); 1276 mpt_intr(mpt); 1277 } 1278 } 1279 req->state &= ~REQ_STATE_NEED_WAKEUP; 1280 if (mpt->reset_cnt != saved_cnt) { 1281 return (EIO); 1282 } 1283 if (time_ms && timeout <= 0) { 1284 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; 1285 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); 1286 return (ETIMEDOUT); 1287 } 1288 return (0); 1289 } 1290 1291 /* 1292 * Send a command to the IOC via the handshake register. 1293 * 1294 * Only done at initialization time and for certain unusual 1295 * commands such as device/bus reset as specified by LSI. 1296 */ 1297 int 1298 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1299 { 1300 int i; 1301 uint32_t data, *data32; 1302 1303 /* Check condition of the IOC */ 1304 data = mpt_rd_db(mpt); 1305 if ((MPT_STATE(data) != MPT_DB_STATE_READY 1306 && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1307 && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1308 || MPT_DB_IS_IN_USE(data)) { 1309 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1310 mpt_print_db(data); 1311 return (EBUSY); 1312 } 1313 1314 /* We move things in 32 bit chunks */ 1315 len = (len + 3) >> 2; 1316 data32 = cmd; 1317 1318 /* Clear any left over pending doorbell interupts */ 1319 if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1320 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1321 1322 /* 1323 * Tell the handshake reg. we are going to send a command 1324 * and how long it is going to be. 1325 */ 1326 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1327 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1328 mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1329 1330 /* Wait for the chip to notice */ 1331 if (mpt_wait_db_int(mpt) != MPT_OK) { 1332 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); 1333 return (ETIMEDOUT); 1334 } 1335 1336 /* Clear the interrupt */ 1337 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1338 1339 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1340 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); 1341 return (ETIMEDOUT); 1342 } 1343 1344 /* Send the command */ 1345 for (i = 0; i < len; i++) { 1346 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); 1347 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1348 mpt_prt(mpt, 1349 "mpt_send_handshake_cmd timeout! index = %d\n", 1350 i); 1351 return (ETIMEDOUT); 1352 } 1353 } 1354 return MPT_OK; 1355 } 1356 1357 /* Get the response from the handshake register */ 1358 int 1359 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1360 { 1361 int left, reply_left; 1362 u_int16_t *data16; 1363 MSG_DEFAULT_REPLY *hdr; 1364 1365 /* We move things out in 16 bit chunks */ 1366 reply_len >>= 1; 1367 data16 = (u_int16_t *)reply; 1368 1369 hdr = (MSG_DEFAULT_REPLY *)reply; 1370 1371 /* Get first word */ 1372 if (mpt_wait_db_int(mpt) != MPT_OK) { 1373 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1374 return ETIMEDOUT; 1375 } 1376 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1377 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1378 1379 /* Get Second Word */ 1380 if (mpt_wait_db_int(mpt) != MPT_OK) { 1381 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1382 return ETIMEDOUT; 1383 } 1384 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1385 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1386 1387 /* 1388 * With the second word, we can now look at the length. 1389 * Warn about a reply that's too short (except for IOC FACTS REPLY) 1390 */ 1391 if ((reply_len >> 1) != hdr->MsgLength && 1392 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ 1393 #if __FreeBSD_version >= 500000 1394 mpt_prt(mpt, "reply length does not match message length: " 1395 "got %x; expected %zx for function %x\n", 1396 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1397 #else 1398 mpt_prt(mpt, "reply length does not match message length: " 1399 "got %x; expected %x for function %x\n", 1400 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1401 #endif 1402 } 1403 1404 /* Get rest of the reply; but don't overflow the provided buffer */ 1405 left = (hdr->MsgLength << 1) - 2; 1406 reply_left = reply_len - 2; 1407 while (left--) { 1408 u_int16_t datum; 1409 1410 if (mpt_wait_db_int(mpt) != MPT_OK) { 1411 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1412 return ETIMEDOUT; 1413 } 1414 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1415 1416 if (reply_left-- > 0) 1417 *data16++ = datum & MPT_DB_DATA_MASK; 1418 1419 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1420 } 1421 1422 /* One more wait & clear at the end */ 1423 if (mpt_wait_db_int(mpt) != MPT_OK) { 1424 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1425 return ETIMEDOUT; 1426 } 1427 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1428 1429 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1430 if (mpt->verbose >= MPT_PRT_TRACE) 1431 mpt_print_reply(hdr); 1432 return (MPT_FAIL | hdr->IOCStatus); 1433 } 1434 1435 return (0); 1436 } 1437 1438 static int 1439 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1440 { 1441 MSG_IOC_FACTS f_req; 1442 int error; 1443 1444 memset(&f_req, 0, sizeof f_req); 1445 f_req.Function = MPI_FUNCTION_IOC_FACTS; 1446 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1447 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1448 if (error) 1449 return(error); 1450 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1451 return (error); 1452 } 1453 1454 static int 1455 mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1456 { 1457 MSG_PORT_FACTS f_req; 1458 int error; 1459 1460 /* XXX: Only getting PORT FACTS for Port 0 */ 1461 memset(&f_req, 0, sizeof f_req); 1462 f_req.Function = MPI_FUNCTION_PORT_FACTS; 1463 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1464 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1465 if (error) 1466 return(error); 1467 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1468 return (error); 1469 } 1470 1471 /* 1472 * Send the initialization request. This is where we specify how many 1473 * SCSI busses and how many devices per bus we wish to emulate. 1474 * This is also the command that specifies the max size of the reply 1475 * frames from the IOC that we will be allocating. 1476 */ 1477 static int 1478 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1479 { 1480 int error = 0; 1481 MSG_IOC_INIT init; 1482 MSG_IOC_INIT_REPLY reply; 1483 1484 memset(&init, 0, sizeof init); 1485 init.WhoInit = who; 1486 init.Function = MPI_FUNCTION_IOC_INIT; 1487 if (mpt->is_fc) { 1488 init.MaxDevices = 255; 1489 } else if (mpt->is_sas) { 1490 init.MaxDevices = mpt->mpt_max_devices; 1491 } else { 1492 init.MaxDevices = 16; 1493 } 1494 init.MaxBuses = 1; 1495 1496 init.MsgVersion = htole16(MPI_VERSION); 1497 init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1498 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1499 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1500 1501 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1502 return(error); 1503 } 1504 1505 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1506 return (error); 1507 } 1508 1509 1510 /* 1511 * Utiltity routine to read configuration headers and pages 1512 */ 1513 int 1514 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1515 u_int PageVersion, u_int PageLength, u_int PageNumber, 1516 u_int PageType, uint32_t PageAddress, bus_addr_t addr, 1517 bus_size_t len, int sleep_ok, int timeout_ms) 1518 { 1519 MSG_CONFIG *cfgp; 1520 SGE_SIMPLE32 *se; 1521 1522 cfgp = req->req_vbuf; 1523 memset(cfgp, 0, sizeof *cfgp); 1524 cfgp->Action = Action; 1525 cfgp->Function = MPI_FUNCTION_CONFIG; 1526 cfgp->Header.PageVersion = PageVersion; 1527 cfgp->Header.PageLength = PageLength; 1528 cfgp->Header.PageNumber = PageNumber; 1529 cfgp->Header.PageType = PageType; 1530 cfgp->PageAddress = PageAddress; 1531 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1532 se->Address = addr; 1533 MPI_pSGE_SET_LENGTH(se, len); 1534 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1535 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1536 MPI_SGE_FLAGS_END_OF_LIST | 1537 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1538 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1539 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1540 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1541 1542 mpt_check_doorbell(mpt); 1543 mpt_send_cmd(mpt, req); 1544 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1545 sleep_ok, timeout_ms)); 1546 } 1547 1548 1549 int 1550 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1551 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1552 int sleep_ok, int timeout_ms) 1553 { 1554 request_t *req; 1555 MSG_CONFIG *cfgp; 1556 int error; 1557 1558 req = mpt_get_request(mpt, sleep_ok); 1559 if (req == NULL) { 1560 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1561 return (ENOMEM); 1562 } 1563 1564 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, 1565 /*PageVersion*/0, /*PageLength*/0, PageNumber, 1566 PageType, PageAddress, /*addr*/0, /*len*/0, 1567 sleep_ok, timeout_ms); 1568 if (error != 0) { 1569 mpt_free_request(mpt, req); 1570 mpt_prt(mpt, "read_cfg_header timed out\n"); 1571 return (ETIMEDOUT); 1572 } 1573 1574 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1575 case MPI_IOCSTATUS_SUCCESS: 1576 cfgp = req->req_vbuf; 1577 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1578 error = 0; 1579 break; 1580 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1581 mpt_lprt(mpt, MPT_PRT_DEBUG, 1582 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1583 PageType, PageNumber, PageAddress); 1584 error = EINVAL; 1585 break; 1586 default: 1587 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1588 req->IOCStatus); 1589 error = EIO; 1590 break; 1591 } 1592 mpt_free_request(mpt, req); 1593 return (error); 1594 } 1595 1596 int 1597 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1598 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1599 int timeout_ms) 1600 { 1601 request_t *req; 1602 int error; 1603 1604 req = mpt_get_request(mpt, sleep_ok); 1605 if (req == NULL) { 1606 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1607 return (-1); 1608 } 1609 1610 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1611 hdr->PageLength, hdr->PageNumber, 1612 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1613 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1614 len, sleep_ok, timeout_ms); 1615 if (error != 0) { 1616 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1617 return (-1); 1618 } 1619 1620 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1621 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1622 req->IOCStatus); 1623 mpt_free_request(mpt, req); 1624 return (-1); 1625 } 1626 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1627 BUS_DMASYNC_POSTREAD); 1628 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1629 mpt_free_request(mpt, req); 1630 return (0); 1631 } 1632 1633 int 1634 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1635 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1636 int timeout_ms) 1637 { 1638 request_t *req; 1639 u_int hdr_attr; 1640 int error; 1641 1642 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1643 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1644 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1645 mpt_prt(mpt, "page type 0x%x not changeable\n", 1646 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1647 return (-1); 1648 } 1649 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1650 1651 req = mpt_get_request(mpt, sleep_ok); 1652 if (req == NULL) 1653 return (-1); 1654 1655 memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len); 1656 /* Restore stripped out attributes */ 1657 hdr->PageType |= hdr_attr; 1658 1659 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1660 hdr->PageLength, hdr->PageNumber, 1661 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1662 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1663 len, sleep_ok, timeout_ms); 1664 if (error != 0) { 1665 mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1666 return (-1); 1667 } 1668 1669 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1670 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1671 req->IOCStatus); 1672 mpt_free_request(mpt, req); 1673 return (-1); 1674 } 1675 mpt_free_request(mpt, req); 1676 return (0); 1677 } 1678 1679 /* 1680 * Read IOC configuration information 1681 */ 1682 static int 1683 mpt_read_config_info_ioc(struct mpt_softc *mpt) 1684 { 1685 CONFIG_PAGE_HEADER hdr; 1686 struct mpt_raid_volume *mpt_raid; 1687 int rv; 1688 int i; 1689 size_t len; 1690 1691 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1692 2, 0, &hdr, FALSE, 5000); 1693 /* 1694 * If it's an invalid page, so what? Not a supported function.... 1695 */ 1696 if (rv == EINVAL) { 1697 return (0); 1698 } 1699 if (rv) { 1700 return (rv); 1701 } 1702 1703 #if __FreeBSD_version >= 500000 1704 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " 1705 "num %x, type %x\n", hdr.PageVersion, 1706 hdr.PageLength * sizeof(uint32_t), 1707 hdr.PageNumber, hdr.PageType); 1708 #else 1709 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " 1710 "num %x, type %x\n", hdr.PageVersion, 1711 hdr.PageLength * sizeof(uint32_t), 1712 hdr.PageNumber, hdr.PageType); 1713 #endif 1714 1715 len = hdr.PageLength * sizeof(uint32_t); 1716 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1717 if (mpt->ioc_page2 == NULL) { 1718 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); 1719 mpt_raid_free_mem(mpt); 1720 return (ENOMEM); 1721 } 1722 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1723 rv = mpt_read_cur_cfg_page(mpt, 0, 1724 &mpt->ioc_page2->Header, len, FALSE, 5000); 1725 if (rv) { 1726 mpt_prt(mpt, "failed to read IOC Page 2\n"); 1727 mpt_raid_free_mem(mpt); 1728 return (EIO); 1729 } 1730 1731 if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1732 uint32_t mask; 1733 1734 mpt_prt(mpt, "Capabilities: ("); 1735 for (mask = 1; mask != 0; mask <<= 1) { 1736 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) { 1737 continue; 1738 } 1739 switch (mask) { 1740 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1741 mpt_prtc(mpt, " RAID-0"); 1742 break; 1743 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1744 mpt_prtc(mpt, " RAID-1E"); 1745 break; 1746 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1747 mpt_prtc(mpt, " RAID-1"); 1748 break; 1749 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1750 mpt_prtc(mpt, " SES"); 1751 break; 1752 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1753 mpt_prtc(mpt, " SAFTE"); 1754 break; 1755 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1756 mpt_prtc(mpt, " Multi-Channel-Arrays"); 1757 default: 1758 break; 1759 } 1760 } 1761 mpt_prtc(mpt, " )\n"); 1762 if ((mpt->ioc_page2->CapabilitiesFlags 1763 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1764 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1765 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1766 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1767 mpt->ioc_page2->NumActiveVolumes, 1768 mpt->ioc_page2->NumActiveVolumes != 1 1769 ? "s " : " ", 1770 mpt->ioc_page2->MaxVolumes); 1771 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1772 mpt->ioc_page2->NumActivePhysDisks, 1773 mpt->ioc_page2->NumActivePhysDisks != 1 1774 ? "s " : " ", 1775 mpt->ioc_page2->MaxPhysDisks); 1776 } 1777 } 1778 1779 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1780 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1781 if (mpt->raid_volumes == NULL) { 1782 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1783 mpt_raid_free_mem(mpt); 1784 return (ENOMEM); 1785 } 1786 1787 /* 1788 * Copy critical data out of ioc_page2 so that we can 1789 * safely refresh the page without windows of unreliable 1790 * data. 1791 */ 1792 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1793 1794 len = sizeof(*mpt->raid_volumes->config_page) + 1795 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); 1796 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1797 mpt_raid = &mpt->raid_volumes[i]; 1798 mpt_raid->config_page = 1799 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1800 if (mpt_raid->config_page == NULL) { 1801 mpt_prt(mpt, "Could not allocate RAID page data\n"); 1802 mpt_raid_free_mem(mpt); 1803 return (ENOMEM); 1804 } 1805 } 1806 mpt->raid_page0_len = len; 1807 1808 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1809 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1810 if (mpt->raid_disks == NULL) { 1811 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1812 mpt_raid_free_mem(mpt); 1813 return (ENOMEM); 1814 } 1815 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1816 1817 /* 1818 * Load page 3. 1819 */ 1820 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1821 3, 0, &hdr, FALSE, 5000); 1822 if (rv) { 1823 mpt_raid_free_mem(mpt); 1824 return (EIO); 1825 } 1826 1827 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 1828 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1829 1830 len = hdr.PageLength * sizeof(uint32_t); 1831 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1832 if (mpt->ioc_page3 == NULL) { 1833 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); 1834 mpt_raid_free_mem(mpt); 1835 return (ENOMEM); 1836 } 1837 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1838 rv = mpt_read_cur_cfg_page(mpt, 0, 1839 &mpt->ioc_page3->Header, len, FALSE, 5000); 1840 if (rv) { 1841 mpt_raid_free_mem(mpt); 1842 return (EIO); 1843 } 1844 mpt_raid_wakeup(mpt); 1845 return (0); 1846 } 1847 1848 /* 1849 * Enable IOC port 1850 */ 1851 static int 1852 mpt_send_port_enable(struct mpt_softc *mpt, int port) 1853 { 1854 request_t *req; 1855 MSG_PORT_ENABLE *enable_req; 1856 int error; 1857 1858 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1859 if (req == NULL) 1860 return (-1); 1861 1862 enable_req = req->req_vbuf; 1863 memset(enable_req, 0, MPT_RQSL(mpt)); 1864 1865 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1866 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1867 enable_req->PortNumber = port; 1868 1869 mpt_check_doorbell(mpt); 1870 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1871 1872 mpt_send_cmd(mpt, req); 1873 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1874 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1875 if (error != 0) { 1876 mpt_prt(mpt, "port %d enable timed out\n", port); 1877 return (-1); 1878 } 1879 mpt_free_request(mpt, req); 1880 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 1881 return (0); 1882 } 1883 1884 /* 1885 * Enable/Disable asynchronous event reporting. 1886 */ 1887 static int 1888 mpt_send_event_request(struct mpt_softc *mpt, int onoff) 1889 { 1890 request_t *req; 1891 MSG_EVENT_NOTIFY *enable_req; 1892 1893 req = mpt_get_request(mpt, FALSE); 1894 if (req == NULL) { 1895 return (ENOMEM); 1896 } 1897 enable_req = req->req_vbuf; 1898 memset(enable_req, 0, sizeof *enable_req); 1899 1900 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1901 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1902 enable_req->Switch = onoff; 1903 1904 mpt_check_doorbell(mpt); 1905 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 1906 onoff ? "en" : "dis"); 1907 /* 1908 * Send the command off, but don't wait for it. 1909 */ 1910 mpt_send_cmd(mpt, req); 1911 return (0); 1912 } 1913 1914 /* 1915 * Un-mask the interupts on the chip. 1916 */ 1917 void 1918 mpt_enable_ints(struct mpt_softc *mpt) 1919 { 1920 /* Unmask every thing except door bell int */ 1921 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1922 } 1923 1924 /* 1925 * Mask the interupts on the chip. 1926 */ 1927 void 1928 mpt_disable_ints(struct mpt_softc *mpt) 1929 { 1930 /* Mask all interrupts */ 1931 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1932 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1933 } 1934 1935 static void 1936 mpt_sysctl_attach(struct mpt_softc *mpt) 1937 { 1938 #if __FreeBSD_version >= 500000 1939 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1940 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1941 1942 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1943 "debug", CTLFLAG_RW, &mpt->verbose, 0, 1944 "Debugging/Verbose level"); 1945 #endif 1946 } 1947 1948 int 1949 mpt_attach(struct mpt_softc *mpt) 1950 { 1951 struct mpt_personality *pers; 1952 int i; 1953 int error; 1954 1955 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1956 pers = mpt_personalities[i]; 1957 if (pers == NULL) { 1958 continue; 1959 } 1960 if (pers->probe(mpt) == 0) { 1961 error = pers->attach(mpt); 1962 if (error != 0) { 1963 mpt_detach(mpt); 1964 return (error); 1965 } 1966 mpt->mpt_pers_mask |= (0x1 << pers->id); 1967 pers->use_count++; 1968 } 1969 } 1970 1971 /* 1972 * Now that we've attached everything, do the enable function 1973 * for all of the personalities. This allows the personalities 1974 * to do setups that are appropriate for them prior to enabling 1975 * any ports. 1976 */ 1977 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1978 pers = mpt_personalities[i]; 1979 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 1980 error = pers->enable(mpt); 1981 if (error != 0) { 1982 mpt_prt(mpt, "personality %s attached but would" 1983 " not enable (%d)\n", pers->name, error); 1984 mpt_detach(mpt); 1985 return (error); 1986 } 1987 } 1988 } 1989 return (0); 1990 } 1991 1992 int 1993 mpt_shutdown(struct mpt_softc *mpt) 1994 { 1995 struct mpt_personality *pers; 1996 1997 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 1998 pers->shutdown(mpt); 1999 } 2000 return (0); 2001 } 2002 2003 int 2004 mpt_detach(struct mpt_softc *mpt) 2005 { 2006 struct mpt_personality *pers; 2007 2008 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2009 pers->detach(mpt); 2010 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2011 pers->use_count--; 2012 } 2013 2014 return (0); 2015 } 2016 2017 int 2018 mpt_core_load(struct mpt_personality *pers) 2019 { 2020 int i; 2021 2022 /* 2023 * Setup core handlers and insert the default handler 2024 * into all "empty slots". 2025 */ 2026 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2027 mpt_reply_handlers[i] = mpt_default_reply_handler; 2028 } 2029 2030 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2031 mpt_event_reply_handler; 2032 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2033 mpt_config_reply_handler; 2034 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2035 mpt_handshake_reply_handler; 2036 return (0); 2037 } 2038 2039 /* 2040 * Initialize per-instance driver data and perform 2041 * initial controller configuration. 2042 */ 2043 int 2044 mpt_core_attach(struct mpt_softc *mpt) 2045 { 2046 int val; 2047 int error; 2048 2049 2050 LIST_INIT(&mpt->ack_frames); 2051 2052 /* Put all request buffers on the free list */ 2053 TAILQ_INIT(&mpt->request_pending_list); 2054 TAILQ_INIT(&mpt->request_free_list); 2055 TAILQ_INIT(&mpt->request_timeout_list); 2056 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2057 request_t *req = &mpt->request_pool[val]; 2058 req->state = REQ_STATE_ALLOCATED; 2059 mpt_free_request(mpt, req); 2060 } 2061 2062 for (val = 0; val < MPT_MAX_LUNS; val++) { 2063 STAILQ_INIT(&mpt->trt[val].atios); 2064 STAILQ_INIT(&mpt->trt[val].inots); 2065 } 2066 STAILQ_INIT(&mpt->trt_wildcard.atios); 2067 STAILQ_INIT(&mpt->trt_wildcard.inots); 2068 2069 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2070 2071 mpt_sysctl_attach(mpt); 2072 2073 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2074 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2075 2076 error = mpt_configure_ioc(mpt); 2077 2078 return (error); 2079 } 2080 2081 int 2082 mpt_core_enable(struct mpt_softc *mpt) 2083 { 2084 /* 2085 * We enter with the IOC enabled, but async events 2086 * not enabled, ports not enabled and interrupts 2087 * not enabled. 2088 */ 2089 2090 /* 2091 * Enable asynchronous event reporting- all personalities 2092 * have attached so that they should be able to now field 2093 * async events. 2094 */ 2095 mpt_send_event_request(mpt, 1); 2096 2097 /* 2098 * Catch any pending interrupts 2099 * 2100 * This seems to be crucial- otherwise 2101 * the portenable below times out. 2102 */ 2103 mpt_intr(mpt); 2104 2105 /* 2106 * Enable Interrupts 2107 */ 2108 mpt_enable_ints(mpt); 2109 2110 /* 2111 * Catch any pending interrupts 2112 * 2113 * This seems to be crucial- otherwise 2114 * the portenable below times out. 2115 */ 2116 mpt_intr(mpt); 2117 2118 /* 2119 * Enable the port. 2120 */ 2121 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2122 mpt_prt(mpt, "failed to enable port 0\n"); 2123 return (ENXIO); 2124 } 2125 return (0); 2126 } 2127 2128 void 2129 mpt_core_shutdown(struct mpt_softc *mpt) 2130 { 2131 mpt_disable_ints(mpt); 2132 } 2133 2134 void 2135 mpt_core_detach(struct mpt_softc *mpt) 2136 { 2137 mpt_disable_ints(mpt); 2138 } 2139 2140 int 2141 mpt_core_unload(struct mpt_personality *pers) 2142 { 2143 /* Unload is always successfull. */ 2144 return (0); 2145 } 2146 2147 #define FW_UPLOAD_REQ_SIZE \ 2148 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2149 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2150 2151 static int 2152 mpt_upload_fw(struct mpt_softc *mpt) 2153 { 2154 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2155 MSG_FW_UPLOAD_REPLY fw_reply; 2156 MSG_FW_UPLOAD *fw_req; 2157 FW_UPLOAD_TCSGE *tsge; 2158 SGE_SIMPLE32 *sge; 2159 uint32_t flags; 2160 int error; 2161 2162 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2163 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2164 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2165 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2166 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2167 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2168 tsge->DetailsLength = 12; 2169 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2170 tsge->ImageSize = htole32(mpt->fw_image_size); 2171 sge = (SGE_SIMPLE32 *)(tsge + 1); 2172 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2173 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2174 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2175 flags <<= MPI_SGE_FLAGS_SHIFT; 2176 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2177 sge->Address = htole32(mpt->fw_phys); 2178 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2179 if (error) 2180 return(error); 2181 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2182 return (error); 2183 } 2184 2185 static void 2186 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2187 uint32_t *data, bus_size_t len) 2188 { 2189 uint32_t *data_end; 2190 2191 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2192 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2193 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2194 while (data != data_end) { 2195 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2196 data++; 2197 } 2198 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2199 } 2200 2201 static int 2202 mpt_download_fw(struct mpt_softc *mpt) 2203 { 2204 MpiFwHeader_t *fw_hdr; 2205 int error; 2206 uint32_t ext_offset; 2207 uint32_t data; 2208 2209 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2210 mpt->fw_image_size); 2211 2212 error = mpt_enable_diag_mode(mpt); 2213 if (error != 0) { 2214 mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2215 return (EIO); 2216 } 2217 2218 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2219 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2220 2221 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2222 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2223 fw_hdr->ImageSize); 2224 2225 ext_offset = fw_hdr->NextImageHeaderOffset; 2226 while (ext_offset != 0) { 2227 MpiExtImageHeader_t *ext; 2228 2229 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2230 ext_offset = ext->NextImageHeaderOffset; 2231 2232 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2233 ext->ImageSize); 2234 } 2235 2236 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2237 /* Setup the address to jump to on reset. */ 2238 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2239 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2240 2241 /* 2242 * The controller sets the "flash bad" status after attempting 2243 * to auto-boot from flash. Clear the status so that the controller 2244 * will continue the boot process with our newly installed firmware. 2245 */ 2246 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2247 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2248 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2249 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2250 2251 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2252 2253 /* 2254 * Re-enable the processor and clear the boot halt flag. 2255 */ 2256 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2257 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2258 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2259 2260 mpt_disable_diag_mode(mpt); 2261 return (0); 2262 } 2263 2264 /* 2265 * Allocate/Initialize data structures for the controller. Called 2266 * once at instance startup. 2267 */ 2268 static int 2269 mpt_configure_ioc(struct mpt_softc *mpt) 2270 { 2271 MSG_PORT_FACTS_REPLY pfp; 2272 MSG_IOC_FACTS_REPLY facts; 2273 int try; 2274 int needreset; 2275 uint32_t max_chain_depth; 2276 2277 needreset = 0; 2278 for (try = 0; try < MPT_MAX_TRYS; try++) { 2279 2280 /* 2281 * No need to reset if the IOC is already in the READY state. 2282 * 2283 * Force reset if initialization failed previously. 2284 * Note that a hard_reset of the second channel of a '929 2285 * will stop operation of the first channel. Hopefully, if the 2286 * first channel is ok, the second will not require a hard 2287 * reset. 2288 */ 2289 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != 2290 MPT_DB_STATE_READY) { 2291 if (mpt_reset(mpt, FALSE) != MPT_OK) { 2292 continue; 2293 } 2294 } 2295 needreset = 0; 2296 2297 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { 2298 mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2299 needreset = 1; 2300 continue; 2301 } 2302 2303 mpt->mpt_global_credits = le16toh(facts.GlobalCredits); 2304 mpt->request_frame_size = le16toh(facts.RequestFrameSize); 2305 mpt->ioc_facts_flags = facts.Flags; 2306 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2307 le16toh(facts.MsgVersion) >> 8, 2308 le16toh(facts.MsgVersion) & 0xFF, 2309 le16toh(facts.HeaderVersion) >> 8, 2310 le16toh(facts.HeaderVersion) & 0xFF); 2311 2312 /* 2313 * Now that we know request frame size, we can calculate 2314 * the actual (reasonable) segment limit for read/write I/O. 2315 * 2316 * This limit is constrained by: 2317 * 2318 * + The size of each area we allocate per command (and how 2319 * many chain segments we can fit into it). 2320 * + The total number of areas we've set up. 2321 * + The actual chain depth the card will allow. 2322 * 2323 * The first area's segment count is limited by the I/O request 2324 * at the head of it. We cannot allocate realistically more 2325 * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2326 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2327 * 2328 */ 2329 max_chain_depth = facts.MaxChainDepth; 2330 2331 /* total number of request areas we (can) allocate */ 2332 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2333 2334 /* converted to the number of chain areas possible */ 2335 mpt->max_seg_cnt *= MPT_NRFM(mpt); 2336 2337 /* limited by the number of chain areas the card will support */ 2338 if (mpt->max_seg_cnt > max_chain_depth) { 2339 mpt_lprt(mpt, MPT_PRT_DEBUG, 2340 "chain depth limited to %u (from %u)\n", 2341 max_chain_depth, mpt->max_seg_cnt); 2342 mpt->max_seg_cnt = max_chain_depth; 2343 } 2344 2345 /* converted to the number of simple sges in chain segments. */ 2346 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2347 2348 mpt_lprt(mpt, MPT_PRT_DEBUG, 2349 "Maximum Segment Count: %u\n", mpt->max_seg_cnt); 2350 mpt_lprt(mpt, MPT_PRT_DEBUG, 2351 "MsgLength=%u IOCNumber = %d\n", 2352 facts.MsgLength, facts.IOCNumber); 2353 mpt_lprt(mpt, MPT_PRT_DEBUG, 2354 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2355 "Request Frame Size %u bytes Max Chain Depth %u\n", 2356 mpt->mpt_global_credits, facts.BlockSize, 2357 mpt->request_frame_size << 2, max_chain_depth); 2358 mpt_lprt(mpt, MPT_PRT_DEBUG, 2359 "IOCFACTS: Num Ports %d, FWImageSize %d, " 2360 "Flags=%#x\n", facts.NumberOfPorts, 2361 le32toh(facts.FWImageSize), facts.Flags); 2362 2363 2364 if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { 2365 struct mpt_map_info mi; 2366 int error; 2367 2368 /* 2369 * In some configurations, the IOC's firmware is 2370 * stored in a shared piece of system NVRAM that 2371 * is only accessable via the BIOS. In this 2372 * case, the firmware keeps a copy of firmware in 2373 * RAM until the OS driver retrieves it. Once 2374 * retrieved, we are responsible for re-downloading 2375 * the firmware after any hard-reset. 2376 */ 2377 mpt->fw_image_size = le32toh(facts.FWImageSize); 2378 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 2379 /*alignment*/1, /*boundary*/0, 2380 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2381 /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, 2382 /*filterarg*/NULL, mpt->fw_image_size, 2383 /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, 2384 /*flags*/0, &mpt->fw_dmat); 2385 if (error != 0) { 2386 mpt_prt(mpt, "cannot create fw dma tag\n"); 2387 return (ENOMEM); 2388 } 2389 error = bus_dmamem_alloc(mpt->fw_dmat, 2390 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, 2391 &mpt->fw_dmap); 2392 if (error != 0) { 2393 mpt_prt(mpt, "cannot allocate fw mem.\n"); 2394 bus_dma_tag_destroy(mpt->fw_dmat); 2395 return (ENOMEM); 2396 } 2397 mi.mpt = mpt; 2398 mi.error = 0; 2399 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2400 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, 2401 &mi, 0); 2402 mpt->fw_phys = mi.phys; 2403 2404 error = mpt_upload_fw(mpt); 2405 if (error != 0) { 2406 mpt_prt(mpt, "fw upload failed.\n"); 2407 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2408 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2409 mpt->fw_dmap); 2410 bus_dma_tag_destroy(mpt->fw_dmat); 2411 mpt->fw_image = NULL; 2412 return (EIO); 2413 } 2414 } 2415 2416 if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { 2417 mpt_prt(mpt, "mpt_get_portfacts failed\n"); 2418 needreset = 1; 2419 continue; 2420 } 2421 2422 mpt_lprt(mpt, MPT_PRT_DEBUG, 2423 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", 2424 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, 2425 pfp.MaxDevices); 2426 2427 mpt->mpt_port_type = pfp.PortType; 2428 mpt->mpt_proto_flags = pfp.ProtocolFlags; 2429 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && 2430 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && 2431 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { 2432 mpt_prt(mpt, "Unsupported Port Type (%x)\n", 2433 pfp.PortType); 2434 return (ENXIO); 2435 } 2436 mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers); 2437 2438 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2439 mpt->is_fc = 1; 2440 mpt->is_sas = 0; 2441 mpt->is_spi = 0; 2442 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2443 mpt->is_fc = 0; 2444 mpt->is_sas = 1; 2445 mpt->is_spi = 0; 2446 } else { 2447 mpt->is_fc = 0; 2448 mpt->is_sas = 0; 2449 mpt->is_spi = 1; 2450 } 2451 mpt->mpt_ini_id = pfp.PortSCSIID; 2452 mpt->mpt_max_devices = pfp.MaxDevices; 2453 2454 /* 2455 * Set our expected role with what this port supports. 2456 */ 2457 2458 mpt->role = MPT_ROLE_NONE; 2459 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2460 mpt->role |= MPT_ROLE_INITIATOR; 2461 } 2462 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2463 mpt->role |= MPT_ROLE_TARGET; 2464 } 2465 if (mpt->role == MPT_ROLE_NONE) { 2466 mpt_prt(mpt, "port does not support either target or " 2467 "initiator role\n"); 2468 return (ENXIO); 2469 } 2470 2471 if (mpt_enable_ioc(mpt, 0) != MPT_OK) { 2472 mpt_prt(mpt, "unable to initialize IOC\n"); 2473 return (ENXIO); 2474 } 2475 2476 /* 2477 * Read IOC configuration information. 2478 * 2479 * We need this to determine whether or not we have certain 2480 * settings for Integrated Mirroring (e.g.). 2481 */ 2482 mpt_read_config_info_ioc(mpt); 2483 2484 /* Everything worked */ 2485 break; 2486 } 2487 2488 if (try >= MPT_MAX_TRYS) { 2489 mpt_prt(mpt, "failed to initialize IOC"); 2490 return (EIO); 2491 } 2492 2493 return (0); 2494 } 2495 2496 static int 2497 mpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2498 { 2499 uint32_t pptr; 2500 int val; 2501 2502 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2503 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2504 return (EIO); 2505 } 2506 2507 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2508 2509 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2510 mpt_prt(mpt, "IOC failed to go to run state\n"); 2511 return (ENXIO); 2512 } 2513 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2514 2515 /* 2516 * Give it reply buffers 2517 * 2518 * Do *not* exceed global credits. 2519 */ 2520 for (val = 0, pptr = mpt->reply_phys; 2521 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2522 pptr += MPT_REPLY_SIZE) { 2523 mpt_free_reply(mpt, pptr); 2524 if (++val == mpt->mpt_global_credits - 1) 2525 break; 2526 } 2527 2528 2529 /* 2530 * Enable the port if asked. This is only done if we're resetting 2531 * the IOC after initial startup. 2532 */ 2533 if (portenable) { 2534 /* 2535 * Enable asynchronous event reporting 2536 */ 2537 mpt_send_event_request(mpt, 1); 2538 2539 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2540 mpt_prt(mpt, "failed to enable port 0\n"); 2541 return (ENXIO); 2542 } 2543 } 2544 return (MPT_OK); 2545 } 2546